This article collects typical usage examples of the keras.layers.Reshape method in Python. If you have been wondering what layers.Reshape does and how to call it, the curated examples below should help; you can also browse the rest of the keras.layers module for related layers. In total, 27 code examples of layers.Reshape are shown below, ordered by popularity. Each example assumes the layer has already been imported, either as from keras.layers import Reshape or referenced as layers.Reshape after from keras import layers.

Example 1: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 26, Source: sgan.py
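Before the remaining examples, here is a minimal standalone sketch of what Reshape itself does. The 100-dimensional latent size and the 7x7x128 target shape are illustrative choices for this sketch, not taken from any of the projects listed here.

import numpy as np
from keras.layers import Input, Dense, Reshape
from keras.models import Model

# Toy model: project a 100-dim vector to 7*7*128 features, then let Reshape
# reinterpret the flat vector as a 7x7 feature map with 128 channels.
# Reshape has no trainable weights; the batch axis is implicit and is not
# part of the target shape.
latent = Input(shape=(100,))
h = Dense(7 * 7 * 128)(latent)
grid = Reshape((7, 7, 128))(h)

model = Model(latent, grid)
print(model.output_shape)                        # (None, 7, 7, 128)
print(model.predict(np.zeros((2, 100))).shape)   # (2, 7, 7, 128)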
Example 2: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(512, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))
    model.summary()

    z = Input(shape=(self.latent_dim,))
    gen_img = model(z)

    return Model(z, gen_img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 20, Source: bigan.py
Example 3: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))

    gen_input = Input(shape=(self.latent_dim,))
    img = model(gen_input)

    model.summary()

    return Model(gen_input, img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 26, Source: infogan.py
Example 4: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 25, Source: wgan.py
Example 5: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 24, Source: lsgan.py
Example 6: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 25, Source: dcgan.py
Example 7: duc

def duc(x, factor=8, output_shape=(512, 512, 1)):
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    H, W, c, r = output_shape[0], output_shape[1], output_shape[2], factor
    h = H // r  # integer division so Reshape receives integer dimensions
    w = W // r
    x = Conv2D(
        c * r * r,
        (3, 3),
        padding='same',
        name='conv_duc_%s' % factor)(x)
    x = BatchNormalization(axis=bn_axis, name='bn_duc_%s' % factor)(x)
    x = Activation('relu')(x)

    x = Permute((3, 1, 2))(x)
    x = Reshape((c, r, r, h, w))(x)
    x = Permute((1, 4, 2, 5, 3))(x)
    x = Reshape((c, H, W))(x)
    x = Permute((2, 3, 1))(x)

    return x
    # interpolation

Author: dhkim0225, Project: keras-image-segmentation, Lines: 27, Source: pspnet.py
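The Reshape/Permute chain at the end of duc() is the sub-pixel ("depth-to-space") trick: channels are folded into spatial positions, so an (h, w, c*r*r) feature map becomes an (H, W, c) map with H = h*r and W = w*r. Below is a small NumPy sketch of the same index gymnastics; it is a toy check with made-up sizes, not code from the pspnet.py source.

import numpy as np

h, w, c, r = 2, 3, 1, 2
x = np.arange(h * w * c * r * r).reshape(1, h, w, c * r * r)  # batch of one

# Mirrors Permute((3,1,2)) -> Reshape((c,r,r,h,w)) -> Permute((1,4,2,5,3))
# -> Reshape((c,H,W)) -> Permute((2,3,1)) from duc(), with an explicit batch axis.
y = x.transpose(0, 3, 1, 2)          # (1, c*r*r, h, w)
y = y.reshape(1, c, r, r, h, w)      # (1, c, r, r, h, w)
y = y.transpose(0, 1, 4, 2, 5, 3)    # (1, c, h, r, w, r)
y = y.reshape(1, c, h * r, w * r)    # (1, c, H, W)
y = y.transpose(0, 2, 3, 1)          # (1, H, W, c)
print(y.shape)                       # (1, 4, 6, 1)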
Example 8: get_Shared_Model

def get_Shared_Model(input_dim):
    sharedNet = Sequential()
    sharedNet.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    # sharedNet.add(Dropout(0.1))
    # sharedNet.add(Dense(3, activation='relu'))

    # Alternative convolutional variant kept commented out in the source:
    # sharedNet = Sequential()
    # sharedNet.add(Dense(4096, activation="tanh", kernel_regularizer=l2(2e-3)))
    # sharedNet.add(Reshape(target_shape=(64, 64, 1)))
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=128, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(Flatten())
    # sharedNet.add(Dense(1024, activation="sigmoid", kernel_regularizer=l2(1e-3)))
    return sharedNet
Example 9: call

def call(self, inputs):
    def wrapper(rois, mrcnn_class, mrcnn_bbox, image_meta):
        # currently supports one image per batch
        b = 0
        _, _, window, _ = parse_image_meta(image_meta)
        detections = refine_detections(
            rois[b], mrcnn_class[b], mrcnn_bbox[b], window[b], self.config)
        # Pad with zeros if detections < DETECTION_MAX_INSTANCES
        gap = self.config.DETECTION_MAX_INSTANCES - detections.shape[0]
        assert gap >= 0
        if gap > 0:
            detections = np.pad(detections, [(0, gap), (0, 0)],
                                'constant', constant_values=0)

        # Cast to float32
        # TODO: track where float64 is introduced
        detections = detections.astype(np.float32)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
        return np.reshape(detections,
                          [1, self.config.DETECTION_MAX_INSTANCES, 6])

    # Return wrapped function
    return tf.py_func(wrapper, inputs, tf.float32)

Author: SunskyF, Project: EasyPR-python, Lines: 27, Source: model.py
Example 10: model

def model(self, block_starting_size=128, num_blocks=4):
    model = Sequential()
    block_size = block_starting_size
    model.add(Dense(block_size, input_shape=(self.LATENT_SPACE_SIZE,)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    for i in range(num_blocks - 1):
        block_size = block_size * 2
        model.add(Dense(block_size))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(self.W * self.H * self.C, activation='tanh'))
    model.add(Reshape((self.W, self.H, self.C)))

    return model

Author: PacktPublishing, Project: Generative-Adversarial-Networks-Cookbook, Lines: 20, Source: generator.py
Example 11: dc_model

def dc_model(self):
    # Note: Convolution2D(filters, rows, cols, border_mode=...) is the legacy Keras 1 API.
    model = Sequential()
    model.add(Dense(256 * 8 * 8, activation=LeakyReLU(0.2),
                    input_dim=self.LATENT_SPACE_SIZE))
    model.add(BatchNormalization())
    model.add(Reshape((8, 8, 256)))

    model.add(UpSampling2D())
    model.add(Convolution2D(128, 5, 5, border_mode='same', activation=LeakyReLU(0.2)))
    model.add(BatchNormalization())

    model.add(UpSampling2D())
    model.add(Convolution2D(64, 5, 5, border_mode='same', activation=LeakyReLU(0.2)))
    model.add(BatchNormalization())

    model.add(UpSampling2D())
    model.add(Convolution2D(self.C, 5, 5, border_mode='same', activation='tanh'))

    return model

Author: PacktPublishing, Project: Generative-Adversarial-Networks-Cookbook, Lines: 23, Source: generator.py
Example 12: call

def call(self, inputs):
    def wrapper(rois, mrcnn_class, mrcnn_bbox, image_meta):
        detections_batch = []
        for b in range(self.config.BATCH_SIZE):
            _, _, window, _ = parse_image_meta(image_meta)
            detections = refine_detections(
                rois[b], mrcnn_class[b], mrcnn_bbox[b], window[b], self.config)
            # Pad with zeros if detections < DETECTION_MAX_INSTANCES
            gap = self.config.DETECTION_MAX_INSTANCES - detections.shape[0]
            assert gap >= 0
            if gap > 0:
                detections = np.pad(
                    detections, [(0, gap), (0, 0)], 'constant', constant_values=0)
            detections_batch.append(detections)

        # Stack detections and cast to float32
        # TODO: track where float64 is introduced
        detections_batch = np.array(detections_batch).astype(np.float32)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
        return np.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    # Return wrapped function
    return tf.py_func(wrapper, inputs, tf.float32)

Author: olgaliak, Project: segmentation-unet-maskrcnn, Lines: 26, Source: model.py
Example 13: build_discriminator

def build_discriminator(self):
    """Discriminator network with PatchGAN."""
    inp_img = Input(shape=(self.image_size, self.image_size, 3))
    x = ZeroPadding2D(padding=1)(inp_img)
    x = Conv2D(filters=self.d_conv_dim, kernel_size=4, strides=2,
               padding='valid', use_bias=False)(x)
    x = LeakyReLU(0.01)(x)

    curr_dim = self.d_conv_dim
    for i in range(1, self.d_repeat_num):
        x = ZeroPadding2D(padding=1)(x)
        x = Conv2D(filters=curr_dim * 2, kernel_size=4, strides=2, padding='valid')(x)
        x = LeakyReLU(0.01)(x)
        curr_dim = curr_dim * 2

    kernel_size = int(self.image_size / np.power(2, self.d_repeat_num))

    out_src = ZeroPadding2D(padding=1)(x)
    out_src = Conv2D(filters=1, kernel_size=3, strides=1,
                     padding='valid', use_bias=False)(out_src)

    out_cls = Conv2D(filters=self.c_dim, kernel_size=kernel_size,
                     strides=1, padding='valid', use_bias=False)(x)
    out_cls = Reshape((self.c_dim,))(out_cls)

    return Model(inp_img, [out_src, out_cls])

Author: hoangthang1607, Project: StarGAN-Keras, Lines: 25, Source: StarGAN.py
Example 14: ssr_F_model_build

def ssr_F_model_build(self, feat_dim, name_F):
    input_s1_pre = Input((feat_dim,))
    input_s2_pre = Input((feat_dim,))
    input_s3_pre = Input((feat_dim,))

    def _process_input(stage_index, stage_num, num_classes, input_s_pre):
        feat_delta_s = FeatSliceLayer(0, 4)(input_s_pre)
        delta_s = Dense(num_classes, activation='tanh',
                        name=f'delta_s{stage_index}')(feat_delta_s)

        feat_local_s = FeatSliceLayer(4, 8)(input_s_pre)
        local_s = Dense(units=num_classes, activation='tanh',
                        name=f'local_delta_stage{stage_index}')(feat_local_s)

        feat_pred_s = FeatSliceLayer(8, 16)(input_s_pre)
        feat_pred_s = Dense(stage_num * num_classes, activation='relu')(feat_pred_s)
        pred_s = Reshape((num_classes, stage_num))(feat_pred_s)

        return delta_s, local_s, pred_s

    delta_s1, local_s1, pred_s1 = _process_input(1, self.stage_num[0], self.num_classes, input_s1_pre)
    delta_s2, local_s2, pred_s2 = _process_input(2, self.stage_num[1], self.num_classes, input_s2_pre)
    delta_s3, local_s3, pred_s3 = _process_input(3, self.stage_num[2], self.num_classes, input_s3_pre)

    return Model(inputs=[input_s1_pre, input_s2_pre, input_s3_pre],
                 outputs=[pred_s1, pred_s2, pred_s3,
                          delta_s1, delta_s2, delta_s3,
                          local_s1, local_s2, local_s3],
                 name=name_F)
Example 15: ssr_FC_model_build

def ssr_FC_model_build(self, feat_dim, name_F):
    input_s1_pre = Input((feat_dim,))
    input_s2_pre = Input((feat_dim,))
    input_s3_pre = Input((feat_dim,))

    def _process_input(stage_index, stage_num, num_classes, input_s_pre):
        feat_delta_s = Dense(2 * num_classes, activation='tanh')(input_s_pre)
        delta_s = Dense(num_classes, activation='tanh',
                        name=f'delta_s{stage_index}')(feat_delta_s)

        feat_local_s = Dense(2 * num_classes, activation='tanh')(input_s_pre)
        local_s = Dense(units=num_classes, activation='tanh',
                        name=f'local_delta_stage{stage_index}')(feat_local_s)

        feat_pred_s = Dense(stage_num * num_classes, activation='relu')(input_s_pre)
        pred_s = Reshape((num_classes, stage_num))(feat_pred_s)

        return delta_s, local_s, pred_s

    delta_s1, local_s1, pred_s1 = _process_input(1, self.stage_num[0], self.num_classes, input_s1_pre)
    delta_s2, local_s2, pred_s2 = _process_input(2, self.stage_num[1], self.num_classes, input_s2_pre)
    delta_s3, local_s3, pred_s3 = _process_input(3, self.stage_num[2], self.num_classes, input_s3_pre)

    return Model(inputs=[input_s1_pre, input_s2_pre, input_s3_pre],
                 outputs=[pred_s1, pred_s2, pred_s3,
                          delta_s1, delta_s2, delta_s3,
                          local_s1, local_s2, local_s3],
                 name=name_F)
Example 16: generator

def generator(input_dim, alpha=0.2):
    # Note: output_dim is the legacy Keras 1 keyword for the Dense layer size.
    model = Sequential()
    model.add(Dense(input_dim=input_dim, output_dim=4 * 4 * 512))
    model.add(Reshape(target_shape=(4, 4, 512)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same'))
    model.add(Activation('tanh'))
    return model

# Define the Discriminator Network

Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 19, Source: captcha_gan.py
Example 17: weather_fnn

def weather_fnn(layers, lr, decay, loss, seq_len, input_features, output_features):
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')
    # print(seq_len * input_features)
    conv_ = Conv1D(11, kernel_size=13, strides=1, data_format='channels_last',
                   padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1, data_format='channels_last',
                   padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    inputs = Reshape((-1,))(conv_)

    for i, hidden_nums in enumerate(layers):
        if i == 0:
            hn = Dense(hidden_nums, activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        else:
            hn = Dense(hidden_nums, activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
            # hn = Dropout(0.1)(hn)

    # print(seq_len, output_features)
    # print(hn)
    outputs = Dense(seq_len * output_features, activation='sigmoid',
                    name='output_layer')(hn)  # 37*3
    outputs = Reshape((seq_len, output_features))(outputs)

    weather_fnn = Model(ori_inputs, outputs=[outputs])

    return weather_fnn

Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 39, Source: weather_model.py
Example 18: _get_model

def _get_model(X, cat_cols, num_cols, n_uniq, n_emb, output_activation):
    inputs = []
    num_inputs = []
    embeddings = []
    for i, col in enumerate(cat_cols):
        if not n_uniq[i]:
            n_uniq[i] = X[col].nunique()
        if not n_emb[i]:
            n_emb[i] = max(MIN_EMBEDDING, 2 * int(np.log2(n_uniq[i])))

        _input = Input(shape=(1,), name=col)
        _embed = Embedding(input_dim=n_uniq[i], output_dim=n_emb[i],
                           name=col + EMBEDDING_SUFFIX)(_input)
        _embed = Dropout(.2)(_embed)
        _embed = Reshape((n_emb[i],))(_embed)

        inputs.append(_input)
        embeddings.append(_embed)

    if num_cols:
        num_inputs = Input(shape=(len(num_cols),), name='num_inputs')
        merged_input = Concatenate(axis=1)(embeddings + [num_inputs])
        inputs = inputs + [num_inputs]
    else:
        merged_input = Concatenate(axis=1)(embeddings)

    x = BatchNormalization()(merged_input)
    x = Dense(128, activation='relu')(x)
    x = Dropout(.5)(x)
    x = BatchNormalization()(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(.5)(x)
    x = BatchNormalization()(x)
    output = Dense(1, activation=output_activation)(x)

    model = Model(inputs=inputs, outputs=output)

    return model, n_emb, n_uniq
Example 19: channel_shuffle

def channel_shuffle(self, x):
    n, h, w, c = x.shape.as_list()
    x_reshaped = layers.Reshape([h, w, self.groups, int(c // self.groups)])(x)
    x_transposed = layers.Permute((1, 2, 4, 3))(x_reshaped)
    output = layers.Reshape([h, w, c])(x_transposed)
    return output

Author: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines: 8, Source: lednet.py
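The reshape-transpose-reshape pattern in channel_shuffle interleaves channels across groups (the ShuffleNet-style channel shuffle). The following toy NumPy check uses 6 channels and 3 groups as illustrative values; it mirrors the rearrangement but is not code from the lednet.py source.

import numpy as np

c, groups = 6, 3
x = np.arange(c).reshape(1, 1, 1, c)          # one pixel, channels 0..5
y = x.reshape(1, 1, 1, groups, c // groups)   # split channels into groups
y = y.transpose(0, 1, 2, 4, 3)                # swap group and within-group axes
y = y.reshape(1, 1, 1, c)                     # flatten back to a channel axis
print(y.ravel())                              # [0 2 4 1 3 5]: channels interleaved across groups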
Example 20: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 28, Source: cgan.py
Example 21: build_generator

def build_generator(self):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))
    model.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)

Author: eriklindernoren, Project: Keras-GAN, Lines: 30, Source: acgan.py
Example 22: build_generators

def build_generators(self):
    # Shared weights between generators
    model = Sequential()
    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    noise = Input(shape=(self.latent_dim,))
    feature_repr = model(noise)

    # Generator 1
    g1 = Dense(1024)(feature_repr)
    g1 = LeakyReLU(alpha=0.2)(g1)
    g1 = BatchNormalization(momentum=0.8)(g1)
    g1 = Dense(np.prod(self.img_shape), activation='tanh')(g1)
    img1 = Reshape(self.img_shape)(g1)

    # Generator 2
    g2 = Dense(1024)(feature_repr)
    g2 = LeakyReLU(alpha=0.2)(g2)
    g2 = BatchNormalization(momentum=0.8)(g2)
    g2 = Dense(np.prod(self.img_shape), activation='tanh')(g2)
    img2 = Reshape(self.img_shape)(g2)

    model.summary()

    return Model(noise, img1), Model(noise, img2)

Author: eriklindernoren, Project: Keras-GAN, Lines: 33, Source: cogan.py
Example 23: call

def call(self, inputs):
    output = self.conv1(inputs)
    output = layers.Reshape(
        target_shape=[-1, self.dim_capsule], name='primarycap_reshape')(output)
    return squash(output)
Example 24: CapsuleNet

def CapsuleNet(input_shape, n_class, num_routing):
    """
    The whole capsule network for MNIST recognition.
    """
    # (None, H, W, C)
    x = Input(input_shape)
    conv1 = Conv2D(filters=256, kernel_size=9, padding='valid',
                   activation='relu', name='init_conv')(x)
    # (None, num_capsules, capsule_dim)
    prim_caps = PrimaryCapsules(filters=32, kernel_size=9, dim_capsule=8,
                                padding='valid', strides=(2, 2))(conv1)
    # (None, n_class, dim_vector)
    digit_caps = DigiCaps(num_capsule=n_class, dim_capsule=16,
                          num_routing=num_routing, name='digitcaps')(prim_caps)
    # (None, n_class)
    pred = Length(name='out_caps')(digit_caps)

    # (None, n_class)
    y = Input(shape=(n_class,))

    # (None, n_class * dim_vector)
    masked = Mask()([digit_caps, y])

    x_recon = layers.Dense(512, activation='relu')(masked)
    x_recon = layers.Dense(1024, activation='relu')(x_recon)
    x_recon = layers.Dense(784, activation='sigmoid')(x_recon)
    x_recon = layers.Reshape(target_shape=[28, 28, 1], name='out_recon')(x_recon)

    # two-input-two-output keras Model
    return Model([x, y], [pred, x_recon])

Author: l11x0m7, Project: CapsNet, Lines: 33, Source: capsule.py
Example 25: generator

def generator(self):
    if self.G:
        return self.G

    # Inputs
    inp = Input(shape=[latent_size])  # Latent

    # Actual Model
    x = Dense(4 * 4 * 16 * cha, kernel_initializer='he_normal')(inp)
    x = Reshape([4, 4, 16 * cha])(x)

    x = g_block(x, 16 * cha, u=False)  # 4
    x = g_block(x, 8 * cha)            # 8
    x = g_block(x, 4 * cha)            # 16
    x = g_block(x, 3 * cha)            # 32
    x = g_block(x, 2 * cha)            # 64
    x = g_block(x, 1 * cha)            # 128

    x = Conv2D(filters=3, kernel_size=1, activation='sigmoid',
               padding='same', kernel_initializer='he_normal')(x)

    self.G = Model(inputs=inp, outputs=x)

    return self.G

Author: manicman1999, Project: Keras-BiGAN, Lines: 28, Source: bigan.py
Example 26: call

def call(self, inputs):
    rois = inputs[0]
    mrcnn_class = inputs[1]
    mrcnn_bbox = inputs[2]
    image_meta = inputs[3]

    # Get windows of images in normalized coordinates. Windows are the area
    # in the image that excludes the padding.
    # Use the shape of the first image in the batch to normalize the window
    # because we know that all images get resized to the same size.
    m = parse_image_meta_graph(image_meta)
    image_shape = m['image_shape'][0]
    window = norm_boxes_graph(m['window'], image_shape[:2])

    # Run detection refinement graph on each item in the batch
    detections_batch = utils.batch_slice(
        [rois, mrcnn_class, mrcnn_bbox, window],
        lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
        self.config.IMAGES_PER_GPU)

    # Reshape output
    # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
    # normalized coordinates
    return tf.reshape(
        detections_batch,
        [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

Author: dataiku, Project: dataiku-contrib, Lines: 28, Source: model.py
Example 27: rpn_graph

def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the feature map
    # is not even.
    # Shared convolutional base of the RPN
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride, name='rpn_conv_shared')(feature_map)

    # Anchor Score. [batch, height, width, anchors per location * 2].
    x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                  activation='linear', name='rpn_class_raw')(shared)

    # Reshape to [batch, anchors, 2]
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)

    # Softmax on last dimension of BG/FG.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding box refinement. [batch, H, W, anchors per location * depth]
    # where depth is [x, y, log(w), log(h)]
    x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                  activation='linear', name='rpn_bbox_pred')(shared)

    # Reshape to [batch, anchors, 4]
    rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)

    return [rpn_class_logits, rpn_probs, rpn_bbox]

Author: dataiku, Project: dataiku-contrib, Lines: 42, Source: model.py