This article collects typical usage examples of the Python method keras.layers.Conv2DTranspose. If you are wondering what layers.Conv2DTranspose does, how to call it, or what working code that uses it looks like, the curated examples below should help; you can also browse further usage examples from its parent module, keras.layers. A total of 21 code examples of layers.Conv2DTranspose are shown below, sorted by popularity by default.

Example 1: build_mbllen

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def build_mbllen(input_shape):

    def EM(input, kernal_size, channel):
        conv_1 = Conv2D(channel, (3, 3), activation='relu', padding='same', data_format='channels_last')(input)
        conv_2 = Conv2D(channel, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_1)
        conv_3 = Conv2D(channel*2, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_2)
        conv_4 = Conv2D(channel*4, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_3)
        conv_5 = Conv2DTranspose(channel*2, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_4)
        conv_6 = Conv2DTranspose(channel, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_5)
        res = Conv2DTranspose(3, (kernal_size, kernal_size), activation='relu', padding='valid', data_format='channels_last')(conv_6)
        return res

    inputs = Input(shape=input_shape)
    FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(inputs)
    EM_com = EM(FEM, 5, 8)

    for j in range(3):
        for i in range(0, 3):
            FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(FEM)
        EM1 = EM(FEM, 5, 8)
        EM_com = Concatenate(axis=3)([EM_com, EM1])

    outputs = Conv2D(3, (1, 1), activation='relu', padding='same', data_format='channels_last')(EM_com)
    return Model(inputs, outputs)

Developer ID: Lvfeifan, Project: MBLLEN, Lines of code: 26, Source file: Network.py
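As a quick reference for the layer used throughout these examples, here is a minimal, self-contained sketch (not taken from any of the repositories above); with padding='same', Conv2DTranspose scales the spatial dimensions by the stride:

from keras.layers import Input, Conv2DTranspose
from keras.models import Model

inp = Input(shape=(16, 16, 8))                      # channels-last 16x16 feature map
out = Conv2DTranspose(4, kernel_size=(3, 3), strides=(2, 2), padding='same')(inp)
print(Model(inp, out).output_shape)                 # (None, 32, 32, 4)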
Example 2: Transpose2D_block

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def Transpose2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):

    def layer(input_tensor):

        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name, use_bias=not(use_batchnorm))(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name+'1')(x)
        x = Activation('relu', name=relu_name+'1')(x)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer

Developer ID: SpaceNetChallenge, Project: SpaceNet_Off_Nadir_Solutions, Lines of code: 23, Source file: blocks.py
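Transpose2D_block is written in the "function returning a layer closure" style that recurs throughout these examples. A minimal, self-contained sketch of that pattern (the names below are illustrative, not the SpaceNet helpers):

from keras.layers import Input, Conv2DTranspose, BatchNormalization, Activation
from keras.models import Model

def upsample_block(filters, upsample_rate=(2, 2)):
    # Factory that returns a closure; the closure is applied to a tensor like a layer.
    def layer(input_tensor):
        x = Conv2DTranspose(filters, (4, 4), strides=upsample_rate, padding='same')(input_tensor)
        x = BatchNormalization()(x)
        return Activation('relu')(x)
    return layer

inp = Input(shape=(8, 8, 64))
out = upsample_block(32)(inp)                       # doubles the spatial resolution
print(Model(inp, out).output_shape)                 # (None, 16, 16, 32)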
Example 3: Conv2DTranspose

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def Conv2DTranspose(filters, upsample_rate, kernel_size=(4,4), up_name='up', **kwargs):

    #if not tuple(upsample_rate) == (2,2):
    #    raise NotImplementedError(
    #        f'Conv2DTranspose support only upsample_rate=(2, 2), got {upsample_rate}')

    def layer(input_tensor):
        x = Transpose(filters,
                      kernel_size=kernel_size,
                      strides=upsample_rate,
                      padding='same',
                      name=up_name)(input_tensor)
        return x

    return layer

Developer ID: SpaceNetChallenge, Project: SpaceNet_Off_Nadir_Solutions, Lines of code: 20, Source file: blocks.py
Example 4: Conv2DTranspose

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def Conv2DTranspose(filters, upsample_rate, kernel_size=(4,4), up_name='up', **kwargs):

    if not tuple(upsample_rate) == (2,2):
        raise NotImplementedError(
            f'Conv2DTranspose support only upsample_rate=(2, 2), got {upsample_rate}')

    def layer(input_tensor):
        x = Transpose(filters,
                      kernel_size=kernel_size,
                      strides=upsample_rate,
                      padding='same',
                      name=up_name)(input_tensor)
        return x

    return layer

Developer ID: pubgeo, Project: dfc2019, Lines of code: 20, Source file: blocks.py
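Examples 3 and 4 deliberately shadow the Keras layer name; the `Transpose` they call is presumably the real layer imported under an alias at the top of blocks.py, roughly as follows (the exact import line is an assumption; only the alias name is taken from the code above):

# Assumed import, so that the wrapper's call to Transpose(...) resolves to the
# actual Keras layer instead of recursing into the shadowing wrapper function.
from keras.layers import Conv2DTranspose as Transpose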
Example 5: fsrcnn

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def fsrcnn(x, d=56, s=12, m=4, scale=3):
    """Build an FSRCNN model.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f = [5, 1] + [3] * m + [1]
    n = [d, s] + [s] * m + [d]
    for ni, fi in zip(n, f):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(c, 9, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    return model

Developer ID: qobilidop, Project: srcnn, Lines of code: 18, Source file: models.py
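A hedged usage sketch for fsrcnn (not from the srcnn repository); it assumes channels-last low-resolution patches and that the imports at the top of models.py are in scope, and it only uses the array's shape to size the network:

import numpy as np

# Hypothetical batch of single-channel 32x32 low-resolution patches.
x_train = np.zeros((16, 32, 32, 1), dtype='float32')
model = fsrcnn(x_train, d=56, s=12, m=4, scale=3)
model.compile(optimizer='adam', loss='mse')
# The final Conv2DTranspose (stride 3, padding='same') maps 32x32 inputs to 96x96 outputs.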
Example 6: nsfsrcnn

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def nsfsrcnn(x, d=56, s=12, m=4, scale=3, pos=1):
    """Build an FSRCNN model, but change deconv position.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f1 = [5, 1] + [3] * pos
    n1 = [d, s] + [s] * pos
    f2 = [3] * (m - pos - 1) + [1]
    n2 = [s] * (m - pos - 1) + [d]
    f3 = 9
    n3 = c
    for ni, fi in zip(n1, f1):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(s, 3, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    for ni, fi in zip(n2, f2):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2D(n3, f3, padding='same',
                     kernel_initializer='he_normal'))
    return model

Developer ID: qobilidop, Project: srcnn, Lines of code: 27, Source file: models.py
Example 7: classification_branch_wrapper

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def classification_branch_wrapper(self, input, softmax_trainable=False):
    x = self.res_block(input, filter=128, stages=9, block=4)
    # all layers before OPI
    x = Conv2D(filters=5, kernel_size=(1, 1), padding='same',
               name='conv2d_after_fourth_resblock',
               kernel_regularizer=keras.regularizers.l2(self.l2r))(x)
    x = BatchNormalization(name='bn_after_fourth_resblock')(x)
    x = Activation('relu', name='relu_after_fourth_resblock')(x)
    x = Conv2DTranspose(filters=5, kernel_size=(3, 3), strides=(2, 2), padding='same',
                        kernel_regularizer=keras.regularizers.l2(self.l2r),
                        name='secondlast_deconv_before_cls')(x)
    x = BatchNormalization(name='secondlast_bn_before_cls')(x)
    x = Activation('relu', name='last_relu_before_cls')(x)
    x = Conv2DTranspose(filters=5, kernel_size=(3, 3), strides=(2, 2), padding='same',
                        kernel_regularizer=keras.regularizers.l2(self.l2r),
                        name='last_deconv_before_cls')(x)
    x_output = BatchNormalization(name='last_bn_before_cls')(x)
    if softmax_trainable == True:
        x_output = Activation('softmax', name='Classification_output')(x_output)
    return x_output

Developer ID: zhuyiche, Project: sfcn-opi, Lines of code: 23, Source file: model.py
Example 8: modelGenerator

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def modelGenerator(self, name):
    inputImg = Input(shape=self.latent_dim)
    # Layer 1: 1 res block
    x = self.resblk(inputImg, 256)
    # Layer 2: 2 res block
    x = self.resblk(x, 256)
    # Layer 3: 3 res block
    x = self.resblk(x, 256)
    # Layer 4:
    x = Conv2DTranspose(128, kernel_size=3, strides=2, padding='same')(x)
    x = LeakyReLU(alpha=0.01)(x)
    # Layer 5:
    x = Conv2DTranspose(64, kernel_size=3, strides=2, padding='same')(x)
    x = LeakyReLU(alpha=0.01)(x)
    # Layer 6
    x = Conv2DTranspose(self.channels, kernel_size=1, strides=1, padding='valid')(x)
    z = Activation("tanh")(x)

    return Model(inputs=inputImg, outputs=z, name=name)

Developer ID: simontomaskarlsson, Project: GAN-MRI, Lines of code: 21, Source file: UNIT.py
Example 9: model_3

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def model_3():
    input_layer = Input(shape=(224,224,3))

    from keras.layers import Conv2DTranspose as DeConv
    resnet = ResNet50(include_top=False, weights="imagenet")
    resnet.trainable = False

    res_features = resnet(input_layer)

    conv = DeConv(1024, padding="valid", activation="relu", kernel_size=3)(res_features)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(512, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(128, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(32, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(8, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(4, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = DeConv(1, padding="valid", activation="sigmoid", kernel_size=5)(conv)

    model = Model(inputs=input_layer, outputs=conv)
    return model
Example 10: get_unet_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
                   dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n

        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)

        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model

Developer ID: zxq2233, Project: n2n-watermark-remove, Lines of code: 36, Source file: model.py
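A hedged usage sketch (not from the repository): because the input layer is declared with shape (None, None, input_channel_num), the returned U-Net accepts variable image sizes, which should be divisible by 2**depth so the skip concatenations line up. Setting upconv=False switches the decoder from UpSampling2D + Conv2D to Conv2DTranspose:

# Assumes get_unet_model and the Keras imports from model.py are in scope.
model = get_unet_model(input_channel_num=3, out_ch=3, depth=4, upconv=False)
model.compile(optimizer='adam', loss='mse')
model.summary()   # decoder stages now use Conv2DTranspose(dim, 3, strides=2, padding='same')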
Example 11: build_REDNet

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def build_REDNet(nb_layers, input_size, nb_filters=32, k_size=3, dropout=0, strides=1, every=1):
    # -> CONV/FC -> BatchNorm -> ReLu(or other activation) -> Dropout -> CONV/FC ->
    # https://arxiv.org/pdf/1502.03167.pdf
    input_img = Input(shape=(input_size, input_size, 1))

    x = input_img

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    encoderLayers = [None] * nb_layers

    for i in range(nb_layers):
        x = Conv2D(nb_filters, kernel_size=k_size, strides=strides, padding='same')(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation('relu')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)
        encoderLayers[i] = x

    encoded = x

    for i in range(nb_layers):
        ind = nb_layers - i - 1
        x = layers.add([x, encoderLayers[ind]])
        x = Conv2DTranspose(nb_filters, kernel_size=k_size, strides=strides, padding='same')(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation('relu')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)

    decoded = Conv2D(1, kernel_size=k_size, strides=1, padding='same', activation='sigmoid')(x)

    autoencoder = Model(input_img, decoded)
    return autoencoder, encoded, decoded
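A hedged usage sketch (not from the original code): build_REDNet returns the full autoencoder together with its encoded and decoded tensors, so training only needs the first return value:

# Assumes build_REDNet and the Keras imports from its module are available.
autoencoder, encoded, decoded = build_REDNet(nb_layers=5, input_size=64, nb_filters=32)
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.summary()   # symmetric encoder/decoder with additive skip connections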
Example 12: deconv

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def deconv(input, channels, kernel_size, scale):
    return Conv2DTranspose(channels, kernel_size=kernel_size, strides=scale, padding='same')(input)
Example 13: uk

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def uk(self, x, k):
    # (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)
    if self.use_resize_convolution:
        x = UpSampling2D(size=(2, 2))(x)  # Nearest neighbor upsampling
        x = ReflectionPadding2D((1, 1))(x)
        x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid')(x)
    else:
        x = Conv2DTranspose(filters=k, kernel_size=3, strides=2, padding='same')(x)  # this matches fractionally strided with stride 1/2
    x = self.normalization(axis=3, center=True, epsilon=1e-5)(x, training=True)
    x = Activation('relu')(x)
    return x

#===============================================================================
# Models

Developer ID: simontomaskarlsson, Project: CycleGAN-Keras, Lines of code: 16, Source file: model.py
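Both branches of uk double the spatial resolution; the resize-convolution branch is a common way to avoid checkerboard artifacts from transposed convolutions. A minimal sketch comparing the two options outside the class (ZeroPadding2D stands in for the repository's custom ReflectionPadding2D layer):

from keras.layers import Input, UpSampling2D, ZeroPadding2D, Conv2D, Conv2DTranspose
from keras.models import Model

inp = Input(shape=(64, 64, 128))

# Option A: transposed convolution, stride 2, 'same' padding -> 128x128
a = Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same')(inp)

# Option B: nearest-neighbour upsampling, pad by 1, then a 3x3 'valid' convolution -> 128x128
b = UpSampling2D(size=(2, 2))(inp)
b = ZeroPadding2D((1, 1))(b)
b = Conv2D(filters=64, kernel_size=3, strides=1, padding='valid')(b)

print(Model(inp, [a, b]).output_shape)   # [(None, 128, 128, 64), (None, 128, 128, 64)]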
Example 14: upsampling_block

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def upsampling_block(self, input_tensor, skip_tensor, filters, padding='valid',
                     batchnorm=False, dropout=0.0):
    x = Conv2DTranspose(filters, kernel_size=(2,2), strides=(2,2))(input_tensor)

    # compute amount of cropping needed for skip_tensor
    _, x_height, x_width, _ = K.int_shape(x)
    _, s_height, s_width, _ = K.int_shape(skip_tensor)
    h_crop = s_height - x_height
    w_crop = s_width - x_width
    assert h_crop >= 0
    assert w_crop >= 0
    if h_crop == 0 and w_crop == 0:
        y = skip_tensor
    else:
        cropping = ((h_crop//2, h_crop - h_crop//2), (w_crop//2, w_crop - w_crop//2))
        y = Cropping2D(cropping=cropping)(skip_tensor)

    x = Concatenate()([x, y])

    # no dilation in upsampling convolutions
    x = Conv2D(filters, kernel_size=(3,3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x
    x = Conv2D(filters, kernel_size=(3,3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x
    return x

Developer ID: jackkwok, Project: neural-road-inspector, Lines of code: 33, Source file: unet.py
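In a U-Net whose contracting path uses 'valid' convolutions (as the default padding here suggests), the skip tensors end up slightly larger than the upsampled decoder tensors, which is what the Cropping2D step compensates for. A small worked sketch of the crop arithmetic with hypothetical sizes:

# Hypothetical sizes: decoder tensor 28x28 entering the block, encoder skip tensor 64x64.
x_height = 28 * 2                               # Conv2DTranspose with stride (2, 2) -> 56
s_height = 64                                   # height of the skip tensor
h_crop = s_height - x_height                    # 8 rows to remove from the skip tensor
cropping = (h_crop // 2, h_crop - h_crop // 2)  # (4, 4), as computed in the block above
print(cropping)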
Example 15: TransitionUp

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def TransitionUp(self, filters, input_shape, output_shape):
    model = self.model
    model.add(Conv2DTranspose(filters, kernel_size=(3, 3), strides=(2, 2),
                              padding='same',
                              output_shape=output_shape,
                              input_shape=input_shape,
                              kernel_initializer="he_uniform",
                              data_format='channels_last'))

Developer ID: jackkwok, Project: neural-road-inspector, Lines of code: 10, Source file: tiramisu.py
Example 16: test_transposed_conv

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def test_transposed_conv(self):
    keras_model = Sequential()
    keras_model.add(Conv2DTranspose(32, (2, 2), strides=(2, 2),
                                    input_shape=(3, 32, 32), name='trans'))
    keras_model.compile(loss=keras.losses.categorical_crossentropy,
                        optimizer=keras.optimizers.SGD())

    pytorch_model = TransposeNet()

    self.transfer(keras_model, pytorch_model)
    self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)

# Tests special activation function
Example 17: upsampling_block

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def upsampling_block(input_tensor, skip_tensor, filters, padding='same',
                     batchnorm=True, dropout=0.0):
    x = Conv2DTranspose(filters, kernel_size=(2, 2), strides=(2, 2))(input_tensor)

    # compute amount of cropping needed for skip_tensor
    _, x_height, x_width, _ = K.int_shape(x)
    _, s_height, s_width, _ = K.int_shape(skip_tensor)
    h_crop = s_height - x_height
    w_crop = s_width - x_width
    assert h_crop >= 0
    assert w_crop >= 0
    if h_crop == 0 and w_crop == 0:
        y = skip_tensor
    else:
        cropping = ((h_crop // 2, h_crop - h_crop // 2), (w_crop // 2, w_crop - w_crop // 2))
        y = Cropping2D(cropping=cropping)(skip_tensor)

    x = Concatenate()([x, y])

    x = Conv2D(filters, kernel_size=(3,3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x

    x = Conv2D(filters, kernel_size=(3, 3), padding=padding)(x)
    x = BatchNormalization()(x) if batchnorm else x
    x = Activation('relu')(x)
    x = Dropout(dropout)(x) if dropout > 0 else x

    return x

Developer ID: neuropoly, Project: spinalcordtoolbox, Lines of code: 31, Source file: cnn_models.py
Example 18: fCreateConv2DTranspose_ResBlock

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def fCreateConv2DTranspose_ResBlock(filters, kernel_size=(3, 3), strides=(2, 2), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv2DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        skip = LeakyReLU()(output)
        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(skip)
        output = LeakyReLU()(output)
        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(output)
        output = LeakyReLU()(output)
        output = add([skip, output])
        return output

    return f

Developer ID: thomaskuestner, Project: CNNArt, Lines of code: 32, Source file: network.py
Example 19: fCreateConv2DTranspose

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def fCreateConv2DTranspose(filters, strides, kernel_size=(3, 3), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        conv2d = Conv2DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        return LeakyReLU()(conv2d)

    return f

Developer ID: thomaskuestner, Project: CNNArt, Lines of code: 15, Source file: network.py
Example 20: fCreateConv2DBNTranspose

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def fCreateConv2DBNTranspose(filters, strides, kernel_size=(3, 3), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv2DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        output = BatchNormalization(axis=1)(output)
        return LeakyReLU()(output)

    return f

Developer ID: thomaskuestner, Project: CNNArt, Lines of code: 16, Source file: network.py
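A hedged usage sketch for these factory functions from network.py (not from the repository itself); it assumes the module's own imports are in scope and a channels-first image_data_format, which the BatchNormalization(axis=1) above implies:

from keras.layers import Input
from keras.models import Model

# Hypothetical decoder stage: upsample an 8x8 feature map twice.
inp = Input(shape=(128, 8, 8))                                   # (channels, height, width)
x = fCreateConv2DBNTranspose(filters=64, strides=(2, 2))(inp)    # -> (64, 16, 16)
x = fCreateConv2DTranspose(filters=32, strides=(2, 2))(x)        # -> (32, 32, 32)
decoder = Model(inp, x)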
Example 21: convert_weights

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Conv2DTranspose [as alias]
def convert_weights(layer, weights):
    if layer.__class__.__name__ == 'GRU':
        W = [np.split(w, 3, axis=-1) for w in weights]
        return sum(map(list, zip(*W)), [])
    elif layer.__class__.__name__ in ('LSTM', 'ConvLSTM2D'):
        W = [np.split(w, 4, axis=-1) for w in weights]
        for w in W:
            w[2], w[1] = w[1], w[2]
        return sum(map(list, zip(*W)), [])
    elif layer.__class__.__name__ == 'Conv2DTranspose':
        return [np.transpose(weights[0], (2, 3, 0, 1)), weights[1]]
    return weights