这篇教程Python layers.Conv3D方法代码示例写得很实用,希望能帮到您。
本文整理汇总了Python中keras.layers.Conv3D方法的典型用法代码示例。如果您正苦于以下问题:Python layers.Conv3D方法的具体用法?Python layers.Conv3D怎么用?Python layers.Conv3D使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块keras.layers 的用法示例。 在下文中一共展示了layers.Conv3D方法的28个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。 示例1: get_model_compiled# 需要导入模块: from keras import layers [as 别名]# 或者: from keras.layers import Conv3D [as 别名]def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3): clf = Sequential() clf.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput)) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Conv3D(64, (5, 5, 16))) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(MaxPooling3D(pool_size=(2, 2, 1))) clf.add(Flatten()) clf.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay))) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Dense(num_class, activation='softmax')) clf.compile(loss=categorical_crossentropy, optimizer=Adam(lr=lr), metrics=['accuracy']) return clf
开发者ID:mhaut,项目名称:hyperspectral_deeplearning_review,代码行数:18,代码来源:cnn3d.py
示例2: transition_layer_3D
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def transition_layer_3D(input_tensor, numFilters, compressionFactor=1.0):
    """DenseNet-style transition: BN -> ReLU -> 1x1x1 conv -> 2x avg-pool.

    Returns the downsampled tensor and the (possibly compressed)
    output-filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)
    # channel axis depends on the backend's image data format
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1
    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv3D(numOutPutFilters, (1, 1, 1), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal')(x)
    # downsampling
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2), padding='valid',
                         data_format='channels_last', name='')(x)
    return x, numOutPutFilters
示例3: transition_SE_layer_3D
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def transition_SE_layer_3D(input_tensor, numFilters, compressionFactor=1.0,
                           se_ratio=16):
    """Transition layer with a squeeze-and-excitation block before pooling.

    BN -> ReLU -> 1x1x1 conv -> SE block -> 2x average pooling.
    Returns the downsampled tensor and the output-filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)
    # channel axis depends on the backend's image data format
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1
    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv3D(numOutPutFilters, (1, 1, 1), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal')(x)
    # SE Block
    x = squeeze_excitation_block_3D(x, ratio=se_ratio)
    # x = BatchNormalization(axis=bn_axis)(x)
    # downsampling
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2), padding='valid',
                         data_format='channels_last', name='')(x)
    # x = squeeze_excitation_block(x, ratio=se_ratio)
    return x, numOutPutFilters
示例4: conv_block3
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def conv_block3(n_filter, n1, n2, n3, activation="relu", border_mode="same",
                dropout=0.0, batch_norm=False, init="glorot_uniform", **kwargs):
    """Return a closure applying Conv3D (+ optional BN, dropout) to a layer.

    With batch_norm the activation is applied after BatchNormalization;
    otherwise it is fused into the Conv3D call.
    """
    def _func(lay):
        if batch_norm:
            out = Conv3D(n_filter, (n1, n2, n3), padding=border_mode,
                         kernel_initializer=init, **kwargs)(lay)
            out = BatchNormalization()(out)
            out = Activation(activation)(out)
        else:
            out = Conv3D(n_filter, (n1, n2, n3), padding=border_mode,
                         kernel_initializer=init, activation=activation,
                         **kwargs)(lay)
        if dropout is not None and dropout > 0:
            out = Dropout(dropout)(out)
        return out
    return _func
开发者ID:CSBDeep,项目名称:CSBDeep,代码行数:22,代码来源:blocks.py
示例5: denseblock
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def denseblock(x, growth_rate, strides=(1, 1, 1), internal_layers=4,
               dropout_rate=0., weight_decay=0.005):
    """Dense block: stack conv_factory layers, concatenate all features,
    then compress with a 1x1x1 bottleneck convolution."""
    x = Conv3D(growth_rate, (3, 3, 3), kernel_initializer='he_normal',
               padding="same", strides=strides, use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # collect the output of every internal layer for concatenation
    list_feat = [x]
    for _ in range(internal_layers - 1):
        x = conv_factory(x, growth_rate, dropout_rate, weight_decay)
        list_feat.append(x)
    x = concatenate(list_feat, axis=-1)
    # 1x1x1 bottleneck over the concatenated features
    x = Conv3D(internal_layers * growth_rate, (1, 1, 1),
               kernel_initializer='he_normal', padding="same",
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    return x
开发者ID:TianzhongSong,项目名称:3D-ConvNets-for-Action-Recognition,代码行数:24,代码来源:drn.py
示例6: model_thresholding
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def model_thresholding():
    """Build a small U-Net-like 3D segmentation model (channels-first).

    Input: a single-channel 240x240x48 volume. Three dilated Conv3D
    encoder stages with two max-pools, then two Conv3DTranspose decoder
    stages with skip connections; a final sigmoid Conv3D emits a
    per-voxel probability mask.

    Returns:
        keras.models.Model mapping the input volume to the sigmoid mask.
    """
    IMAGE_ORDERING = "channels_first"
    img_input = Input(shape=(1, 240, 240, 48))
    conv_1 = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_1",
                    dilation_rate=(2, 2, 2),
                    data_format=IMAGE_ORDERING)(img_input)
    maxpool_1 = MaxPool3D(name="MAXPOOL3D_1",
                          data_format=IMAGE_ORDERING)(conv_1)
    conv_2 = Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_2",
                    dilation_rate=(2, 2, 2),
                    data_format=IMAGE_ORDERING)(maxpool_1)
    maxpool_2 = MaxPool3D(name="MAXPOOL3D_2",
                          data_format=IMAGE_ORDERING)(conv_2)
    conv_3 = Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_3",
                    dilation_rate=(2, 2, 2),
                    data_format=IMAGE_ORDERING)(maxpool_2)
    convt_1 = Conv3DTranspose(16, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                              name="CONV3DT_1", activation='relu',
                              data_format=IMAGE_ORDERING)(conv_3)
    # skip connection along the channel axis (axis=1, channels-first)
    concat_1 = Concatenate(axis=1)([convt_1, conv_2])
    conv_4 = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_4",
                    data_format=IMAGE_ORDERING)(concat_1)
    convt_2 = Conv3DTranspose(4, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                              name="CONV3DT_2", activation='relu',
                              data_format=IMAGE_ORDERING)(conv_4)
    concat_2 = Concatenate(axis=1)([convt_2, conv_1])
    conv_5 = Conv3D(filters=1, kernel_size=(3, 3, 3), padding='same',
                    activation='sigmoid', name="CONV3D_5",
                    data_format=IMAGE_ORDERING)(concat_2)
    # FIX: the original duplicated the concat_2/conv_5/return statements
    # after this return; that code was unreachable and has been removed.
    return Model(img_input, conv_5)
开发者ID:ubamba98,项目名称:Brain-Segmentation,代码行数:21,代码来源:model.py
示例7: _TTL
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def _TTL(prev_layer):
    """Temporal Transition Layer: three parallel BN-ReLU-Conv3D branches
    with temporal kernel depths 1, 3 and 4, concatenated on axis 1."""
    branch_1 = BatchNormalization()(prev_layer)
    branch_1 = Activation('relu')(branch_1)
    # b1 = Conv3D(128, kernel_size=(1), strides=1, use_bias=False, padding='same')(b1)
    branch_1 = Conv3D(128, kernel_size=(1, 3, 3), strides=1, use_bias=False,
                      padding='same')(branch_1)

    branch_2 = BatchNormalization()(prev_layer)
    branch_2 = Activation('relu')(branch_2)
    branch_2 = Conv3D(128, kernel_size=(3, 3, 3), strides=1, use_bias=False,
                      padding='same')(branch_2)

    branch_3 = BatchNormalization()(prev_layer)
    branch_3 = Activation('relu')(branch_3)
    branch_3 = Conv3D(128, kernel_size=(4, 3, 3), strides=1, use_bias=False,
                      padding='same')(branch_3)

    return keras.layers.concatenate([branch_1, branch_2, branch_3], axis=1)
示例8: get_liveness_model
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def get_liveness_model():
    """Build a 3D-CNN binary classifier for liveness detection.

    Expects 24-frame stacks of 100x100 grayscale images; four Conv3D
    stages with max pooling feed a dropout-regularized dense head with
    a 2-way softmax.
    """
    model = Sequential()
    model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                     input_shape=(24, 100, 100, 1)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    return model
示例9: conv3d_bn
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def conv3d_bn(x, filters, num_frames, num_row, num_col, padding='same',
              strides=(1, 1, 1), use_bias=False, use_activation_fn=True,
              use_bn=True, name=None):
    """Utility function to apply conv3d + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv3D`.
        num_frames: frames (time depth) of the convolution kernel.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv3D`.
        strides: strides in `Conv3D`.
        use_bias: use bias or not
        use_activation_fn: use an activation function or not.
        use_bn: use batch normalization or not.
        name: name of the ops; will become `name + '_conv'` for the
            convolution and `name + '_bn'` for the batch norm layer.

    # Returns
        Output tensor after applying `Conv3D` and `BatchNormalization`.
    """
    conv_name = name + '_conv' if name is not None else None
    bn_name = name + '_bn' if name is not None else None

    x = Conv3D(filters, (num_frames, num_row, num_col), strides=strides,
               padding=padding, use_bias=use_bias, name=conv_name)(x)

    if use_bn:
        # channel axis depends on the backend's image data format
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 4
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)

    if use_activation_fn:
        x = Activation('relu', name=name)(x)

    return x
开发者ID:CMU-CREATE-Lab,项目名称:deep-smoke-machine,代码行数:43,代码来源:i3d_keras.py
示例10: __temporal_convolutional_block
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def __temporal_convolutional_block(tensor, n_channels_per_branch, kernel_sizes,
                                   dilation_rates, layer_num, group_num):
    """Define 5 branches of convolutions that operate of channels of each group."""
    suffix = 'g%d_tc%d' % (group_num, layer_num)

    # branch 1: dimension reduction only and no temporal conv
    t_1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same',
                 name='conv_b1_' + suffix)(tensor)
    t_1 = BatchNormalization(name='bn_b1_' + suffix)(t_1)

    # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
    t_2 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same',
                 name='conv_b2_' + suffix)(tensor)
    t_2 = DepthwiseConv1DLayer(kernel_sizes[0], dilation_rates[0],
                               padding='same',
                               name='convdw_b2_' + suffix)(t_2)
    t_2 = BatchNormalization(name='bn_b2_' + suffix)(t_2)

    # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
    t_3 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same',
                 name='conv_b3_' + suffix)(tensor)
    t_3 = DepthwiseConv1DLayer(kernel_sizes[1], dilation_rates[1],
                               padding='same',
                               name='convdw_b3_' + suffix)(t_3)
    t_3 = BatchNormalization(name='bn_b3_' + suffix)(t_3)

    # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
    t_4 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same',
                 name='conv_b4_' + suffix)(tensor)
    t_4 = DepthwiseConv1DLayer(kernel_sizes[2], dilation_rates[2],
                               padding='same',
                               name='convdw_b4_' + suffix)(t_4)
    t_4 = BatchNormalization(name='bn_b4_' + suffix)(t_4)

    # branch 5: dimension reduction followed by temporal max pooling
    t_5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same',
                 name='conv_b5_' + suffix)(tensor)
    t_5 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same',
                       name='maxpool_b5_' + suffix)(t_5)
    t_5 = BatchNormalization(name='bn_b5_' + suffix)(t_5)

    # concatenate channels of branches
    tensor = Concatenate(axis=4, name='concat_' + suffix)([t_1, t_2, t_3, t_4, t_5])
    return tensor
开发者ID:CMU-CREATE-Lab,项目名称:deep-smoke-machine,代码行数:35,代码来源:timeception.py
示例11: makecnn
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def makecnn(learningrate, regular, decay, channel_number):
    """Build and compile a 3D-CNN binary classifier for 20x20x20 voxel grids.

    Three Conv3D/BN/LeakyReLU stages with two max-pools feed two
    activity-regularized dense layers and a sigmoid output; compiled
    with Nadam and binary cross-entropy.
    NOTE: keyword arguments equal to the Keras defaults (use_bias=True,
    bias_initializer='zeros', None regularizers/constraints,
    dilation_rate=(1, 1, 1)) are omitted here; behavior is unchanged.
    """
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     input_shape=(20, 20, 20, channel_number),
                     padding='valid', data_format='channels_last',
                     kernel_initializer='glorot_normal',
                     activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     padding='valid', data_format='channels_last',
                     kernel_initializer='glorot_normal',
                     activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None,
                           padding='valid', data_format='channels_last'))
    model.add(BatchNormalization(axis=1))
    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     padding='valid', data_format='channels_last',
                     kernel_initializer='glorot_normal',
                     activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None,
                           padding='valid', data_format='channels_last'))
    model.add(Flatten())
    model.add(Dropout(0.3))
    # input_shape here is ignored by Keras on a non-first layer; kept
    # from the original for fidelity
    model.add(Dense(1000, use_bias=True, input_shape=(32000,),
                    kernel_initializer='glorot_normal',
                    activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(100, use_bias=True,
                    kernel_initializer='glorot_normal',
                    activity_regularizer=l2(regular)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid', use_bias=True,
                    kernel_initializer='glorot_normal',
                    activity_regularizer=l2(regular)))
    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999,
                  epsilon=1e-08, schedule_decay=decay)
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
示例12: _convND
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def _convND(ip, rank, channels, kernel=1):
    """Apply a rank-appropriate 'same' convolution (no bias, he_normal).

    rank 3 -> Conv1D, rank 4 -> Conv2D, rank 5 -> Conv3D.
    """
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"

    if rank == 3:
        out = Conv1D(channels, kernel, padding='same', use_bias=False,
                     kernel_initializer='he_normal')(ip)
    elif rank == 4:
        out = Conv2D(channels, (kernel, kernel), padding='same',
                     use_bias=False, kernel_initializer='he_normal')(ip)
    else:
        out = Conv3D(channels, (kernel, kernel, kernel), padding='same',
                     use_bias=False, kernel_initializer='he_normal')(ip)
    return out
开发者ID:titu1994,项目名称:keras-global-context-networks,代码行数:13,代码来源:gc.py
示例13: Unet
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def Unet(input_shape, n_labels, n_filters=32, depth=4, activation='sigmoid'):
    """Assemble a 3D U-Net from contracting/middle/expanding sub-layers.

    Filter counts double at each depth; contracting-path outputs are
    cached and concatenated back in on the expanding path.
    """
    # Input layer
    inputs = Input(input_shape)
    # Start the CNN Model chain with adding the inputs as first tensor
    cnn_chain = inputs
    # Cache contracting normalized conv layers for later
    # copy & concatenate links
    contracting_convs = []

    # Contracting Layers
    for i in range(depth):
        cnn_chain, last_conv = contracting_layer(cnn_chain, n_filters * 2**i)
        contracting_convs.append(last_conv)

    # Middle Layer
    cnn_chain = middle_layer(cnn_chain, n_filters * 2**depth)

    # Expanding Layers
    for i in reversed(range(depth)):
        cnn_chain = expanding_layer(cnn_chain, n_filters * 2**i,
                                    contracting_convs[i])

    # Output Layer
    conv_out = Conv3D(n_labels, (1, 1, 1), activation=activation)(cnn_chain)
    # Create Model with associated input and output layers
    return Model(inputs=[inputs], outputs=[conv_out])
#-----------------------------------------------------#
#                     Subroutines                     #
#-----------------------------------------------------#
# Create a contracting layer
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:37,代码来源:residual.py
示例14: contracting_layer
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def contracting_layer(input, neurons):
    """Residual contracting step: two 3x3x3 convs, concatenate with the
    input (channel axis 4), then 2x max-pool.

    Returns the pooled tensor and the second conv (for skip links).
    """
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu',
                   padding='same')(input)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu',
                   padding='same')(conv1)
    conc1 = concatenate([input, conv2], axis=4)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conc1)
    return pool, conv2
# Create the middle layer between the contracting and expanding layers
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:10,代码来源:residual.py
示例15: middle_layer
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def middle_layer(input, neurons):
    """Residual bottleneck: two 3x3x3 convs concatenated with the input
    along the channel axis (axis 4)."""
    conv_m1 = Conv3D(neurons, (3, 3, 3), activation='relu',
                     padding='same')(input)
    conv_m2 = Conv3D(neurons, (3, 3, 3), activation='relu',
                     padding='same')(conv_m1)
    return concatenate([input, conv_m2], axis=4)
# Create an expanding layer
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:9,代码来源:residual.py
示例16: expanding_layer
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def expanding_layer(input, neurons, concatenate_link):
    """Residual expanding step: transposed-conv upsample, merge with the
    contracting-path skip link, two 3x3x3 convs, then concatenate the
    conv output back with the merged tensor (channel axis 4)."""
    upsampled = Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                                padding='same')(input)
    up = concatenate([upsampled, concatenate_link], axis=4)
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(up)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    return concatenate([up, conv2], axis=4)
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:9,代码来源:residual.py
示例17: conv3d_bn
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def conv3d_bn(x, filters, num_row, num_col, num_z, padding='same',
              strides=(1, 1, 1), activation='relu', name=None):
    """3D Convolutional layers

    Arguments:
        x {keras layer} -- input layer
        filters {int} -- number of filters
        num_row {int} -- number of rows in filters
        num_col {int} -- number of columns in filters
        num_z {int} -- length along z axis in filters

    Keyword Arguments:
        padding {str} -- mode of padding (default: {'same'})
        strides {tuple} -- stride of convolution operation (default: {(1, 1, 1)})
        activation {str} -- activation function (default: {'relu'})
        name {str} -- name of the layer (default: {None})

    Returns:
        [keras layer] -- [output layer]
    """
    x = Conv3D(filters, (num_row, num_col, num_z), strides=strides,
               padding=padding, use_bias=False)(x)
    x = BatchNormalization(axis=4, scale=False)(x)
    # skip the activation layer entirely when none was requested
    if activation is None:
        return x
    return Activation(activation, name=name)(x)
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:30,代码来源:multiRes.py
示例18: contracting_layer
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def contracting_layer(input, neurons):
    """Standard contracting step: two 3x3x3 convs followed by 2x max-pool.

    Returns the pooled tensor and the second conv (for skip links).
    """
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu',
                   padding='same')(input)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu',
                   padding='same')(conv1)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    return pool, conv2
# Create the middle layer between the contracting and expanding layers
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:9,代码来源:standard.py
示例19: middle_layer
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def middle_layer(input, neurons):
    """Standard bottleneck: two consecutive 3x3x3 'same' convolutions."""
    conv_m1 = Conv3D(neurons, (3, 3, 3), activation='relu',
                     padding='same')(input)
    conv_m2 = Conv3D(neurons, (3, 3, 3), activation='relu',
                     padding='same')(conv_m1)
    return conv_m2
# Create an expanding layer
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:8,代码来源:standard.py
示例20: expanding_layer
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def expanding_layer(input, neurons, concatenate_link):
    """Standard expanding step: transposed-conv upsample, merge with the
    contracting-path skip link (channel axis 4), then two 3x3x3 convs."""
    upsampled = Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                                padding='same')(input)
    up = concatenate([upsampled, concatenate_link], axis=4)
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(up)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    return conv2
开发者ID:muellerdo,项目名称:kits19.MIScnn,代码行数:8,代码来源:standard.py
示例21: get_model_compiled
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def get_model_compiled(args, inputshape, num_class):
    """Build and compile a 1D/2D/3D CNN classifier selected by args.arch.

    The 1D and 3D variants (and the shared tail, except for CNN2D) add
    BatchNormalization + ReLU after the dense layer before the softmax.
    """
    model = Sequential()
    if args.arch == "CNN1D":
        model.add(Conv1D(20, (24), activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(100))
    elif args.arch == "CNN3D":
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    if args.arch != "CNN2D":
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy,
                  optimizer=Adam(args.lr1),
                  metrics=['accuracy'])
    return model
示例22: test_conv3d
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def test_conv3d(self):
    """Transfer a single-Conv3D Keras model to PyTorch and verify that
    both produce matching predictions on the 3D test data."""
    keras_model = Sequential()
    keras_model.add(Conv3D(8, (5, 5, 5), input_shape=(3, 8, 8, 8),
                           name='conv'))
    keras_model.compile(loss=keras.losses.categorical_crossentropy,
                        optimizer=keras.optimizers.SGD())

    pytorch_model = Conv3DNet()
    self.transfer(keras_model, pytorch_model)
    self.assertEqualPrediction(keras_model, pytorch_model,
                               self.test_data_3d, delta=1e-4)
示例23: DenseNetUnit3D
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def DenseNetUnit3D(x, growth_rate, ksize, n, bn_decay=0.99):
    """Stack n BN-ReLU-Conv3D units, densely concatenating each output
    with its input (growth_rate new channels per unit)."""
    for _ in range(n):
        shortcut = x
        x = BatchNormalization(center=True, scale=True,
                               momentum=bn_decay)(x)
        x = Activation('relu')(x)
        x = Conv3D(filters=growth_rate, kernel_size=ksize, padding='same',
                   kernel_initializer='he_uniform', use_bias=False)(x)
        x = concatenate([shortcut, x])
    return x
开发者ID:lelechen63,项目名称:MRI-tumor-segmentation-Brats,代码行数:11,代码来源:test.py
示例24: DenseNetTransit
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def DenseNetTransit(x, rate=1, name=None):
    """DenseNet transition with compression `rate`; a rate of 1 is a
    no-op (the tensor is returned unchanged, no pooling)."""
    if rate != 1:
        # scale the channel count by `rate` before downsampling
        out_features = x.get_shape().as_list()[-1] * rate
        x = BatchNormalization(center=True, scale=True,
                               name=name + '_bn')(x)
        x = Activation('relu', name=name + '_relu')(x)
        x = Conv3D(filters=out_features, kernel_size=1, strides=1,
                   padding='same', kernel_initializer='he_normal',
                   use_bias=False, name=name + '_conv')(x)
        x = AveragePooling3D(pool_size=2, strides=2, padding='same')(x)
    return x
开发者ID:lelechen63,项目名称:MRI-tumor-segmentation-Brats,代码行数:11,代码来源:test.py
示例25: dense_net
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def dense_net(input):
    """Feature extractor: stem conv, then three 4-unit dense blocks
    separated by transition layers, finished with BN + ReLU."""
    x = Conv3D(filters=24, kernel_size=3, strides=1,
               kernel_initializer='he_uniform', padding='same',
               use_bias=False)(input)
    for block in range(3):
        x = DenseNetUnit3D(x, growth_rate=12, ksize=3, n=4)
        if block < 2:
            # NOTE: DenseNetTransit with the default rate=1 is a pass-through
            x = DenseNetTransit(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
开发者ID:lelechen63,项目名称:MRI-tumor-segmentation-Brats,代码行数:13,代码来源:test.py
示例26: dense_model
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def dense_model(patch_size, num_classes):
    """Multi-head brain-tumor segmentation model over 4-modality patches.

    The merged input is split per modality (FLAIR, T2, and the two
    T1 channels), each passed through dense_net; cascaded feature
    concatenations feed three 1x1x1-conv output heads: tumor, core
    and enhancing.
    """
    merged_inputs = Input(shape=patch_size + (4,), name='merged_inputs')
    # channel 0: FLAIR
    flair = Reshape(patch_size + (1,))(
        Lambda(lambda l: l[:, :, :, :, 0],
               output_shape=patch_size + (1,))(merged_inputs),
    )
    # channel 1: T2
    t2 = Reshape(patch_size + (1,))(
        Lambda(lambda l: l[:, :, :, :, 1],
               output_shape=patch_size + (1,))(merged_inputs)
    )
    # channels 2-3: T1 modalities (kept as a 2-channel tensor)
    t1 = Lambda(lambda l: l[:, :, :, :, 2:],
                output_shape=patch_size + (2,))(merged_inputs)

    flair = dense_net(flair)
    t2 = dense_net(t2)
    t1 = dense_net(t1)

    # cascade: tumor from FLAIR, core from FLAIR+T2, enhancing from all
    t2 = concatenate([flair, t2])
    t1 = concatenate([t2, t1])

    tumor = Conv3D(2, kernel_size=1, strides=1, name='tumor')(flair)
    core = Conv3D(3, kernel_size=1, strides=1, name='core')(t2)
    enhancing = Conv3D(num_classes, kernel_size=1, strides=1,
                       name='enhancing')(t1)

    return Model(inputs=merged_inputs, outputs=[tumor, core, enhancing])
开发者ID:lelechen63,项目名称:MRI-tumor-segmentation-Brats,代码行数:28,代码来源:test.py
示例27: nn_architecture_seg_3d
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1,
                           initial_learning_rate=0.00001, depth=3,
                           n_base_filters=16, metrics=dice_coefficient,
                           batch_normalization=True):
    """Build and compile a 3D U-Net for (binary) segmentation.

    Contracting path: `depth` levels of two convolution blocks each,
    max-pooled between levels; expanding path: upsample, concatenate
    the matching skip tensor (channel axis 1, channels-first), and two
    more convolution blocks. A 1x1x1 Conv3D + sigmoid emits the mask;
    compiled with Adam and the Dice loss.

    Returns:
        The compiled keras Model.
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # contracting path: cache [layer1, layer2(, pooled)] per level for skips
    for layer_depth in range(depth):
        layer1 = create_convolution_block(
            input_layer=current_layer,
            n_filters=n_base_filters * (2 ** layer_depth),
            batch_normalization=batch_normalization)
        layer2 = create_convolution_block(
            input_layer=layer1,
            n_filters=n_base_filters * (2 ** layer_depth) * 2,
            batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # expanding path
    for layer_depth in range(depth - 2, -1, -1):
        # FIX: the original never applied the UpSampling3D layer to
        # current_layer, so a Layer object (not a tensor) reached
        # concatenate() and building the graph failed.
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=concat,
            batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss=dice_coefficient_loss, metrics=metrics)
    return model
示例28: create_convolution_block
# Required import: from keras import layers
# Alternative: from keras.layers import Conv3D
def create_convolution_block(input_layer, n_filters, batch_normalization=False,
                             kernel=(3, 3, 3), activation=None,
                             padding='same', strides=(1, 1, 1)):
    """Conv3D (+ optional BN on axis 1), then ReLU by default or a
    caller-supplied activation layer class."""
    layer = Conv3D(n_filters, kernel, padding=padding,
                   strides=strides)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1)(layer)
    # `activation` is a layer class (e.g. PReLU), not a string; it is
    # instantiated here. None falls back to a plain ReLU.
    if activation is None:
        return Activation('relu')(layer)
    return activation()(layer)
|