
Self-study tutorial: Python layers.MaxPooling3D code examples

51自学网 | 2020-12-01 11:09:01 | Keras
This tutorial on Python layers.MaxPooling3D code examples is quite practical; we hope it helps you.

This article collects typical usage examples of keras.layers.MaxPooling3D in Python. If you have been wondering what layers.MaxPooling3D does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the keras.layers module.

The following lists 26 code examples of layers.MaxPooling3D, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the recommendation system surface better Python code samples.
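Before diving into the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what MaxPooling3D does to a 5D tensor of shape (batch, dim1, dim2, dim3, channels). The 16x16x16 input size and the pool size are arbitrary values chosen only for illustration.

from keras.layers import Input, MaxPooling3D
from keras.models import Model

# A 5D input: (batch, depth, height, width, channels); 16x16x16 with one channel is an arbitrary example size
inp = Input(shape=(16, 16, 16, 1))
# With the default strides (equal to pool_size), pooling halves each of the three pooled dimensions
out = MaxPooling3D(pool_size=(2, 2, 2))(inp)

model = Model(inp, out)
model.summary()  # output shape should be (None, 8, 8, 8, 1)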

Example 1: timeception_layers

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def timeception_layers(tensor, n_layers=4, n_groups=8, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    expansion_factor = 1.25
    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # how many layers of timeception
    for i in range(n_layers):
        layer_num = i + 1

        # get details about grouping
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # temporal conv per group
        tensor = __grouped_convolutions(tensor, n_groups, n_channels_per_branch, is_dilated, layer_num)

        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1), name='maxpool_tc%d' % (layer_num))(tensor)
        n_channels_in = n_channels_out

    return tensor

Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 24, Source: timeception.py


Example 2: __define_timeception_layers

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def __define_timeception_layers(self, n_channels_in, n_layers, n_groups, expansion_factor, is_dilated):
    """
    Define layers inside the timeception layers.
    """
    # how many layers of timeception
    for i in range(n_layers):
        layer_num = i + 1

        # get details about grouping
        n_channels_per_branch, n_channels_out = self.__get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # temporal conv per group
        self.__define_grouped_convolutions(n_channels_in, n_groups, n_channels_per_branch, is_dilated, layer_num)

        # downsample over time
        layer_name = 'maxpool_tc%d' % (layer_num)
        layer = MaxPooling3D(pool_size=(2, 1, 1), name=layer_name)
        setattr(self, layer_name, layer)
        n_channels_in = n_channels_out

Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 23, Source: timeception.py


Example 3: get_model_compiled

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3):
    clf = Sequential()
    clf.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Conv3D(64, (5, 5, 16)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(MaxPooling3D(pool_size=(2, 2, 1)))
    clf.add(Flatten())
    clf.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay)))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(Dense(num_class, activation='softmax'))
    clf.compile(loss=categorical_crossentropy, optimizer=Adam(lr=lr), metrics=['accuracy'])
    return clf

Developer: mhaut, Project: hyperspectral_deeplearning_review, Lines of code: 18, Source: cnn3d.py
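A minimal usage sketch for the function above (my addition, not part of the original project). It assumes the imports used in the snippet (Sequential, Conv3D, BatchNormalization, Activation, MaxPooling3D, Flatten, Dense, regularizers, categorical_crossentropy, Adam from keras) are already in scope; the 11x11x40x1 patch shape and the 9 classes are hypothetical values chosen so that the (5, 5, 24) and (5, 5, 16) kernels fit.

import numpy as np
from keras.utils import to_categorical

# Hypothetical hyperspectral patches: 11x11 spatial window, 40 bands, 1 channel, 9 classes
X = np.random.rand(32, 11, 11, 40, 1).astype('float32')
y = to_categorical(np.random.randint(0, 9, size=32), num_classes=9)

model = get_model_compiled(shapeinput=(11, 11, 40, 1), num_class=9)
model.summary()
model.fit(X, y, batch_size=8, epochs=1)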


Example 4: dsrff3D

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def dsrff3D(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    # modified VGG19 architecture
    bn_axis = 3
    m = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    m = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)

    m = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)

    m = Flatten(name='flatten')(m)
    m = Dense(512, activation='relu', name='fc1')(m)
    m = Dense(512, activation='relu', name='fc2')(m)
    m = Dense(num_labels, activation='softmax')(m)

    mod = KM.Model(inputs=inputs, outputs=m)
    return mod

Developer: xulabs, Project: aitom, Lines of code: 24, Source: subdivide.py


Example 5: get_liveness_model

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def get_liveness_model():
    model = Sequential()
    model.add(Conv3D(32, kernel_size=(3, 3, 3),
                     activation='relu',
                     input_shape=(24, 100, 100, 1)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    return model

Developer: AhmetHamzaEmra, Project: Intelegent_Lock, Lines of code: 21, Source: livenessmodel.py
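A quick sanity-check sketch (my addition, not from the original repository): the model above takes clips of 24 grayscale frames at 100x100, so a random clip of that shape can be pushed through after compiling. It assumes the imports from the snippet are in scope; the optimizer and loss are illustrative choices, not necessarily the ones used in the project.

import numpy as np

model = get_liveness_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# One random clip: 24 frames of 100x100 grayscale images
clip = np.random.rand(1, 24, 100, 100, 1).astype('float32')
print(model.predict(clip).shape)  # expected: (1, 2), one probability per class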


Example 6: __temporal_convolutional_block

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def __temporal_convolutional_block(tensor, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
    """
    Define 5 branches of convolutions that operate on channels of each group.
    """
    # branch 1: dimension reduction only and no temporal conv
    t_1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b1_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_1 = BatchNormalization(name='bn_b1_g%d_tc%d' % (group_num, layer_num))(t_1)

    # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
    t_2 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b2_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_2 = DepthwiseConv1DLayer(kernel_sizes[0], dilation_rates[0], padding='same', name='convdw_b2_g%d_tc%d' % (group_num, layer_num))(t_2)
    t_2 = BatchNormalization(name='bn_b2_g%d_tc%d' % (group_num, layer_num))(t_2)

    # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
    t_3 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b3_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_3 = DepthwiseConv1DLayer(kernel_sizes[1], dilation_rates[1], padding='same', name='convdw_b3_g%d_tc%d' % (group_num, layer_num))(t_3)
    t_3 = BatchNormalization(name='bn_b3_g%d_tc%d' % (group_num, layer_num))(t_3)

    # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
    t_4 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b4_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_4 = DepthwiseConv1DLayer(kernel_sizes[2], dilation_rates[2], padding='same', name='convdw_b4_g%d_tc%d' % (group_num, layer_num))(t_4)
    t_4 = BatchNormalization(name='bn_b4_g%d_tc%d' % (group_num, layer_num))(t_4)

    # branch 5: dimension reduction followed by temporal max pooling
    t_5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b5_g%d_tc%d' % (group_num, layer_num))(tensor)
    t_5 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name='maxpool_b5_g%d_tc%d' % (group_num, layer_num))(t_5)
    t_5 = BatchNormalization(name='bn_b5_g%d_tc%d' % (group_num, layer_num))(t_5)

    # concatenate channels of branches
    tensor = Concatenate(axis=4, name='concat_g%d_tc%d' % (group_num, layer_num))([t_1, t_2, t_3, t_4, t_5])

    return tensor

Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 35, Source: timeception.py


Example 7: contracting_layer

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def contracting_layer(input, neurons):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    conc1 = concatenate([input, conv2], axis=4)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conc1)
    return pool, conv2

# Create the middle layer between the contracting and expanding layers

Developer: muellerdo, Project: kits19.MIScnn, Lines of code: 10, Source: residual.py


Example 8: contracting_layer

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def contracting_layer(input, neurons):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    return pool, conv2

# Create the middle layer between the contracting and expanding layers

Developer: muellerdo, Project: kits19.MIScnn, Lines of code: 9, Source: standard.py
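To illustrate how this contracting block is typically chained (a sketch of mine, not code from kits19.MIScnn): each call halves the spatial resolution and returns both the pooled tensor for the next level and the pre-pooling feature map for a later skip connection. The 32x32x32 patch size and the filter counts are hypothetical.

from keras.layers import Input, Conv3D, MaxPooling3D

inp = Input(shape=(32, 32, 32, 1))           # hypothetical 3D patch with one channel
pool1, skip1 = contracting_layer(inp, 16)    # pool1: (16, 16, 16, 16); skip1 kept for the expanding path
pool2, skip2 = contracting_layer(pool1, 32)  # pool2: (8, 8, 8, 32)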


Example 9: get_model_compiled

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def get_model_compiled(args, inputshape, num_class):
    model = Sequential()
    if args.arch == "CNN1D":
        model.add(Conv1D(20, (24), activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(100))
    elif args.arch == "CNN3D":
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    if args.arch != "CNN2D": model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy, optimizer=Adam(args.lr1), metrics=['accuracy'])
    return model

Developer: mhaut, Project: hyperspectral_deeplearning_review, Lines of code: 32, Source: transfer_learning.py


Example 10: nn_architecture_seg_3d

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                           depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    for layer_depth in range(depth - 2, -1, -1):
        # apply the upsampling to the current feature map before concatenating with the skip connection
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model

Developer: neuropoly, Project: spinalcordtoolbox, Lines of code: 38, Source: cnn_models_3d.py


Example 11: inception3D

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def inception3D(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    m = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='valid', input_shape=())(inputs)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=None, border_mode='same')(m)

    # inception module 0
    branch1x1 = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)

    branch3x3_reduce = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch3x3 = Convolution3D(64, 3, 3, 3, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch3x3_reduce)

    branch5x5_reduce = Convolution3D(16, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch5x5 = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch5x5_reduce)

    branch_pool = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='same')(m)
    branch_pool_proj = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch_pool)

    # m = merge([branch1x1, branch3x3, branch5x5, branch_pool_proj], mode='concat', concat_axis=-1)
    from keras.layers import concatenate
    m = concatenate([branch1x1, branch3x3, branch5x5, branch_pool_proj], axis=-1)

    m = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='valid')(m)
    m = Flatten()(m)
    m = Dropout(0.7)(m)

    # explicitly separate the Dense and Activation layers in order to project to the structural feature space
    m = Dense(num_labels, activation='linear')(m)
    m = Activation('softmax')(m)

    mod = KM.Model(input=inputs, output=m)
    return mod

Developer: xulabs, Project: aitom, Lines of code: 32, Source: subdivide.py


Example 12: model_simple_upsampling__reshape

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def model_simple_upsampling__reshape(img_shape, class_n=None):
    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten
    from keras.models import Sequential, Model
    from keras.layers.core import Activation

    from aitom.classify.deep.unsupervised.autoencoder.seg_util import conv_block

    NUM_CHANNELS = 1
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # Use relu activation for the hidden layers to guarantee that non-negative outputs are passed to the max
    # pooling layer. In that case, as long as the output layer has a linear activation, the network can still
    # accommodate negative image intensities; it is just a matter of shifting back using the bias term.
    input_img = Input(shape=input_shape[1:])
    x = input_img

    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)

    x = Convolution3D(class_n, 1, 1, 1, border_mode='same')(x)
    x = Reshape((N.prod(img_shape), class_n))(x)
    x = Activation('softmax')(x)

    model = Model(input=input_img, output=x)

    print('model layers:')
    for l in model.layers:
        print(l.output_shape, l.name)

    return model

Developer: xulabs, Project: aitom, Lines of code: 39, Source: seg_src.py


Example 13: c3d_model

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def c3d_model():
    input_shape = (112, 112, 8, 3)
    weight_decay = 0.005
    nb_classes = 101

    inputs = Input(input_shape)
    x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(inputs)
    x = MaxPooling3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)

    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)

    x = Flatten()(x)
    x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    model = Model(inputs, x)
    return model

Developer: TianzhongSong, Project: 3D-ConvNets-for-Action-Recognition, Lines of code: 38, Source: c3d.py
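A small sketch (my addition, not from the original repository) to confirm the expected input layout: the hard-coded input shape is (112, 112, 8, 3), i.e. 8 RGB frames of 112x112, and the head predicts 101 classes. It assumes the imports from the snippet (Input, Conv3D, MaxPooling3D, Flatten, Dense, Dropout, Activation, Model, l2) are in scope.

import numpy as np

model = c3d_model()

# One random clip: 8 RGB frames of size 112x112
clip = np.random.rand(1, 112, 112, 8, 3).astype('float32')
print(model.predict(clip).shape)  # expected: (1, 101)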


Example 14: timeception_temporal_convolutions

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def timeception_temporal_convolutions(tensor, n_layers, n_groups, expansion_factor, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # collapse regions in one dim
    tensor = ReshapeLayer((n_timesteps, side_dim * side_dim, 1, n_channels_in))(tensor)

    for i in range(n_layers):
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # add global pooling as local regions
        tensor = __global_spatial_pooling(tensor)

        # temporal conv (inception-style, shuffled)
        if is_dilated:
            tensor = __timeception_shuffled_depthwise_dilated(tensor, n_groups, n_channels_per_branch)
        else:
            tensor = __timeception_shuffled_depthwise(tensor, n_groups, n_channels_per_branch)

        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1))(tensor)
        n_channels_in = n_channels_out

    return tensor

Developer: noureldien, Project: videograph, Lines of code: 29, Source: timeception.py


Example 15: timeception_temporal_convolutions_parallelized

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def timeception_temporal_convolutions_parallelized(tensor, n_layers, n_groups, expansion_factor, is_dilated=True):
    input_shape = K.int_shape(tensor)
    assert len(input_shape) == 5

    raise Exception('Sorry, not implemented now')

    _, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # collapse regions in one dim
    tensor = ReshapeLayer((n_timesteps, side_dim * side_dim, 1, n_channels_in))(tensor)

    for i in range(n_layers):
        # add global pooling as regions
        tensor = __global_spatial_pooling(tensor)

        # temporal conv (inception-style, shuffled)
        n_channels_per_branch, n_channels_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)
        if is_dilated:
            tensor = __timeception_shuffled_depthwise_dilated_parallelized(tensor, n_groups, n_channels_per_branch)
        else:
            tensor = __timeception_shuffled_depthwise_parallelized(tensor, n_groups, n_channels_per_branch)

        tensor = MaxPooling3D(pool_size=(2, 1, 1))(tensor)
        n_channels_in = n_channels_out

    return tensor

# endregion

# region Timeception Block

Developer: noureldien, Project: videograph, Lines of code: 31, Source: timeception.py


Example 16: load_model

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def load_model():
    # use simple CNN structure
    in_shape = (SequenceLength, IMSIZE[0], IMSIZE[1], 3)
    model = Sequential()
    model.add(ConvLSTM2D(32, kernel_size=(7, 7), padding='valid', return_sequences=True, input_shape=in_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(64, kernel_size=(5, 5), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(Dense(320))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    out_shape = model.output_shape
    # print('====Model shape: ', out_shape)
    model.add(Reshape((SequenceLength, out_shape[2] * out_shape[3] * out_shape[4])))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    # model structure summary
    print(model.summary())

    return model

Developer: woodfrog, Project: ActionRecognition, Lines of code: 33, Source: LRCN_keras.py
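The function above relies on module-level constants (SequenceLength, IMSIZE, N_CLASSES) defined elsewhere in LRCN_keras.py. The sketch below is my addition with hypothetical values under which the architecture builds; they are chosen only for illustration and are not the project's settings.

# Hypothetical values for the module-level constants used by load_model()
SequenceLength = 10   # frames per clip
IMSIZE = (64, 64)     # frame height and width
N_CLASSES = 10        # number of action classes

model = load_model()  # builds the ConvLSTM + LSTM stack and prints its summary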


Example 17: build

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def build():
    model = Sequential()
    # Conv layer 1
    model.add(Convolution3D(
        input_shape=(14, 32, 32, 32),
        filters=64,
        kernel_size=5,
        padding='valid',     # Padding method
        data_format='channels_first',
    ))
    model.add(LeakyReLU(alpha=0.1))
    # Dropout 1
    model.add(Dropout(0.2))
    # Conv layer 2
    model.add(Convolution3D(
        filters=64,
        kernel_size=3,
        padding='valid',     # Padding method
        data_format='channels_first',
    ))
    model.add(LeakyReLU(alpha=0.1))
    # Maxpooling 1
    model.add(MaxPooling3D(
        pool_size=(2, 2, 2),
        strides=None,
        padding='valid',     # Padding method
        data_format='channels_first'
    ))
    # Dropout 2
    model.add(Dropout(0.4))
    # FC 1
    model.add(Flatten())
    model.add(Dense(128))  # TODO changed to 64 for the CAM
    model.add(LeakyReLU(alpha=0.1))
    # Dropout 3
    model.add(Dropout(0.4))
    # Fully connected layer 2 to shape (2) for 2 classes
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model

Developer: pulimeng, Project: DeepDrug3D, Lines of code: 42, Source: deepdrug3d.py


Example 18: test_keras_import

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def test_keras_import(self):
    # Global Pooling 1D
    model = Sequential()
    model.add(GlobalMaxPooling1D(input_shape=(16, 1)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Global Pooling 2D
    model = Sequential()
    model.add(GlobalMaxPooling2D(input_shape=(16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 1D
    model = Sequential()
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Pooling 2D
    model = Sequential()
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 3D
    model = Sequential()
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                           input_shape=(16, 16, 16, 1)))
    model.build()
    self.keras_param_test(model, 0, 11)

# ********** Locally-connected Layers **********

Developer: Cloud-CV, Project: Fabrik, Lines of code: 32, Source: test_views.py


Example 19: test_keras_export

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Pooling']}
    # Pool 1D
    net['l1']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l1']
    net['l3']['params']['layer_type'] = '1D'
    net['l3']['shape']['input'] = net['l1']['shape']['output']
    net['l3']['shape']['output'] = [12, 12]
    inp = data(net['l1'], '', 'l1')['l1']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
    # Pool 2D
    net['l0']['connection']['output'].append('l0')
    net['l3']['connection']['input'] = ['l0']
    net['l3']['params']['layer_type'] = '2D'
    net['l3']['shape']['input'] = net['l0']['shape']['output']
    net['l3']['shape']['output'] = [3, 226, 226]
    inp = data(net['l0'], '', 'l0')['l0']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
    # Pool 3D
    net['l2']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l2']
    net['l3']['params']['layer_type'] = '3D'
    net['l3']['shape']['input'] = net['l2']['shape']['output']
    net['l3']['shape']['output'] = [3, 226, 226, 18]
    inp = data(net['l2'], '', 'l2')['l2']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')

# ********** Locally-connected Layers **********

Developer: Cloud-CV, Project: Fabrik, Lines of code: 42, Source: test_views.py


Example 20: __define_temporal_convolutional_block

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def __define_temporal_convolutional_block(self, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
    """
    Define 5 branches of convolutions that operate on channels of each group.
    """
    # branch 1: dimension reduction only and no temporal conv
    layer_name = 'conv_b1_g%d_tc%d' % (group_num, layer_num)
    layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'bn_b1_g%d_tc%d' % (group_num, layer_num)
    layer = BatchNormalization(name=layer_name)
    setattr(self, layer_name, layer)

    # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
    layer_name = 'conv_b2_g%d_tc%d' % (group_num, layer_num)
    layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'convdw_b2_g%d_tc%d' % (group_num, layer_num)
    layer = DepthwiseConv1DLayer(kernel_sizes[0], dilation_rates[0], padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'bn_b2_g%d_tc%d' % (group_num, layer_num)
    layer = BatchNormalization(name=layer_name)
    setattr(self, layer_name, layer)

    # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
    layer_name = 'conv_b3_g%d_tc%d' % (group_num, layer_num)
    layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'convdw_b3_g%d_tc%d' % (group_num, layer_num)
    layer = DepthwiseConv1DLayer(kernel_sizes[1], dilation_rates[1], padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'bn_b3_g%d_tc%d' % (group_num, layer_num)
    layer = BatchNormalization(name=layer_name)
    setattr(self, layer_name, layer)

    # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
    layer_name = 'conv_b4_g%d_tc%d' % (group_num, layer_num)
    layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'convdw_b4_g%d_tc%d' % (group_num, layer_num)
    layer = DepthwiseConv1DLayer(kernel_sizes[2], dilation_rates[2], padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'bn_b4_g%d_tc%d' % (group_num, layer_num)
    layer = BatchNormalization(name=layer_name)
    setattr(self, layer_name, layer)

    # branch 5: dimension reduction followed by temporal max pooling
    layer_name = 'conv_b5_g%d_tc%d' % (group_num, layer_num)
    layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'maxpool_b5_g%d_tc%d' % (group_num, layer_num)
    layer = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name=layer_name)
    setattr(self, layer_name, layer)
    layer_name = 'bn_b5_g%d_tc%d' % (group_num, layer_num)
    layer = BatchNormalization(name=layer_name)
    setattr(self, layer_name, layer)

    # concatenate channels of branches
    layer_name = 'concat_g%d_tc%d' % (group_num, layer_num)
    layer = Concatenate(axis=4, name=layer_name)
    setattr(self, layer_name, layer)

Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 63, Source: timeception.py


Example 21: Unet

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def Unet(input_shape, n_labels, activation='sigmoid'):
    '''
    MultiResUNet3D

    Arguments:
        height {int} -- height of image
        width {int} -- width of image
        z {int} -- length along z axis
        n_channels {int} -- number of channels in image

    Returns:
        [keras model] -- MultiResUNet3D model
    '''
    inputs = Input(input_shape)

    mresblock1 = MultiResBlock(32, inputs)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock1)
    mresblock1 = ResPath(32, 4, mresblock1)

    mresblock2 = MultiResBlock(32*2, pool1)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock2)
    mresblock2 = ResPath(32*2, 3, mresblock2)

    mresblock3 = MultiResBlock(32*4, pool2)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock3)
    mresblock3 = ResPath(32*4, 2, mresblock3)

    mresblock4 = MultiResBlock(32*8, pool3)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2))(mresblock4)
    mresblock4 = ResPath(32*8, 1, mresblock4)

    mresblock5 = MultiResBlock(32*16, pool4)

    up6 = concatenate([Conv3DTranspose(32*8, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock5), mresblock4], axis=4)
    mresblock6 = MultiResBlock(32*8, up6)

    up7 = concatenate([Conv3DTranspose(32*4, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock6), mresblock3], axis=4)
    mresblock7 = MultiResBlock(32*4, up7)

    up8 = concatenate([Conv3DTranspose(32*2, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock7), mresblock2], axis=4)
    mresblock8 = MultiResBlock(32*2, up8)

    up9 = concatenate([Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2), padding='same')(mresblock8), mresblock1], axis=4)
    mresblock9 = MultiResBlock(32, up9)

    conv10 = conv3d_bn(mresblock9, n_labels, 1, 1, 1, activation=activation)

    model = Model(inputs=[inputs], outputs=[conv10])
    return model

#-----------------------------------------------------#
#                     Subroutines                      #
#-----------------------------------------------------#

Developer: muellerdo, Project: kits19.MIScnn, Lines of code: 58, Source: multiRes.py


Example 22: auto_classifier_model

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def auto_classifier_model(img_shape, encoding_dim=128, NUM_CHANNELS=1, num_of_class=2):
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)
    mask_shape = (None, num_of_class)

    # Use relu activation for the hidden layers to guarantee that non-negative outputs are passed to the max
    # pooling layer. In that case, as long as the output layer has a linear activation, the network can still
    # accommodate negative image intensities; it is just a matter of shifting back using the bias term.
    input_img = Input(shape=input_shape[1:])
    mask = Input(shape=mask_shape[1:])

    x = input_img
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    encoder_conv_shape = [_.value for _ in x.get_shape()]  # x.get_shape() returns a list of tensorflow.python.framework.tensor_shape.Dimension objects
    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    encoder = Model(inputs=input_img, outputs=encoded)

    x = BatchNormalization()(x)
    x = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(num_of_class, activation='softmax')(x)
    prob = x

    # classifier output
    classifier = Model(inputs=input_img, outputs=prob)

    input_img_decoder = Input(shape=encoder.output_shape[1:])
    x = input_img_decoder
    x = Dense(np.prod(encoder_conv_shape[1:]), activation='relu')(x)
    x = Reshape(encoder_conv_shape[1:])(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = Convolution3D(1, (3, 3, 3), activation='linear', padding='same')(x)
    decoded = x

    # autoencoder output
    decoder = Model(inputs=input_img_decoder, outputs=decoded)

    autoencoder = Sequential()
    for l in encoder.layers:
        autoencoder.add(l)
    last = None
    for l in decoder.layers:
        last = l
        autoencoder.add(l)

    decoded = autoencoder(input_img)

    auto_classifier = Model(inputs=input_img, outputs=[decoded, prob])
    auto_classifier.summary()
    return auto_classifier

Developer: xulabs, Project: aitom, Lines of code: 63, Source: auto_classifier_model.py


Example 23: fCreateConv3D_InceptionBlock

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def fCreateConv3D_InceptionBlock(filters):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        # branch 1x1
        branch_1 = Conv3D(filters=filters[0],
                          kernel_size=(1, 1, 1),
                          strides=(1, 1, 1),
                          padding='same',
                          kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        branch_1 = LeakyReLU()(branch_1)

        # branch 3x3
        branch_3 = Conv3D(filters=filters[0],
                          kernel_size=(1, 1, 1),
                          strides=(1, 1, 1),
                          padding='same',
                          kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        branch_3 = Conv3D(filters=filters[2],
                          kernel_size=(3, 3, 3),
                          strides=(1, 1, 1),
                          padding='same',
                          kernel_regularizer=l1_l2(l1_reg, l2_reg))(branch_3)
        branch_3 = LeakyReLU()(branch_3)

        # branch 5x5
        branch_5 = Conv3D(filters=filters[0],
                          kernel_size=(1, 1, 1),
                          strides=(1, 1, 1),
                          padding='same',
                          kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        branch_5 = Conv3D(filters=filters[1],
                          kernel_size=(5, 5, 5),
                          strides=(1, 1, 1),
                          padding='same',
                          kernel_regularizer=l1_l2(l1_reg, l2_reg))(branch_5)
        branch_5 = LeakyReLU()(branch_5)

        # branch maxpooling
        branch_pool = MaxPooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same')(inputs)
        branch_pool = Conv3D(filters=filters[0],
                             kernel_size=(1, 1, 1),
                             strides=(1, 1, 1),
                             padding='same',
                             kernel_regularizer=l1_l2(l1_reg, l2_reg))(branch_pool)
        branch_pool = LeakyReLU()(branch_pool)

        # concatenate branches together
        out = concatenate([branch_1, branch_3, branch_5, branch_pool], axis=1)
        return out

    return f

Developer: thomaskuestner, Project: CNNArt, Lines of code: 54, Source: network.py


Example 24: get_Discriminator

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def get_Discriminator(input_shape_1, input_shape_2, Encoder):
    dis_inputs_1 = Input(shape=input_shape_1)  # Image
    dis_inputs_2 = Input(shape=input_shape_2)  # Segmentation

    mul_1 = Multiply()([dis_inputs_1, dis_inputs_2])  # Getting segmented part

    encoder_output_1 = Encoder(dis_inputs_1)
    encoder_output_2 = Encoder(mul_1)

    subtract_dis = Subtract()([encoder_output_1, encoder_output_2])

    dis_conv_block = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same')(subtract_dis)
    dis_conv_block = Activation('relu')(dis_conv_block)
    dis_conv_block = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same')(dis_conv_block)
    dis_conv_block = Activation('relu')(dis_conv_block)
    dis_conv_block = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(dis_conv_block)

    dis_conv_block = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same')(dis_conv_block)
    dis_conv_block = Activation('relu')(dis_conv_block)
    dis_conv_block = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same')(dis_conv_block)
    dis_conv_block = Activation('relu')(dis_conv_block)

    dis_conv_block = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same')(dis_conv_block)
    dis_conv_block = Activation('relu')(dis_conv_block)
    dis_conv_block = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same')(dis_conv_block)
    dis_conv_block = Activation('relu')(dis_conv_block)

    flat_1 = Flatten()(dis_conv_block)

    dis_fc_1 = Dense(256)(flat_1)
    dis_fc_1 = Activation('relu')(dis_fc_1)
    dis_drp_1 = Dropout(0.5)(dis_fc_1)

    dis_fc_2 = Dense(128)(dis_drp_1)
    dis_fc_2 = Activation('relu')(dis_fc_2)
    dis_drp_2 = Dropout(0.5)(dis_fc_2)

    dis_fc_3 = Dense(1)(dis_drp_2)
    dis_similarity_output = Activation('sigmoid')(dis_fc_3)

    Discriminator = Model(inputs=[dis_inputs_1, dis_inputs_2], outputs=dis_similarity_output)
    Discriminator.compile(optimizer=Adadelta(lr=0.01), loss='binary_crossentropy', metrics=['accuracy'])

    print('Discriminator Architecture:')
    print(Discriminator.summary())

    return Discriminator

Developer: ardamavi, Project: 3D-Medical-Segmentation-GAN, Lines of code: 52, Source: get_models.py


Example 25: _build

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def _build(self):
    # get parameters
    proj = self.proj_params
    proj_axis = axes_dict(self.config.axes)[proj.axis]

    # define surface projection network (3D -> 2D)
    inp = u = Input(self.config.unet_input_shape)

    def conv_layers(u):
        for _ in range(proj.n_conv_per_depth):
            u = Conv3D(proj.n_filt, proj.kern, padding='same', activation='relu')(u)
        return u

    # down
    for _ in range(proj.n_depth):
        u = conv_layers(u)
        u = MaxPooling3D(proj.pool)(u)
    # middle
    u = conv_layers(u)
    # up
    for _ in range(proj.n_depth):
        u = UpSampling3D(proj.pool)(u)
        u = conv_layers(u)

    u = Conv3D(1, proj.kern, padding='same', activation='linear')(u)
    # convert learned features along Z to surface probabilities
    # (add 1 to proj_axis because of batch dimension in tensorflow)
    u = Lambda(lambda x: softmax(x, axis=1+proj_axis))(u)
    # multiply Z probabilities with Z values in input stack
    u = Multiply()([inp, u])
    # perform surface projection by summing over weighted Z values
    u = Lambda(lambda x: K.sum(x, axis=1+proj_axis))(u)
    model_projection = Model(inp, u)

    # define denoising network (2D -> 2D)
    # (remove projected axis from input_shape)
    input_shape = list(self.config.unet_input_shape)
    del input_shape[proj_axis]
    model_denoising = nets.common_unet(
        n_dim           = self.config.n_dim-1,
        n_channel_out   = self.config.n_channel_out,
        prob_out        = self.config.probabilistic,
        residual        = self.config.unet_residual,
        n_depth         = self.config.unet_n_depth,
        kern_size       = self.config.unet_kern_size,
        n_first         = self.config.unet_n_first,
        last_activation = self.config.unet_last_activation,
    )(tuple(input_shape))

    # chain models together
    return Model(inp, model_denoising(model_projection(inp)))

Developer: CSBDeep, Project: CSBDeep, Lines of code: 50, Source: care_projection.py


Example 26: __timeception_shuffled_depthwise

# Required import: from keras import layers [as alias]
# Or alternatively: from keras.layers import MaxPooling3D [as alias]
def __timeception_shuffled_depthwise(tensor_input, n_groups, n_channels_per_branch):
    _, n_timesteps, side_dim1, side_dim2, n_channels_in = tensor_input.get_shape().as_list()

    assert n_channels_in % n_groups == 0
    n_branches = 5
    n_channels_per_group_in = n_channels_in / n_groups
    n_channels_out = n_groups * n_branches * n_channels_per_branch
    n_channels_per_group_out = n_channels_out / n_groups

    assert n_channels_out % n_groups == 0

    # slice maps into groups
    tensors = Lambda(lambda x: [x[:, :, :, :, i * n_channels_per_group_in:(i + 1) * n_channels_per_group_in] for i in range(n_groups)])(tensor_input)

    t_outputs = []
    for idx_group in range(n_groups):
        tensor_group = tensors[idx_group]

        # branch 1: dimension reduction only and no temporal conv
        t_0 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
        t_0 = BatchNormalization()(t_0)

        # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
        t_3 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
        t_3 = DepthwiseConv1DLayer(3, padding='same')(t_3)
        t_3 = BatchNormalization()(t_3)

        # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
        t_5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
        t_5 = DepthwiseConv1DLayer(5, padding='same')(t_5)
        t_5 = BatchNormalization()(t_5)

        # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
        t_7 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
        t_7 = DepthwiseConv1DLayer(7, padding='same')(t_7)
        t_7 = BatchNormalization()(t_7)

        # branch 5: dimension reduction followed by temporal max pooling
        t_1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
        t_1 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same')(t_1)
        t_1 = BatchNormalization()(t_1)

        # concatenate channels of branches
        tensor = Concatenate(axis=4)([t_0, t_3, t_5, t_7, t_1])
        t_outputs.append(tensor)

    # concatenate channels of groups
    tensor = Concatenate(axis=4)(t_outputs)
    tensor = Activation('relu')(tensor)

    # shuffle channels
    tensor = ReshapeLayer((n_timesteps, side_dim1, side_dim2, n_groups, n_channels_per_group_out))(tensor)
    tensor = TransposeLayer((0, 1, 2, 3, 5, 4))(tensor)
    tensor = ReshapeLayer((n_timesteps, side_dim1, side_dim2, n_channels_out))(tensor)

    return tensor

Developer: noureldien, Project: videograph, Lines of code: 58, Source: timeception.py

