This tutorial collects practical code examples for the Python layers.ZeroPadding1D method; hopefully it is useful to you.
This article gathers typical usage examples of the Python keras.layers.ZeroPadding1D method. If you have been wondering what layers.ZeroPadding1D does, how to call it, or what real usages look like, the hand-picked examples below should help; you can also browse further usage examples from its parent module, keras.layers. Seven code examples of layers.ZeroPadding1D are shown below, sorted by popularity by default.

Example 1: test_tiny_conv_pad_1d_random

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def test_tiny_conv_pad_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 2
    input_length = 10
    filter_length = 3
    nb_filters = 4
    model = Sequential()
    model.add(
        Conv1D(
            nb_filters,
            kernel_size=filter_length,
            padding="same",
            input_shape=(input_length, input_dim),
        )
    )
    model.add(ZeroPadding1D(padding=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
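Example 1 exercises ZeroPadding1D inside a model-conversion test (the _test_model helper and the _MLMODEL_FULL_PRECISION constant come from its surrounding test suite and are not shown here). As a quick standalone illustration of what the layer itself does, independent of any of the projects listed on this page, the following sketch shows how padding=2 changes the time dimension:

import numpy as np
from keras.models import Sequential
from keras.layers import ZeroPadding1D

# padding=2 adds two all-zero timesteps at the start and two at the end,
# so a (batch, 10, 2) input becomes (batch, 14, 2).
model = Sequential()
model.add(ZeroPadding1D(padding=2, input_shape=(10, 2)))
print(model.output_shape)                          # (None, 14, 2)
print(model.predict(np.ones((1, 10, 2))).shape)    # (1, 14, 2)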
Example 2: test_keras_import

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def test_keras_import(self):
    # Pad 1D
    model = Sequential()
    model.add(ZeroPadding1D(2, input_shape=(224, 3)))
    model.add(Conv1D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
    # Pad 2D
    model = Sequential()
    model.add(ZeroPadding2D(2, input_shape=(224, 224, 3)))
    model.add(Conv2D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
    # Pad 3D
    model = Sequential()
    model.add(ZeroPadding3D(2, input_shape=(224, 224, 224, 3)))
    model.add(Conv3D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
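The pad_test helper in Example 2 belongs to the surrounding test suite and is not reproduced here. The padding behaviour it checks can be read directly off the layer output shapes; a small standalone check (shapes only, without the original assertions) might look like this:

from keras.models import Sequential
from keras.layers import ZeroPadding1D, ZeroPadding2D, ZeroPadding3D

# A symmetric padding of 2 adds 4 to each padded spatial/temporal dimension.
for layer in [ZeroPadding1D(2, input_shape=(224, 3)),
              ZeroPadding2D(2, input_shape=(224, 224, 3)),
              ZeroPadding3D(2, input_shape=(224, 224, 224, 3))]:
    model = Sequential([layer])
    print(model.output_shape)
# (None, 228, 3)
# (None, 228, 228, 3)
# (None, 228, 228, 228, 3)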
Example 3: call

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def call(self, inputs):
    x_input_pad = ZeroPadding1D((self.filter_size - 1, self.filter_size - 1))(inputs)
    conv_1d = Conv1D(filters=self.filter_num,
                     kernel_size=self.filter_size,
                     strides=1,
                     padding='VALID',
                     kernel_initializer='normal',
                     activation='tanh')(x_input_pad)
    return conv_1d
Author: yongzhuo, Project: Keras-TextClassification, Lines of code: 11, Source file: graph.py
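Example 3 is the call method of a custom layer whose class definition (and its filter_num / filter_size attributes) is not shown here. As a rough, self-contained sketch of the same "wide convolution" pattern — pad both ends by filter_size - 1 so that a 'valid' convolution produces a sequence longer than the input — one could write the following, with values chosen purely for illustration:

from keras.layers import Input, ZeroPadding1D, Conv1D
from keras.models import Model

filter_num, filter_size = 64, 3            # illustrative values, not from the original layer
inputs = Input(shape=(10, 8))               # 10 timesteps, 8 features
x = ZeroPadding1D((filter_size - 1, filter_size - 1))(inputs)     # 10 -> 14 timesteps
outputs = Conv1D(filters=filter_num, kernel_size=filter_size, strides=1,
                 padding='valid', activation='tanh')(x)            # 14 - 3 + 1 = 12 timesteps
print(Model(inputs, outputs).output_shape)  # (None, 12, 64)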
Example 4: build_ds5_no_ctc_and_xfer_weights

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def build_ds5_no_ctc_and_xfer_weights(loaded_model, input_dim=161, fc_size=1024, rnn_size=512,
                                      output_dim=29, initialization='glorot_uniform',
                                      conv_layers=4):
    """ Pure CNN implementation"""

    K.set_learning_phase(0)
    for ind, i in enumerate(loaded_model.layers):
        print(ind, i)

    kernel_size = 11
    conv_depth_1 = 64
    conv_depth_2 = 256

    input_data = Input(shape=(None, input_dim), name='the_input')  # batch x time x spectro size

    conv = ZeroPadding1D(padding=(0, 2048))(input_data)  # pad on time dimension
    x = Conv1D(filters=128, name='conv_1', kernel_size=kernel_size, padding='valid',
               activation='relu', strides=2,
               weights=loaded_model.layers[2].get_weights())(conv)
    # x = Conv1D(filters=1024, name='conv_2', kernel_size=kernel_size, padding='valid',
    #            activation='relu', strides=2,
    #            weights=loaded_model.layers[3].get_weights())(x)

    # Last Layer 5+6: TimeDistributed Dense layer & Softmax
    x = TimeDistributed(Dense(fc_size, activation='relu',
                              weights=loaded_model.layers[3].get_weights()))(x)
    y_pred = TimeDistributed(Dense(output_dim, name="y_pred", activation="softmax"))(x)

    model = Model(inputs=input_data, outputs=y_pred)

    return model
Author: robmsmt, Project: KerasDeepSpeech, Lines of code: 32, Source file: model.py
Example 5: cnn_city

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def cnn_city(input_dim=161, fc_size=1024, rnn_size=512, output_dim=29,
             initialization='glorot_uniform', conv_layers=4):
    """ Pure CNN implementation

    Architecture:
        1 Convolutional Layer
        1 Fully connected Dense
        1 Softmax output

    Details:
        - Network does not dynamically adapt to maximum audio size in the first
          convolutional layer. Max conv length padded at 2048 chars, otherwise use_conv=False

    Reference:
    """
    # filters = output size
    # kernel_size = height and width of conv window
    # strides = step size of the conv window
    kernel_size = 11
    conv_depth_1 = 64
    conv_depth_2 = 256

    input_data = Input(shape=(None, input_dim), name='the_input')  # batch x time x spectro size

    conv = ZeroPadding1D(padding=(0, 2048))(input_data)  # pad on time dimension
    x = Conv1D(filters=128, name='conv_1', kernel_size=kernel_size, padding='valid',
               activation='relu', strides=2)(conv)
    # x = Conv1D(filters=1024, name='conv_2', kernel_size=kernel_size, padding='valid',
    #            activation='relu', strides=2)(x)

    # Last Layer 5+6: TimeDistributed Dense layer & Softmax
    x = TimeDistributed(Dense(fc_size, activation='relu'))(x)
    y_pred = TimeDistributed(Dense(output_dim, name="y_pred", activation="softmax"))(x)

    # labels = K.placeholder(name='the_labels', ndim=1, dtype='int32')
    labels = Input(name='the_labels', shape=[None, ], dtype='int32')
    input_length = Input(name='input_length', shape=[1], dtype='int32')
    label_length = Input(name='label_length', shape=[1], dtype='int32')

    # Keras doesn't currently support loss funcs with extra parameters,
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
        [y_pred, labels, input_length, label_length])

    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)

    return model
Author: robmsmt, Project: KerasDeepSpeech, Lines of code: 55, Source file: model.py
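Example 5 relies on a ctc_lambda_func that is defined elsewhere in the KerasDeepSpeech repository and is not part of this listing. A typical implementation of such a function, built on Keras's backend ctc_batch_cost, is sketched below; this is the conventional pattern, not necessarily the repository's exact code:

from keras import backend as K

def ctc_lambda_func(args):
    # Unpack the four tensors that the Lambda layer in Example 5 passes in,
    # then delegate to the built-in batched CTC loss.
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)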
Example 6: create_default_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def create_default_model(config_data):
    nb_filter = 200
    filter_length = 6
    hidden_dims = nb_filter

    embedding_matrix = load_embedding_matrix(config_data)
    max_features = embedding_matrix.shape[0]
    embedding_dims = embedding_matrix.shape[1]
    max_len = config_data['max_sentence_length']

    logging.info('Build Model...')
    logging.info('Embedding Dimensions: ({},{})'.format(max_features, embedding_dims))

    main_input = Input(batch_shape=(None, max_len), dtype='int32', name='main_input')

    if not config_data.get('random_embedding', None):
        logging.info('Pretrained Word Embeddings')
        embeddings = Embedding(
            max_features,
            embedding_dims,
            input_length=max_len,
            weights=[embedding_matrix],
            trainable=False
        )(main_input)
    else:
        logging.info('Random Word Embeddings')
        embeddings = Embedding(max_features, embedding_dims, init='lecun_uniform',
                               input_length=max_len)(main_input)

    zeropadding = ZeroPadding1D(filter_length - 1)(embeddings)

    conv1 = Convolution1D(
        nb_filter=nb_filter,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1)(zeropadding)
    max_pooling1 = MaxPooling1D(pool_length=4, stride=2)(conv1)

    conv2 = Convolution1D(
        nb_filter=nb_filter,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1)(max_pooling1)
    max_pooling2 = MaxPooling1D(pool_length=conv2._keras_shape[1])(conv2)

    flatten = Flatten()(max_pooling2)

    hidden = Dense(hidden_dims)(flatten)
    softmax_layer1 = Dense(3, activation='softmax', name='sentiment_softmax',
                           init='lecun_uniform')(hidden)

    model = Model(input=[main_input], output=softmax_layer1)
    test_model = Model(input=[main_input], output=[softmax_layer1, hidden])

    return model, test_model
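Example 6 is written against the Keras 1 API (Convolution1D, nb_filter, border_mode, subsample_length, pool_length, init, and the input/output keyword arguments). Under Keras 2, the zero-padding and convolution portion would be expressed roughly as follows; this is a sketch of the equivalent calls with illustrative sizes, not code from the original project:

from keras.layers import Input, Embedding, ZeroPadding1D, Conv1D, MaxPooling1D
from keras.models import Model

max_len, max_features, embedding_dims = 50, 10000, 100   # illustrative sizes
nb_filter, filter_length = 200, 6

main_input = Input(shape=(max_len,), dtype='int32', name='main_input')
embeddings = Embedding(max_features, embedding_dims, input_length=max_len)(main_input)

# ZeroPadding1D(filter_length - 1) pads 5 timesteps on each side of the sequence.
zeropadding = ZeroPadding1D(filter_length - 1)(embeddings)
conv1 = Conv1D(filters=nb_filter, kernel_size=filter_length,
               padding='valid', activation='relu', strides=1)(zeropadding)
max_pooling1 = MaxPooling1D(pool_size=4, strides=2)(conv1)

model = Model(inputs=main_input, outputs=max_pooling1)
model.summary()   # ... -> (None, 60, 100) -> (None, 55, 200) -> (None, 26, 200)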
Example 7: pooling

# Required import: from keras import layers [as alias]
# Or: from keras.layers import ZeroPadding1D [as alias]
def pooling(layer, layer_in, layerId, tensor=True):
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    out = {}
    layer_type = layer['params']['layer_type']
    pool_type = layer['params']['pool']
    padding = get_padding(layer)
    if (layer_type == '1D'):
        strides = layer['params']['stride_w']
        kernel = layer['params']['kernel_w']
        if (padding == 'custom'):
            p_w = layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding1D(padding=p_w)(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    elif (layer_type == '2D'):
        strides = (layer['params']['stride_h'], layer['params']['stride_w'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'])
        if (padding == 'custom'):
            p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    else:
        strides = (layer['params']['stride_h'], layer['params']['stride_w'],
                   layer['params']['stride_d'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'],
                  layer['params']['kernel_d'])
        if (padding == 'custom'):
            p_h, p_w, p_d = (layer['params']['pad_h'], layer['params']['pad_w'],
                             layer['params']['pad_d'])
            out[layerId + 'Pad'] = ZeroPadding3D(padding=(p_h, p_w, p_d))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    # Note - figure out a permanent fix for padding calculation of layers
    # in case padding is given in layer attributes
    # if ('padding' in layer['params']):
    #     padding = layer['params']['padding']
    out[layerId] = poolMap[(layer_type, pool_type)](
        pool_size=kernel, strides=strides, padding=padding)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out
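The 'custom' padding branch in Example 7 is equivalent to inserting an explicit zero-padding layer in front of a pooling layer that uses 'valid' padding. For the 1D case, the effect can be illustrated in isolation (the tensor sizes here are arbitrary, and the layer dictionary and get_padding helper of the original project are not reproduced):

from keras.layers import Input, ZeroPadding1D, MaxPooling1D
from keras.models import Model

inp = Input(shape=(10, 8))                         # 10 timesteps, 8 channels
padded = ZeroPadding1D(padding=1)(inp)             # pad_w = 1 -> 12 timesteps
pooled = MaxPooling1D(pool_size=3, strides=2, padding='valid')(padded)
print(Model(inp, pooled).output_shape)             # (None, 5, 8): (12 - 3) // 2 + 1 = 5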