This page collects typical usage examples of the Python method keras.layers.SpatialDropout1D. If you are unsure what layers.SpatialDropout1D does, how to call it, or what real-world usages look like, the curated examples below should help; you can also explore the other members of the keras.layers module. In total, 22 code examples of layers.SpatialDropout1D are listed, ordered by popularity. A minimal sketch of the layer's typical placement follows Example 1.

Example 1: CapsuleNet
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Author: WeavingWong, Project: DigiX_HuaWei_Population_Age_Attribution_Predict, Lines: 24, Source: models.py
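Before the longer project examples, here is a minimal, self-contained sketch of where SpatialDropout1D typically sits: directly after an Embedding layer, where it zeroes entire feature channels across all timesteps rather than individual activations. All sizes below (vocabulary size, sequence length, embedding width) are illustrative assumptions, not values taken from any project quoted on this page.

from keras.layers import Dense, Embedding, GlobalMaxPooling1D, Input, SpatialDropout1D
from keras.models import Model

# Illustrative sizes only: 100 token ids per sample, 10k-word vocabulary, 128-dim embeddings.
inputs = Input(shape=(100,), dtype='int32')
x = Embedding(input_dim=10000, output_dim=128)(inputs)   # output shape: (batch, 100, 128)
x = SpatialDropout1D(0.3)(x)                             # drops whole 128-dim channels, not single cells
x = GlobalMaxPooling1D()(x)
outputs = Dense(1, activation='sigmoid')(x)

model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()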
Example 2: CapsuleNet_v2
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Author: WeavingWong, Project: DigiX_HuaWei_Population_Age_Attribution_Predict, Lines: 24, Source: models.py
Example 3: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = SpatialDropout1D(self.dropout_spatial)(x)
    x = AttentionSelf(self.word_embedding.embed_size)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 20, Source: graph.py
Example 4: build_model_text_cnn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def build_model_text_cnn(self):
    ######### text-cnn #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()

    # text cnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_out = []
    for index, filter_size in enumerate(self.filters):
        x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                   filters=int(self.embedding_dim / 2),
                   kernel_size=self.filters[index],
                   padding='valid',
                   kernel_initializer='normal',
                   activation='relu')(bert_output_emmbed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        concat_out.append(x)
    x = Concatenate(axis=1)(concat_out)
    x = Dropout(self.keep_prob)(x)

    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 5: keras_dropout
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def keras_dropout(layer, rate):
    """Keras dropout layer."""
    from keras import layers

    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)
Author: microsoft, Project: nni, Lines: 18, Source: layers.py
Example 6: Token_Embedding
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def Token_Embedding(x, input_dim, output_dim, embed_weights=None,
                    mask_zero=False, input_length=None, dropout_rate=0,
                    embed_l2=1E-6, name='', time_distributed=False, **kwargs):
    """Basic token embedding layer, also includes some dropout layers."""
    embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
    embed_layer = Embedding(input_dim=input_dim,
                            output_dim=output_dim,
                            weights=embed_weights,
                            mask_zero=mask_zero,
                            input_length=input_length,
                            embeddings_regularizer=embed_reg,
                            name=name)
    if time_distributed:
        embed = TimeDistributed(embed_layer)(x)
    else:
        embed = embed_layer(x)
    # entire embedding channels are dropped out instead of the
    # normal Keras embedding dropout, which drops all channels for entire words
    # many of the datasets contain so few words that losing one or more words can alter the emotions completely
    if dropout_rate != 0:
        embed = SpatialDropout1D(dropout_rate)(embed)
    return embed
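A hypothetical call to the Token_Embedding helper above might look as follows; the tensor name, vocabulary size, sequence length, and embedding width are illustrative assumptions, not values from the original project.

from keras.layers import Input

tokens = Input(shape=(128,), dtype='int32')        # assumed max sequence length of 128
embedded = Token_Embedding(tokens,
                           input_dim=20000,        # assumed vocabulary size
                           output_dim=300,         # assumed embedding dimension
                           input_length=128,
                           dropout_rate=0.3,       # SpatialDropout1D drops whole embedding channels
                           name='token_embedding')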
Example 7: dummy_1_build_fn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def dummy_1_build_fn(input_shape=(1,)):
    model = Sequential(
        [
            Embedding(input_dim=9999, output_dim=200, input_length=100, trainable=True),
            SpatialDropout1D(rate=0.5),
            Flatten(),
            Dense(100, activation="relu"),
            Dense(1, activation="sigmoid"),
        ]
    )
    model.compile(
        optimizer=RMSprop(lr=0.02, decay=0.001),
        loss=mean_absolute_error,
        metrics=["mean_absolute_error"],
    )
    return model
Example 8: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    x = Lambda(lambda x: x[:, 0:1, :])(embedding_output)  # take the CLS vector
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max),
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 32, Source: graph.py
Example 9: word_level
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def word_level(self):
    x_input_word = Input(shape=(self.len_max, self.embed_size))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_word)
    x = Bidirectional(GRU(units=self.rnn_units,
                          return_sequences=True,
                          activation='relu',
                          kernel_regularizer=regularizers.l2(self.l2),
                          recurrent_regularizer=regularizers.l2(self.l2)))(x_input_word)
    out_sent = AttentionSelf(self.rnn_units * 2)(x)
    model = Model(x_input_word, out_sent)
    return model
Author: yongzhuo, Project: Keras-TextClassification, Lines: 13, Source: graph.py
Example 10: sentence_level
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def sentence_level(self):
    x_input_sen = Input(shape=(self.len_max, self.rnn_units * 2))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_sen)
    output_doc = Bidirectional(GRU(units=self.rnn_units * 2,
                                   return_sequences=True,
                                   activation='relu',
                                   kernel_regularizer=regularizers.l2(self.l2),
                                   recurrent_regularizer=regularizers.l2(self.l2)))(x_input_sen)
    output_doc_att = AttentionSelf(self.word_embedding.embed_size)(output_doc)
    model = Model(x_input_sen, output_doc_att)
    return model
Author: yongzhuo, Project: Keras-TextClassification, Lines: 13, Source: graph.py
Example 11: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(embedding_output)

    # region embedding layer comes first
    conv_1 = Conv1D(self.filters[0][0],
                    kernel_size=1,
                    strides=1,
                    padding='SAME',
                    kernel_regularizer=l2(self.l2),
                    bias_regularizer=l2(self.l2),
                    activation=self.activation_conv,
                    )(embedding_output_spatial)
    block = ReLU()(conv_1)

    for filters_block in self.filters:
        for j in range(filters_block[1] - 1):
            # conv + short-cut
            block_mid = self.convolutional_block(block, units=filters_block[0])
            block = shortcut_conv(block, block_mid, shortcut=True)
        # conv + max-pooling here
        block_mid = self.convolutional_block(block, units=filters_block[0])
        block = shortcut_pool(block, block_mid, filters=filters_block[0], pool_type=self.pool_type, shortcut=True)

    block = k_max_pooling(top_k=self.top_k)(block)
    block = Flatten()(block)
    block = Dropout(self.dropout)(block)
    # fully-connected layers
    # block_fully = Dense(2048, activation='tanh')(block)
    # output = Dense(2048, activation='tanh')(block_fully)
    output = Dense(self.label, activation=self.activate_classify)(block)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 41, Source: graph.py
Example 12: create_model_gru
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model_gru(self, hyper_parameters):
    """
    Build the neural network: bi-GRU + capsule.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output
    embed_layer = SpatialDropout1D(self.dropout)(embedding)
    x_bi = Bidirectional(GRU(self.filters_num,
                             activation='relu',
                             dropout=self.dropout,
                             recurrent_dropout=self.dropout,
                             return_sequences=True))(embed_layer)

    # a single capsule layer
    capsule = Capsule_bojone(num_capsule=self.num_capsule,
                             dim_capsule=self.dim_capsule,
                             routings=self.routings,
                             kernel_size=(3, 1),
                             share_weights=True)(x_bi)

    # # multiple pooling layers
    # conv_pools = []
    # for filter in self.filters:
    #     capsule = Capsule_bojone(num_capsule=self.num_capsule,
    #                              dim_capsule=self.dim_capsule,
    #                              routings=self.routings,
    #                              kernel_size=(filter, 1),
    #                              share_weights=True)(x_bi)
    #     conv_pools.append(capsule)
    # capsule = Concatenate(axis=-1)(conv_pools)

    capsule = Flatten()(capsule)
    capsule = Dropout(self.dropout)(capsule)
    output = Dense(self.label, activation=self.activate_classify)(capsule)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 39, Source: graph.py
Example 13: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    # x = embedding_output
    x = Lambda(lambda x: x[:, -2:-1, :])(embedding_output)  # take the CLS vector
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max),
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 33, Source: graph.py
Example 14: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    x = Lambda(lambda x: x[:, 0:1, :])(embedding_output)  # take the CLS vector
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max),
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(132)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 32, Source: graph.py
Example 15: test_dropout
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def test_dropout():
    layer_test(layers.Dropout,
               kwargs={'rate': 0.5},
               input_shape=(3, 2))

    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [3, 1]},
               input_shape=(3, 2))

    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [None, 1]},
               input_shape=(3, 2))

    layer_test(layers.SpatialDropout1D,
               kwargs={'rate': 0.5},
               input_shape=(2, 3, 4))

    for data_format in ['channels_last', 'channels_first']:
        for shape in [(4, 5), (4, 5, 6)]:
            if data_format == 'channels_last':
                input_shape = (2,) + shape + (3,)
            else:
                input_shape = (2, 3) + shape
            layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                       kwargs={'rate': 0.5, 'data_format': data_format},
                       input_shape=input_shape)

            # Test invalid use cases
            with pytest.raises(ValueError):
                layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                           kwargs={'rate': 0.5, 'data_format': 'channels_middle'},
                           input_shape=input_shape)
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 36, Source: core_test.py
Example 16: build_model_r_cnn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def build_model_r_cnn(self):
    ######### RCNN #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()

    # rcnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    if args.use_lstm:
        if args.use_cudnn_cell:
            layer_cell = CuDNNLSTM
        else:
            layer_cell = LSTM
    else:
        if args.use_cudnn_cell:
            layer_cell = CuDNNGRU
        else:
            layer_cell = GRU
    x = Bidirectional(layer_cell(units=args.units,
                                 return_sequences=args.return_sequences,
                                 kernel_regularizer=regularizers.l2(args.l2 * 0.1),
                                 recurrent_regularizer=regularizers.l2(args.l2)
                                 ))(bert_output_emmbed)
    x = Dropout(args.keep_prob)(x)
    x = Conv1D(filters=int(self.embedding_dim / 2),
               kernel_size=2,
               padding='valid',
               kernel_initializer='normal',
               activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(args.keep_prob)(x)

    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 17: build_model_avt_cnn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def build_model_avt_cnn(self):
    ######### text-cnn #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()

    # text cnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_x = []
    concat_y = []
    concat_z = []
    for index, filter_size in enumerate(self.filters):
        conv = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                      filters=int(self.embedding_dim / 2),
                      kernel_size=self.filters[index],
                      padding='valid',
                      kernel_initializer='normal',
                      activation='relu')(bert_output_emmbed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPooling1D_{}'.format(index))(conv)
        y = GlobalAveragePooling1D(name='TextCNN_AveragePooling1D_{}'.format(index))(conv)
        z = AttentionWeightedAverage(name='TextCNN_Annention_{}'.format(index))(conv)
        concat_x.append(x)
        concat_y.append(y)
        concat_z.append(z)

    merge_x = Concatenate(axis=1)(concat_x)
    merge_y = Concatenate(axis=1)(concat_y)
    merge_z = Concatenate(axis=1)(concat_z)
    merge_xyz = Concatenate(axis=1)([merge_x, merge_y, merge_z])
    x = Dropout(self.keep_prob)(merge_xyz)

    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 18: _get_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def _get_model(self):
    d = 0.5
    rd = 0.5
    rnn_units = 128
    input_text = Input((self.input_length,))
    text_embedding = Embedding(input_dim=self.max_words + 2,
                               output_dim=self.emb_dim,
                               input_length=self.input_length,
                               mask_zero=True)(input_text)
    text_embedding = SpatialDropout1D(0.5)(text_embedding)
    bilstm = Bidirectional(LSTM(units=rnn_units,
                                return_sequences=True,
                                dropout=d,
                                recurrent_dropout=rd))(text_embedding)
    x, attn = AttentionWeightedAverage(return_attention=True)(bilstm)
    x = Dropout(0.5)(x)
    out = Dense(units=self.n_classes, activation="softmax")(x)
    model = Model(input_text, out)
    return model
Author: tsterbak, Project: keras_attention, Lines: 17, Source: models.py
Example 19: conv_bn_relu_spadrop
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def conv_bn_relu_spadrop(X, **conv_params):
    dropout_rate = conv_params.setdefault("dropout_rate", 0.5)
    A = conv_bn_relu(X, **conv_params)
    return SpatialDropout1D(dropout_rate)(A)

# -----------------------------------------------------------------------
Example 20: conv2d_bn_relu_spadrop
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def conv2d_bn_relu_spadrop(X, **conv_params):
    dropout_rate = conv_params.setdefault("dropout_rate", 0.5)
    A = conv2d_bn_relu(X, **conv_params)
    return SpatialDropout1D(dropout_rate)(A)

# -----------------------------------------------------------------------
Example 21: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self):
    embedding_size = 100
    self.model = Sequential()
    self.model.add(Embedding(input_dim=self.vocab_size, output_dim=embedding_size, input_length=self.max_len))
    self.model.add(SpatialDropout1D(0.2))
    self.model.add(LSTM(units=64, dropout=0.2, recurrent_dropout=0.2))
    self.model.add(Dense(1, activation='sigmoid'))

    self.model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
Author: chen0040, Project: keras-english-resume-parser-and-analyzer, Lines: 11, Source: lstm.py
Example 22: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self):
    embedding_size = 100
    self.model = Sequential()
    self.model.add(Embedding(input_dim=self.vocab_size, input_length=self.max_len, output_dim=embedding_size))
    self.model.add(SpatialDropout1D(0.2))
    self.model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
    self.model.add(GlobalMaxPool1D())
    self.model.add(Dense(units=len(self.labels), activation='softmax'))

    self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
Author: chen0040, Project: keras-english-resume-parser-and-analyzer, Lines: 12, Source: cnn.py