这篇教程整理了 Python layers.GlobalMaxPooling1D 方法的代码示例,内容很实用,希望能帮到您。
# Example 1: VariousConv1D (SeqGAN, models.py)
def VariousConv1D(x, filter_sizes, num_filters, name_prefix=''):
    '''Layer wrapper that runs several parallel Conv1D branches with
    different kernel sizes, max-pools each over time, and concatenates.

    # Arguments:
        x: tensor, shape = (B, T, E)
        filter_sizes: list of int, Conv1D kernel size for each branch
        num_filters: list of int, number of filters for each branch
        name_prefix: str, prefix prepended to every layer name
    # Returns:
        out: tensor, shape = (B, sum(num_filters))
    '''
    branch_outputs = []
    for filter_size, n_filter in zip(filter_sizes, num_filters):
        conv_name = '{}VariousConv1D/Conv1D/filter_size_{}'.format(name_prefix, filter_size)
        pooling_name = '{}VariousConv1D/MaxPooling/filter_size_{}'.format(name_prefix, filter_size)
        features = Conv1D(n_filter, filter_size, name=conv_name)(x)  # (B, time_steps, n_filter)
        features = GlobalMaxPooling1D(name=pooling_name)(features)   # (B, n_filter)
        branch_outputs.append(features)
    concatenate_name = '{}VariousConv1D/Concatenate'.format(name_prefix)
    out = Concatenate(name=concatenate_name)(branch_outputs)
    return out
开发者ID:tyo-yo,项目名称:SeqGAN,代码行数:23,代码来源:models.py
# Example 2: create_model (Keras-TextClassification, graph.py)
def create_model(self, hyper_parameters):
    """
    Build the self-attention classification graph.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    features = self.word_embedding.output
    features = SpatialDropout1D(self.dropout_spatial)(features)
    features = AttentionSelf(self.word_embedding.embed_size)(features)
    features = GlobalMaxPooling1D()(features)
    features = Dropout(self.dropout)(features)
    # Softmax classification head.
    dense_layer = Dense(self.label, activation=self.activate_classify)(features)
    self.model = Model(self.word_embedding.input, [dense_layer])
    self.model.summary(120)
开发者ID:yongzhuo,项目名称:Keras-TextClassification,代码行数:20,代码来源:graph.py
# Example 3: build_model_text_cnn
def build_model_text_cnn(self):
    ######### text-cnn #########
    # BERT token embeddings feed parallel Conv1D branches (classic TextCNN).
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    embed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    pooled = []
    for index, filter_size in enumerate(self.filters):
        branch = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                        filters=int(self.embedding_dim / 2),
                        kernel_size=self.filters[index],
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')(embed)
        pooled.append(GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(branch))
    merged = Concatenate(axis=1)(pooled)
    merged = Dropout(self.keep_prob)(merged)
    # Softmax classification head.
    dense_layer = Dense(self.label, activation=self.activation)(merged)
    self.model = Model(bert_inputs, [dense_layer])
# Example 4: build_cnn (semeval2017-scienceie, convNet.py)
def build_cnn(input_shape, output_dim, nb_filter):
    # Single-filter-width 1D CNN classifier.
    # NOTE: uses the legacy Keras 1 argument names (nb_filter, filter_length,
    # border_mode, subsample_length) — kept as-is for compatibility.
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1,
                          input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
# just one filter
开发者ID:UKPLab,项目名称:semeval2017-scienceie,代码行数:18,代码来源:convNet.py
# Example 5: build_cnn_char (semeval2017-scienceie, convNet.py)
def build_cnn_char(input_dim, output_dim, nb_filter):
    # Character-level CNN classifier (legacy Keras 1 API arguments).
    # NOTE(review): relies on a module-level `maxlen` — confirm it is defined
    # in the enclosing module.
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32,  # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
# just one filter
开发者ID:UKPLab,项目名称:semeval2017-scienceie,代码行数:22,代码来源:convNet.py
# Example 6: ConvolutionLayer (WeSTClass, model.py)
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None, embedding_matrix=None, word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    # TextCNN classifier: pretrained embeddings -> parallel convolutions
    # -> global max pooling -> dense softmax head.
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz,
                  word_embedding_dim,
                  input_length=(input_shape,),
                  name="embedding",
                  weights=[embedding_matrix],
                  trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        branch = Convolution1D(filters=num_filters,
                               kernel_size=sz,
                               padding="valid",
                               activation=act,
                               strides=1,
                               kernel_initializer=init)(z)
        conv_blocks.append(GlobalMaxPooling1D()(branch))
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier')
开发者ID:yumeng5,项目名称:WeSTClass,代码行数:21,代码来源:model.py
# Example 7: get_umtmum_embedding (MCRec)
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # One shared Conv1D is applied to every UMTMUM path; each path is
    # max-pooled over time and regularized with dropout.
    # NOTE(review): `slice` below is presumably a project-level helper
    # (not the builtin) — verify in the enclosing module.
    conv_umtmum = Conv1D(filters=128,
                         kernel_size=4,
                         activation='relu',
                         kernel_regularizer=l2(0.0),
                         kernel_initializer='glorot_uniform',
                         padding='valid',
                         strides=1,
                         name='umtmum_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umtmum_input)
        pooled = GlobalMaxPooling1D()(conv_umtmum(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path-level attention is disabled in the original; a plain max-pool
    # aggregates the per-path embeddings instead.
    output = GlobalMaxPooling1D()(output)
    return output
# Example 8: get_umtm_embedding (MCRec)
def get_umtm_embedding(umtm_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # Shared Conv1D over each UMTM path, then per-path global max pooling
    # and dropout, then a max-pool over all paths.
    conv_umtm = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umtm_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umtm_input)
        pooled = GlobalMaxPooling1D()(conv_umtm(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path attention intentionally disabled; aggregate paths by max.
    output = GlobalMaxPooling1D()(output)
    return output
# Example 9: get_umum_embedding (MCRec)
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # Shared Conv1D over each UMUM path, per-path max pooling + dropout,
    # then a max-pool across paths.
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umum_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umum_input)
        pooled = GlobalMaxPooling1D()(conv_umum(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path attention intentionally disabled; aggregate paths by max.
    output = GlobalMaxPooling1D()(output)
    return output
# Example 10: get_uuum_embedding (MCRec)
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # Same pipeline as the other path embeddings, for UUUM paths.
    # (The parameter is named `umum_input` in the original — kept for
    # keyword-argument compatibility.)
    conv_uuum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='uuum_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umum_input)
        pooled = GlobalMaxPooling1D()(conv_uuum(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path attention intentionally disabled; aggregate paths by max.
    output = GlobalMaxPooling1D()(output)
    return output
# Example 11: get_umtmum_embedding (RecSys2019_DeepLearning_Evaluation, MCRec.py)
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # Shared Conv1D over each UMTMUM path, per-path global max pooling and
    # dropout, then a max-pool across the per-path embeddings.
    conv_umtmum = Conv1D(filters=128,
                         kernel_size=4,
                         activation='relu',
                         kernel_regularizer=l2(0.0),
                         kernel_initializer='glorot_uniform',
                         padding='valid',
                         strides=1,
                         name='umtmum_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umtmum_input)
        pooled = GlobalMaxPooling1D()(conv_umtmum(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path attention intentionally disabled; aggregate paths by max.
    output = GlobalMaxPooling1D()(output)
    return output
开发者ID:MaurizioFD,项目名称:RecSys2019_DeepLearning_Evaluation,代码行数:27,代码来源:MCRec.py
# Example 12: get_umum_embedding (RecSys2019_DeepLearning_Evaluation, MCRec.py)
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # Shared Conv1D over each UMUM path, per-path global max pooling and
    # dropout, then a max-pool across the per-path embeddings.
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umum_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umum_input)
        pooled = GlobalMaxPooling1D()(conv_umum(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path attention intentionally disabled; aggregate paths by max.
    output = GlobalMaxPooling1D()(output)
    return output
开发者ID:MaurizioFD,项目名称:RecSys2019_DeepLearning_Evaluation,代码行数:27,代码来源:MCRec.py
# Example 13: get_uuum_embedding (RecSys2019_DeepLearning_Evaluation, MCRec.py)
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    # Shared Conv1D over each UUUM path, per-path global max pooling and
    # dropout, then a max-pool across the per-path embeddings.
    # (Parameter name `umum_input` kept from the original for compatibility.)
    conv_uuum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='uuum_conv')
    pooled_paths = []
    for path_idx in range(path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length),
                            arguments={'index': path_idx})(umum_input)
        pooled = GlobalMaxPooling1D()(conv_uuum(path_input))
        pooled_paths.append(Dropout(0.5)(pooled))
    output = pooled_paths[0] if path_num == 1 else concatenate(pooled_paths)
    output = Reshape((path_num, 128))(output)
    # Path attention intentionally disabled; aggregate paths by max.
    output = GlobalMaxPooling1D()(output)
    return output
开发者ID:MaurizioFD,项目名称:RecSys2019_DeepLearning_Evaluation,代码行数:27,代码来源:MCRec.py
# Example 14: ConvolutionLayer (WeSHClass, models.py)
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None, embedding_matrix=None, word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    # TextCNN over an externally supplied input tensor `x`; the embedding
    # is initialized from `embedding_matrix` when given, otherwise learned.
    if embedding_matrix is not None:
        z = Embedding(vocab_sz,
                      word_embedding_dim,
                      input_length=(input_shape,),
                      weights=[embedding_matrix],
                      trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz,
                      word_embedding_dim,
                      input_length=(input_shape,),
                      trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        branch = Convolution1D(filters=num_filters,
                               kernel_size=sz,
                               padding="valid",
                               activation=act,
                               strides=1,
                               kernel_initializer=init)(z)
        conv_blocks.append(GlobalMaxPooling1D()(branch))
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y)
开发者ID:yumeng5,项目名称:WeSHClass,代码行数:24,代码来源:models.py
# Example 15: build_model (pretrained, frozen embeddings)
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):
    # Stacked 1D ConvNet over frozen pretrained embeddings; 20-way softmax.
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    features = Embedding(input_dim=vocab_size,
                         output_dim=embedding_dim,
                         weights=[embedding_matrix],
                         input_length=sequence_length,
                         trainable=False,
                         name="embedding")(sequence_input)
    # Two conv + local-pool stages, then a final conv + global pool.
    for _ in range(2):
        features = Conv1D(128, 5, activation='relu')(features)
        features = MaxPooling1D(5)(features)
    features = Conv1D(128, 5, activation='relu')(features)
    features = GlobalMaxPooling1D()(features)
    features = Dense(128, activation='relu')(features)
    preds = Dense(20, activation='softmax')(features)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
# Example 16: build_model (embeddings learned from scratch)
def build_model(vocab_size, embedding_dim, sequence_length):
    # Same architecture as the pretrained variant, but the embedding layer
    # is trainable and randomly initialized.
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    features = Embedding(input_dim=vocab_size,
                         output_dim=embedding_dim,
                         input_length=sequence_length,
                         name="embedding")(sequence_input)
    for _ in range(2):
        features = Conv1D(128, 5, activation='relu')(features)
        features = MaxPooling1D(5)(features)
    features = Conv1D(128, 5, activation='relu')(features)
    features = GlobalMaxPooling1D()(features)
    features = Dense(128, activation='relu')(features)
    preds = Dense(20, activation='softmax')(features)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
# Example 17: cnn (toxic_comments, models.py)
def cnn(embedding_matrix, char_matrix, num_classes, max_seq_len, max_ll3_seq_len, num_filters=64, l2_weight_decay=0.0001, dropout_val=0.5, dense_dim=32, add_sigmoid=True, train_embeds=False, gpus=0, n_cnn_layers=1, pool='max', add_embeds=False):
    """Build a configurable 1D-CNN text classifier.

    :param embedding_matrix: pretrained word-embedding weights
    :param pool: 'max' or 'avg' pooling strategy
    :param add_embeds: also pool a conv branch taken directly off the embeddings
    :return: a Keras Model (optionally wrapped for multi-GPU)
    """
    if pool == 'max':
        Pooling = MaxPooling1D
        GlobalPooling = GlobalMaxPooling1D
    elif pool == 'avg':
        Pooling = AveragePooling1D
        GlobalPooling = GlobalAveragePooling1D
    else:
        # BUG FIX: previously an unknown `pool` value left Pooling/GlobalPooling
        # unbound, causing a confusing NameError later. Fail fast instead.
        raise ValueError("pool must be 'max' or 'avg', got {!r}".format(pool))
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    x = embeds
    # n_cnn_layers-1 conv + local-pool stages, then one conv + global pool.
    for i in range(n_cnn_layers - 1):
        x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
        x = Pooling(2)(x)
    x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    x = GlobalPooling()(x)
    if add_embeds:
        # Extra branch pooled directly from the raw embeddings.
        x1 = Conv1D(num_filters, 7, activation='relu', padding='same')(embeds)
        x1 = GlobalPooling()(x1)
        x = Concatenate()([x, x1])
    x = BatchNormalization()(x)
    x = Dropout(dropout_val)(x)
    x = Dense(dense_dim, activation='relu',
              kernel_regularizer=regularizers.l2(l2_weight_decay))(x)
    if add_sigmoid:
        x = Dense(num_classes, activation='sigmoid')(x)
    model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
开发者ID:Donskov7,项目名称:toxic_comments,代码行数:37,代码来源:models.py
# Example 18: create_model (CLS-vector classifier)
def create_model(self, hyper_parameters):
    """
    Build the classifier graph over the [CLS] position.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    # Keep only the first sequence position (the [CLS] token vector).
    x = Lambda(lambda seq: seq[:, 0:1, :])(embedding_output)
    # The original carried a commented-out TextCNN head here
    # (Conv1D branches + GlobalMaxPooling1D); only the CLS vector is used.
    x = Flatten()(x)
    # Softmax classification head.
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    self.model = Model(self.word_embedding.input, [dense_layer])
    self.model.summary(120)
开发者ID:yongzhuo,项目名称:Keras-TextClassification,代码行数:32,代码来源:graph.py
# Example 19: create_model (selectable sentence-encoding strategy)
def create_model(self, hyper_parameters):
    """
    Build the sentence-encoder classification graph.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output

    def win_mean(x):
        # Sliding-window mean over time, followed by an element-wise max
        # across all window positions (hierarchical pooling).
        window_means = []
        for start in range(self.len_max - self.n_win + 1):
            mean_slice = tf.reduce_mean(x[:, start:start + self.n_win, :], axis=1)
            window_means.append(tf.expand_dims(mean_slice, axis=-1))
        stacked = tf.concat(window_means, axis=-1)
        return tf.reduce_max(stacked, axis=-1)

    if self.encode_type == "HIERARCHICAL":
        x = Lambda(win_mean, output_shape=(self.embed_size,))(embedding)
    elif self.encode_type == "MAX":
        x = GlobalMaxPooling1D()(embedding)
    elif self.encode_type == "AVG":
        x = GlobalAveragePooling1D()(embedding)
    elif self.encode_type == "CONCAT":
        x = Concatenate()([GlobalMaxPooling1D()(embedding),
                           GlobalAveragePooling1D()(embedding)])
    else:
        raise RuntimeError("encode_type must be 'MAX', 'AVG', 'CONCAT', 'HIERARCHICAL'")
    output = Dense(self.label, activation=self.activate_classify)(x)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(132)
开发者ID:yongzhuo,项目名称:Keras-TextClassification,代码行数:37,代码来源:graph.py
# Example 20: create_model (plain global-max-pool classifier)
def create_model(self, hyper_parameters):
    """
    Build the simplest graph: embeddings -> global max pool -> softmax.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    pooled = GlobalMaxPooling1D()(self.word_embedding.output)
    output = Dense(self.label, activation=self.activate_classify)(pooled)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(132)
开发者ID:yongzhuo,项目名称:Keras-TextClassification,代码行数:14,代码来源:graph.py
# Example 21: create_model (single-position classifier)
def create_model(self, hyper_parameters):
    """
    Build a classifier over a single sequence position.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    # NOTE(review): the original comment claimed this grabs [CLS], but
    # [:, -2:-1, :] selects the second-to-last position — confirm which
    # token is actually intended.
    x = Lambda(lambda seq: seq[:, -2:-1, :])(embedding_output)
    # The original carried a commented-out TextCNN head here; only the
    # selected position is used.
    x = Flatten()(x)
    # Softmax classification head.
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    self.model = Model(self.word_embedding.input, [dense_layer])
    self.model.summary(120)
开发者ID:yongzhuo,项目名称:Keras-TextClassification,代码行数:33,代码来源:graph.py
# Example 22: build_model_r_cnn
def build_model_r_cnn(self):
    ######### RCNN #########
    # BERT embeddings -> bidirectional RNN -> Conv1D -> global max pool -> softmax.
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    # Pick the recurrent cell: LSTM vs GRU, CuDNN-backed when requested.
    if args.use_lstm:
        layer_cell = CuDNNLSTM if args.use_cudnn_cell else LSTM
    else:
        layer_cell = CuDNNGRU if args.use_cudnn_cell else GRU
    x = Bidirectional(layer_cell(units=args.units,
                                 return_sequences=args.return_sequences,
                                 kernel_regularizer=regularizers.l2(args.l2 * 0.1),
                                 recurrent_regularizer=regularizers.l2(args.l2)
                                 ))(bert_output_emmbed)
    x = Dropout(args.keep_prob)(x)
    x = Conv1D(filters=int(self.embedding_dim / 2),
               kernel_size=2,
               padding='valid',
               kernel_initializer='normal',
               activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(args.keep_prob)(x)
    # Softmax classification head.
    dense_layer = Dense(self.label, activation=self.activation)(x)
    self.model = Model(bert_inputs, [dense_layer])
# Example 23: build_model_avt_cnn
def build_model_avt_cnn(self):
    ######### text-cnn #########
    # Per filter size, three pooled views of the same convolution are kept:
    # global max, global average, and attention-weighted average.
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_x, concat_y, concat_z = [], [], []
    for index, filter_size in enumerate(self.filters):
        conv = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                      filters=int(self.embedding_dim / 2),
                      kernel_size=self.filters[index],
                      padding='valid',
                      kernel_initializer='normal',
                      activation='relu')(bert_output_emmbed)
        concat_x.append(GlobalMaxPooling1D(name='TextCNN_MaxPooling1D_{}'.format(index))(conv))
        concat_y.append(GlobalAveragePooling1D(name='TextCNN_AveragePooling1D_{}'.format(index))(conv))
        concat_z.append(AttentionWeightedAverage(name='TextCNN_Annention_{}'.format(index))(conv))
    merge_x = Concatenate(axis=1)(concat_x)
    merge_y = Concatenate(axis=1)(concat_y)
    merge_z = Concatenate(axis=1)(concat_z)
    merge_xyz = Concatenate(axis=1)([merge_x, merge_y, merge_z])
    x = Dropout(self.keep_prob)(merge_xyz)
    # Softmax classification head.
    dense_layer = Dense(self.label, activation=self.activation)(x)
    self.model = Model(bert_inputs, [dense_layer])
# Example 24: forward (nlp_toolkit, dpcnn.py)
def forward(self):
    # Deep Pyramid CNN: region embedding -> two same-padding convolutions
    # with a residual connection -> repeated pyramid blocks -> global max pool.
    model_input = Input(shape=(self.maxlen,), dtype='int32', name='token')
    token_embs = Token_Embedding(model_input, self.nb_tokens, self.embedding_dim,
                                 self.token_embeddings, False, self.maxlen,
                                 self.embed_dropout_rate, name='token_embeddings')
    # Region embedding: one conv per kernel size (summed) or a single conv.
    if isinstance(self.region_kernel_size, list):
        per_size = [Conv1D(self.nb_filters, f, padding='same')(token_embs)
                    for f in self.region_kernel_size]
        region_embedding = add(per_size, name='region_embeddings')
    else:
        region_embedding = Conv1D(self.nb_filters, self.region_kernel_size,
                                  padding='same', name='region_embeddings')(token_embs)
    # Two same-padding convolutions with pre-activation.
    hidden = Activation('relu')(region_embedding)
    hidden = Conv1D(self.nb_filters, self.conv_kernel_size, padding='same', name='conv_1')(hidden)
    hidden = Activation('relu')(hidden)
    hidden = Conv1D(self.nb_filters, self.conv_kernel_size, padding='same', name='conv_2')(hidden)
    # Residual connection back to the region embedding.
    hidden = add([hidden, region_embedding], name='pre_block_hidden')
    for k in range(self.repeat_time):
        hidden = self._block(hidden, k)
    pooled = GlobalMaxPooling1D()(hidden)
    outputs = tc_output_logits(pooled, self.nb_classes, self.final_dropout_rate)
    self.model = Model(inputs=model_input, outputs=outputs, name="Deep Pyramid CNN")
开发者ID:stevewyl,项目名称:nlp_toolkit,代码行数:32,代码来源:dpcnn.py
# Example 25: cnn_model (Sarcasm-Detection, dl_models.py)
def cnn_model(**kwargs):
    # Two stacked valid-padding convolutions over the provided embeddings,
    # collapsed with global max pooling.
    features = Conv1D(filters=kwargs['hidden_units'],
                      kernel_size=3,
                      kernel_initializer='he_normal',
                      padding='valid',
                      activation='relu')(kwargs['embeddings'])
    features = Conv1D(filters=kwargs['hidden_units'],
                      kernel_size=3,
                      kernel_initializer='he_normal',
                      padding='valid',
                      activation='relu')(features)
    features = GlobalMaxPooling1D()(features)
    # Alternative (from the original): MaxPooling1D(pool_size=3) + Flatten()
    # instead of global max pooling.
    return features
# A model using Long Short Term Memory (LSTM) Units
开发者ID:MirunaPislar,项目名称:Sarcasm-Detection,代码行数:14,代码来源:dl_models.py
# Example 26: c2r (Neural-Headline-Generator-CN, models.py)
def c2r(dic_len, input_length, output_length, emb_dim=128, hidden=512, nb_filter=64, deepth=(1, 1), stride=3):
    """Conv-encoder / LSTM-decoder sequence-to-sequence model.

    :param dic_len: vocabulary size
    :param input_length: encoder input sequence length
    :param output_length: decoder output sequence length
    :param emb_dim: embedding dimension
    :param hidden: LSTM hidden units
    :param nb_filter: Conv1D filters per encoder layer
    :param deepth: (encoder_conv_layers, decoder_lstm_layers)
    :param stride: unused; kept for interface compatibility
    :return: compiled Keras Sequential model
    """
    model = Sequential()
    model.add(Embedding(input_dim=dic_len, output_dim=emb_dim, input_length=input_length))
    for _ in range(deepth[0]):
        model.add(Conv1D(nb_filter, 3, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(RepeatVector(output_length))
    # BUG FIX: the decoder loop previously reused deepth[0], so the second
    # element of `deepth` (decoder depth) was silently ignored.
    for _ in range(deepth[1]):
        model.add(LSTM(hidden, return_sequences=True))
    model.add(TimeDistributed(Dense(units=dic_len, activation='softmax')))
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
    return model
开发者ID:QuantumLiu,项目名称:Neural-Headline-Generator-CN,代码行数:15,代码来源:models.py
# Example 27: PLayer
def PLayer(self, size, filters, activation, initializer, regularizer_param):
    # Returns a closure building: Conv1D -> BatchNorm -> activation -> global max pool.
    # (An earlier variant with 'valid' padding and an activity regularizer
    # was commented out in the original.)
    def f(input):
        conv = Convolution1D(filters=filters,
                             kernel_size=size,
                             padding='same',
                             kernel_initializer=initializer,
                             kernel_regularizer=l2(regularizer_param))(input)
        normed = BatchNormalization()(conv)
        activated = Activation(activation)(normed)
        return GlobalMaxPooling1D()(activated)
    return f
# Example 28: build_model (kopt, model.py)
def build_model(train_data, max_features=5000, maxlen=400, batch_size=32, embedding_dims=50, filters=250, kernel_size=3, hidden_dims=250):
    # Classic IMDB-style binary-sentiment CNN (embedding -> conv -> pool -> dense).
    print('Build model...')
    model = Sequential()
    # Efficient embedding layer mapping vocab indices into embedding_dims dims.
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(0.2))
    # Conv1D learns word-group filters of width kernel_size.
    model.add(Conv1D(filters, kernel_size,
                     padding='valid', activation='relu', strides=1))
    # Global max pooling over time.
    model.add(GlobalMaxPooling1D())
    # Vanilla hidden layer.
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    # Project onto a single unit and squash with a sigmoid.
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
    # model.fit(x_train, y_train,
    #           batch_size=batch_size,
    #           epochs=epochs,
    #           validation_data=(x_test, y_test))
开发者ID:Avsecz,项目名称:kopt,代码行数:42,代码来源:model.py
# Example 29: make_child_parent_branch
def make_child_parent_branch(token_emb, max_nodes, max_bi_relations):
    # Node-index input -> shared token embeddings -> separate child and
    # parent relation branches, summed and pooled into one graph embedding.
    node_indices = Input(shape=(max_nodes,), dtype='int32', name='node_inds')
    graph_node_embs = token_emb(node_indices)
    child_rel_outputs, child_rel_inputs = make_pair_branch(
        graph_node_embs, max_nodes, max_bi_relations, label='child')
    parent_rel_outputs, parent_rel_inputs = make_pair_branch(
        graph_node_embs, max_nodes, max_bi_relations, label='parent')
    merged = Add(name='child_parent_add')(child_rel_outputs + parent_rel_outputs)
    # Integrate node embeddings into a single graph embedding.
    pooled = GlobalMaxPooling1D()(merged)
    outputs = [pooled]
    inputs = [node_indices] + child_rel_inputs + parent_rel_inputs
    return outputs, inputs
# Example 30: build (awesome-text-classification, model.py)
def build(self):
    # TextCNN: (optionally pretrained) embeddings -> multi-width
    # convolutions -> global max pooling -> dense softmax head.
    sequence_input = Input(shape=(self.max_sequence_length,), dtype='int32')
    if self.weights is None:
        embedding = Embedding(
            self.vocab_size + 1,  # due to mask_zero
            self.embedding_dim,
            input_length=self.max_sequence_length,
        )(sequence_input)
    else:
        embedding = Embedding(
            self.weights.shape[0],  # due to mask_zero
            self.weights.shape[1],
            input_length=self.max_sequence_length,
            weights=[self.weights],
        )(sequence_input)
    convs = []
    for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
        branch = Conv1D(filters=num_filter,
                        kernel_size=filter_size,
                        activation='relu')(embedding)
        convs.append(GlobalMaxPooling1D()(branch))
    z = Concatenate()(convs)
    z = Dense(self.num_units)(z)
    z = Dropout(self.keep_prob)(z)
    z = Activation('relu')(z)
    pred = Dense(self.num_tags, activation='softmax')(z)
    model = Model(inputs=[sequence_input], outputs=[pred])
    return model
开发者ID:Hironsan,项目名称:awesome-text-classification,代码行数:34,代码来源:model.py
|