This tutorial collects practical code examples of the Python keras.layers.Bidirectional method; we hope it helps.

This article compiles typical usage examples of keras.layers.Bidirectional drawn from open-source projects. If you are unsure what layers.Bidirectional does or how to call it, the curated examples below should help; you can also explore the other utilities in the keras.layers module. Thirty code examples of layers.Bidirectional are shown, sorted by popularity by default. Upvote the ones you find useful; your feedback helps the system recommend better Python code samples.

Example 1: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM,
                            input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1,
                 padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
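Before the project-specific examples, here is a minimal, self-contained sketch of what Bidirectional itself does: it runs the wrapped recurrent layer over the sequence forward and backward and merges the two outputs (concatenation by default, so the feature dimension doubles). The shapes and random data below are illustrative, not taken from any of the projects above.

import numpy as np
from keras.layers import Bidirectional, Input, LSTM
from keras.models import Model

# Toy input: batch of 4 sequences, 10 timesteps, 8 features (illustrative values).
inputs = Input(shape=(10, 8))
# merge_mode='concat' (the default) joins forward and backward outputs,
# so 32 LSTM units yield a 64-dimensional vector per sequence.
outputs = Bidirectional(LSTM(32), merge_mode='concat')(inputs)
model = Model(inputs, outputs)

x = np.random.random((4, 10, 8)).astype('float32')
print(model.predict(x).shape)  # (4, 64)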
Example 2: get_audio_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def get_audio_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 50
    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True,
                              dropout=0.4))(masked)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True,
                              dropout=0.4), name="utter")(lstm)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model
Example 3: get_bimodal_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def get_bimodal_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 10
    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True,
                              dropout=0.4), name="utter")(masked)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model
Example 4: __build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __build_model(self):
    model = Sequential()
    embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                output_dim=self.embedding_dim,
                                weights=[self.embedding_mat],
                                trainable=False)
    model.add(embedding_layer)
    bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
    model.add(bilstm_layer)
    model.add(TimeDistributed(Dense(256, activation="relu")))
    crf_layer = CRF(units=len(self.tags), sparse_target=True)
    model.add(crf_layer)
    model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
    model.summary()
    return model

Author: fordai | Project: CCKS2019-Chinese-Clinical-NER | Lines: 23 | Source: model.py
Example 5: CapsuleNet

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(num_capsule=n_capsule, dim_capsule=capsule_dim,
                routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model

Author: WeavingWong | Project: DigiX_HuaWei_Population_Age_Attribution_Predict | Lines: 24 | Source: models.py
Example 6: CapsuleNet_v2

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(num_capsule=n_capsule, dim_capsule=capsule_dim,
                routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model

Author: WeavingWong | Project: DigiX_HuaWei_Population_Age_Attribution_Predict | Lines: 24 | Source: models.py
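Examples 5 and 6 wrap CuDNNGRU, which only runs on a CUDA-enabled GPU. A common pattern, sketched below under the assumption of a TensorFlow 1.x-era backend (where these CuDNN layers live), is to fall back to the plain GRU on CPU-only machines; the availability check is our addition, not part of the original project. Note the CuDNN variants accept neither dropout nor a custom activation, so a fallback is not a perfect drop-in.

import tensorflow as tf
from keras.layers import GRU

try:
    from keras.layers import CuDNNGRU
except ImportError:  # not every Keras build exposes the CuDNN layers
    CuDNNGRU = None

# Pick the CuDNN-accelerated cell only when a GPU is actually present.
use_cudnn = CuDNNGRU is not None and bool(tf.test.is_gpu_available())
RecurrentCell = CuDNNGRU if use_cudnn else GRU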
Example 7: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_model(maxlen, chars, word_size, infer=False):
    """
    :param infer:
    :param maxlen:
    :param chars:
    :param word_size:
    :return:
    """
    sequence = Input(shape=(maxlen,), dtype='int32')
    embedded = Embedding(len(chars) + 1, word_size,
                         input_length=maxlen, mask_zero=True)(sequence)
    blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(embedded)
    output = TimeDistributed(Dense(5, activation='softmax'))(blstm)
    model = Model(inputs=sequence, outputs=output)
    if not infer:
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
    return model

Author: stephen-v | Project: zh-segmentation-keras | Lines: 19 | Source: lstm_model.py
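A hedged usage sketch for Example 7: the character inventory, checkpoint path, and input sequence below are made up for illustration; the real project loads its own vocabulary and trained weights.

import numpy as np

# Hypothetical call: 32-character sequences, a toy 1000-character vocabulary,
# 128-dimensional character embeddings, inference mode (no compile step).
model = create_model(maxlen=32, chars=range(1000), word_size=128, infer=True)
model.load_weights('lstm_model_weights.h5')  # hypothetical checkpoint path

x = np.random.randint(1, 1001, size=(1, 32))  # one padded character-ID sequence
tag_probs = model.predict(x)                  # shape (1, 32, 5): per-character tag scores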
Example 8: create_lstm

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_lstm(hidden_units=[50], dropout=0.05, bidirectional=True):
    model = Sequential()
    if bidirectional:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                model.add(Bidirectional(LSTM(unit, dropout=dropout, return_sequences=True),
                                        input_shape=(None, config.N_MELS)))
            else:
                model.add(Bidirectional(LSTM(unit, dropout=dropout, return_sequences=True)))
    else:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                # input_shape belongs inside the layer, not in model.add()
                model.add(LSTM(unit, dropout=dropout, return_sequences=True,
                               input_shape=(None, config.N_MELS)))
            else:
                model.add(LSTM(unit, dropout=dropout, return_sequences=True))
    model.add(TimeDistributed(Dense(config.CLASSES, activation='sigmoid')))
    return model

Author: qlemaire22 | Project: speech-music-detection | Lines: 25 | Source: lstm.py
Example 9: __build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __build_model(self, emb_matrix=None):
    word_input = Input(shape=(None,), dtype='int32', name="word_input")
    word_emb = Embedding(self.vocab_size + 1, self.embed_dim,
                         weights=[emb_matrix] if emb_matrix is not None else None,
                         trainable=emb_matrix is None,
                         name='word_emb')(word_input)
    bilstm_output = Bidirectional(LSTM(self.bi_lstm_units // 2,
                                       return_sequences=True))(word_emb)
    bilstm_output = Dropout(self.dropout_rate)(bilstm_output)
    output = Dense(self.chunk_size + 1, kernel_initializer="he_normal")(bilstm_output)
    output = CRF(self.chunk_size + 1, sparse_target=self.sparse_target)(output)
    model = Model([word_input], [output])
    parallel_model = model
    if self.num_gpu > 1:
        parallel_model = multi_gpu_model(model, gpus=self.num_gpu)
    parallel_model.compile(optimizer=self.optimizer, loss=crf_loss,
                           metrics=[crf_accuracy])
    return model, parallel_model

Author: GlassyWing | Project: bi-lstm-crf | Lines: 25 | Source: core.py
Example 10: bidLstm

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def bidLstm(maxlen, embed_size, recurrent_units, dropout_rate,
            recurrent_dropout_rate, dense_size, nb_classes):
    # inp = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size))
    # x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True,
                           dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    # x = Dropout(dropout_rate)(x)
    x = Attention(maxlen)(x)
    # x = AttentionWeightedAverage(maxlen)(x)
    # print('len(x):', len(x))
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# conv + GRU with embeddings

Author: kermitt2 | Project: delft | Lines: 23 | Source: models.py
Example 11: build_model_bilstm_single

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def build_model_bilstm_single(self):
    if args.use_lstm:
        layer_cell = CuDNNLSTM if args.use_cudnn_cell else LSTM
    else:
        layer_cell = CuDNNGRU if args.use_cudnn_cell else GRU
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # Bi-LSTM
    x = Bidirectional(layer_cell(units=args.units,
                                 return_sequences=args.return_sequences,
                                 kernel_regularizer=regularizers.l2(args.l2 * 0.1),
                                 recurrent_regularizer=regularizers.l2(args.l2)))(bert_output)
    x = Dropout(args.keep_prob)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(args.label, activation=args.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 12: build_model_bilstm_layers

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def build_model_bilstm_layers(self):
    if args.use_lstm:
        layer_cell = CuDNNLSTM if args.use_cudnn_cell else LSTM
    else:
        layer_cell = CuDNNGRU if args.use_cudnn_cell else GRU
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # Bi-LSTM
    x = Bidirectional(layer_cell(units=args.units,
                                 return_sequences=args.return_sequences))(bert_output)
    # final CRF tagging head
    x = TimeDistributed(Dropout(self.keep_prob))(x)
    dense_layer = Dense(args.max_seq_len, activation=args.activation)(x)
    crf = CRF(args.label, sparse_target=False, learn_mode="join", test_mode='viterbi')
    output_layers = crf(dense_layer)
    self.model = Model(bert_inputs, output_layers)
    self.model.summary(132)
Example 13: create_BiLSTM

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                  weights=None, train=True):
    model = Sequential()
    if weights is not None:
        model.add(Embedding(len(wordvecs) + 1, len(wordvecs['the']),
                            weights=[weights], trainable=train))
    else:
        model.add(Embedding(len(wordvecs) + 1, len(wordvecs['the']),
                            trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
    return model

Author: Artaches | Project: SSAN-self-attention-sentiment-analysis-classification | Lines: 25 | Source: lstm_bilstm.py
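A sketch of how Example 13 might be called. Note that the function reads len(wordvecs['the']) to infer the embedding dimension, so the vocabulary must contain the key 'the'; the toy vectors, shapes, and labels here are random stand-ins for real pretrained embeddings and data.

import numpy as np

EMB_DIM = 50
vocab = ['the', 'cat', 'sat']  # toy vocabulary; the real project uses pretrained vectors
wordvecs = {w: np.random.random(EMB_DIM) for w in vocab}

model = create_BiLSTM(wordvecs, lstm_dim=100, output_dim=2, dropout=0.5)
# Without an explicit weights matrix the Embedding is randomly initialised and trained.
x = np.random.randint(0, len(wordvecs) + 1, size=(8, 20))  # 8 padded sequences of 20 ids
y = np.eye(2)[np.random.randint(0, 2, size=8)]             # one-hot binary labels
model.fit(x, y, epochs=1, batch_size=4)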
Example 14: print_results

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def print_results(bi, file, out_file, file_type):
    names, results, std_devs, dim = test_embeddings(bi, file, file_type)

    rr = [[u'{0:.3f} \u00B1{1:.3f}'.format(r, s) for r, s in zip(result, std_dev)]
          for result, std_dev in zip(results, std_devs)]
    table_data = [[name] + result for name, result in zip(names, rr)]
    table = tabulate.tabulate(table_data,
                              headers=['dataset', 'acc', 'prec', 'rec', 'f1'],
                              tablefmt='simple', floatfmt='.3f')

    if out_file:
        with open(out_file, 'a') as f:
            f.write('\n')
            if bi:
                f.write('+++Bidirectional LSTM+++\n')
            else:
                f.write('+++LSTM+++\n')
            f.write(table)
            f.write('\n')
    else:
        print()
        if bi:
            print('Bidirectional LSTM')
        else:
            print('LSTM')
        print(table)

Author: Artaches | Project: SSAN-self-attention-sentiment-analysis-classification | Lines: 26 | Source: lstm_bilstm.py
Example 15: forward

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def forward(self):
    model_input = Input(shape=(self.maxlen,), dtype='int32', name='token')
    x = Token_Embedding(model_input, self.nb_tokens, self.embedding_dim,
                        self.token_embeddings, True, self.maxlen,
                        self.embed_dropout_rate, name='token_embeddings')
    x = Activation('tanh')(x)
    # A skip-connection from embedding to output eases gradient flow and gives access
    # to lower-level features; the merge ordering matters for consistency with the
    # pretrained model.
    lstm_0_output = Bidirectional(LSTM(self.rnn_size, return_sequences=True),
                                  name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(LSTM(self.rnn_size, return_sequences=True),
                                  name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x], name='concatenate')
    x = self.attention_layer(x)
    if self.return_attention:
        x, weights = x
    outputs = tc_output_logits(x, self.nb_classes, self.final_dropout_rate)
    if self.return_attention:
        outputs.append(weights)
        outputs = concatenate(outputs, axis=-1, name='outputs')
    self.model = Model(inputs=model_input, outputs=outputs, name="Bi_LSTM_Attention")
Example 16: emoji2vec_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def emoji2vec_model(embedding_matrix, emoji_vocab_size, word_vocab_size):
    emoji_model = Sequential()
    emoji_model.add(Embedding(emoji_vocab_size + 1, embedding_dim,
                              input_length=1, trainable=True))
    emoji_model.add(Reshape((embedding_dim,)))

    word_model = Sequential()
    word_model.add(Embedding(word_vocab_size + 1, embedding_dim,
                             weights=[embedding_matrix],
                             input_length=maximum_length, trainable=False))
    word_model.add(Bidirectional(LSTM(embedding_dim, dropout=0.5), merge_mode='sum'))

    model = Sequential()
    model.add(Merge([emoji_model, word_model], mode='concat'))  # legacy Keras 1.x Merge layer
    model.add(Dense(embedding_dim * 2, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    return emoji_model, word_model, model

# Solely based on emoji descriptions, obtain the emoji2vec representations for all possible emojis

Author: MirunaPislar | Project: Sarcasm-Detection | Lines: 18 | Source: emoji2vec.py
Example 17: AlternativeRNNModel

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector per image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector per image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dense(embedding_size, activation='relu')(image_input)
    image_model = RepeatVector(max_len)(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length; the zero mask skips padding (an efficiency gain).
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    # We predict the next word from the previous words (whose number grows with every
    # iteration over the caption), so return_sequences must be True.
    caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merge the two branches and add a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
    final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'],
                                       return_sequences=False))(final_model_1)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model

Author: dabasajay | Project: Image-Caption-Generator | Lines: 34 | Source: model.py
Example 18: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_model(time_window_size, metric):
    model = Sequential()
    model.add(Bidirectional(LSTM(units=64, dropout=0.2, recurrent_dropout=0.2),
                            input_shape=(time_window_size, 1)))
    model.add(Dense(units=time_window_size, activation='linear'))

    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # model.compile(optimizer="sgd", loss="mse", metrics=[metric])
    print(model.summary())
    return model

Author: chen0040 | Project: keras-anomaly-detection | Lines: 16 | Source: recurrent.py
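A hedged training sketch for Example 18: the model maps a window of a univariate series to a reconstruction of that window, so anomalies show up as large reconstruction error. The synthetic sine-wave data and window size are illustrative only.

import numpy as np

WINDOW = 50
series = np.sin(np.linspace(0, 100, 2000))  # toy univariate signal
windows = np.array([series[i:i + WINDOW] for i in range(len(series) - WINDOW)])

model = create_model(time_window_size=WINDOW, metric='mae')
# The network reconstructs each input window; inputs need a trailing channel axis.
model.fit(windows[..., np.newaxis], windows, epochs=2, batch_size=64)

recon = model.predict(windows[..., np.newaxis])
errors = np.mean(np.square(recon - windows), axis=-1)  # per-window reconstruction error
print('most anomalous window index:', int(np.argmax(errors)))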
Example 19: S_LSTM

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def S_LSTM(dimx=30, dimy=30, embedding_matrix=None, LSTM_neurons=32):
    inpx = Input(shape=(dimx,), dtype='int32', name='inpx')
    x = word2vec_embedding_layer(embedding_matrix, train='False')(inpx)
    inpy = Input(shape=(dimy,), dtype='int32', name='inpy')
    y = word2vec_embedding_layer(embedding_matrix, train='False')(inpy)

    # hx = LSTM(LSTM_neurons)(x)
    # hy = LSTM(LSTM_neurons)(y)

    # one Bi-LSTM shared between both inputs (siamese architecture)
    shared_lstm = Bidirectional(LSTM(LSTM_neurons, return_sequences=False),
                                merge_mode='sum')
    # shared_lstm = LSTM(LSTM_neurons, return_sequences=True)
    hx = shared_lstm(x)
    # hx = Dropout(0.2)(hx)
    hy = shared_lstm(y)
    # hy = Dropout(0.2)(hy)

    h1, h2 = hx, hy
    corr1 = Exp()([h1, h2])
    adadelta = optimizers.Adadelta()
    model = Model([inpx, inpy], corr1)
    model.compile(loss='binary_crossentropy', optimizer=adadelta)
    return model
Example 20: set_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def set_model(self):
    """Set the HAN model according to the given hyperparameters."""
    if self.hyperparameters['l2_regulizer'] is None:
        kernel_regularizer = None
    else:
        kernel_regularizer = regularizers.l2(self.hyperparameters['l2_regulizer'])
    if self.hyperparameters['dropout_regulizer'] is None:
        dropout_regularizer = 1
    else:
        dropout_regularizer = self.hyperparameters['dropout_regulizer']

    # word-level encoder
    word_input = Input(shape=(self.max_senten_len,), dtype='float32')
    word_sequences = self.get_embedding_layer()(word_input)
    word_lstm = Bidirectional(
        self.hyperparameters['rnn'](self.hyperparameters['rnn_units'],
                                    return_sequences=True,
                                    kernel_regularizer=kernel_regularizer))(word_sequences)
    word_dense = TimeDistributed(
        Dense(self.hyperparameters['dense_units'],
              kernel_regularizer=kernel_regularizer))(word_lstm)
    word_att = AttentionWithContext()(word_dense)
    wordEncoder = Model(word_input, word_att)

    # sentence-level encoder over the word encoder's outputs
    sent_input = Input(shape=(self.max_senten_num, self.max_senten_len), dtype='float32')
    sent_encoder = TimeDistributed(wordEncoder)(sent_input)
    sent_lstm = Bidirectional(
        self.hyperparameters['rnn'](self.hyperparameters['rnn_units'],
                                    return_sequences=True,
                                    kernel_regularizer=kernel_regularizer))(sent_encoder)
    sent_dense = TimeDistributed(
        Dense(self.hyperparameters['dense_units'],
              kernel_regularizer=kernel_regularizer))(sent_lstm)
    sent_att = Dropout(dropout_regularizer)(AttentionWithContext()(sent_dense))
    preds = Dense(len(self.classes),
                  activation=self.hyperparameters['activation'])(sent_att)
    self.model = Model(sent_input, preds)
    self.model.compile(loss=self.hyperparameters['loss'],
                       optimizer=self.hyperparameters['optimizer'],
                       metrics=self.hyperparameters['metrics'])

Author: Hsankesara | Project: DeepResearch | Lines: 35 | Source: HAN.py
Example 21: __input_layer

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __input_layer(self, dims, return_sequences):
    """Returns a GRU or LSTM input layer."""
    if self.current_params["bidirectional"]:
        return Bidirectional(self.__middle_hidden_layer(return_sequences),
                             input_shape=dims)
    if self.current_params["layer_type"] == "GRU":
        return GRU(self.current_params["hidden_neurons"],
                   input_shape=dims,
                   return_sequences=return_sequences,
                   kernel_initializer=self.current_params["kernel_initializer"],
                   recurrent_initializer=self.current_params["recurrent_initializer"],
                   recurrent_regularizer=self.__generate_regulariser(
                       self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                   bias_regularizer=self.__generate_regulariser(
                       self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                   dropout=self.current_params["dropout"],
                   recurrent_dropout=self.current_params["recurrent_dropout"])
    return LSTM(self.current_params["hidden_neurons"],
                input_shape=dims,
                return_sequences=return_sequences,
                kernel_initializer=self.current_params["kernel_initializer"],
                recurrent_initializer=self.current_params["recurrent_initializer"],
                recurrent_regularizer=self.__generate_regulariser(
                    self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                bias_regularizer=self.__generate_regulariser(
                    self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                dropout=self.current_params["dropout"],
                recurrent_dropout=self.current_params["recurrent_dropout"])

Author: mprhode | Project: malware-prediction-rnn | Lines: 30 | Source: RNN.py
Example 22: __hidden_layer

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __hidden_layer(self, return_sequences):
    """Returns a GRU or LSTM hidden layer."""
    layer = self.__middle_hidden_layer(return_sequences)
    if self.current_params["bidirectional"]:
        return Bidirectional(layer)
    return layer

Author: mprhode | Project: malware-prediction-rnn | Lines: 9 | Source: RNN.py
Example 23: rnn_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def rnn_model(max_len=400, vocabulary_size=20000, embedding_dim=128,
              hidden_dim=128, num_classes=4):
    print("Bidirectional LSTM...")
    inputs = Input(shape=(max_len,), dtype='int32')
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    lstm_layer = Bidirectional(LSTM(hidden_dim))(embedding)
    output = Dense(num_classes, activation='softmax')(lstm_layer)
    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    return model
Example 24: han_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def han_model(max_len=400, vocabulary_size=20000, embedding_dim=128,
              hidden_dim=128, max_sentences=16, num_classes=4):
    """
    Implementation of the document classification model described in
    `Hierarchical Attention Networks for Document Classification (Yang et al., 2016)`
    (https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf)
    :param max_len:
    :param vocabulary_size:
    :param embedding_dim:
    :param hidden_dim:
    :param max_sentences:
    :param num_classes:
    :return:
    """
    print("Hierarchical Attention Network...")
    inputs = Input(shape=(max_len,), dtype='int32')
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    lstm_layer = Bidirectional(LSTM(hidden_dim))(embedding)
    # lstm_layer_att = AttLayer(hidden_dim)(lstm_layer)
    sent_encoder = Model(inputs, lstm_layer)

    doc_inputs = Input(shape=(max_sentences, max_len), dtype='int32', name='doc_input')
    doc_encoder = TimeDistributed(sent_encoder)(doc_inputs)
    doc_layer = Bidirectional(LSTM(hidden_dim))(doc_encoder)
    # doc_layer_att = AttLayer(hidden_dim)(doc_layer)
    output = Dense(num_classes, activation='softmax')(doc_layer)
    model = Model(doc_inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    return model
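A usage sketch for Example 24 with made-up dimensions: documents are padded to a fixed number of sentences, each padded to a fixed token length, so the input tensor is (batch, max_sentences, max_len). The word IDs and labels are random placeholders.

import numpy as np

model = han_model(max_len=100, vocabulary_size=5000, embedding_dim=64,
                  hidden_dim=64, max_sentences=8, num_classes=4)

# One toy batch: 2 documents, 8 sentences each, 100 word ids per sentence.
docs = np.random.randint(0, 5000, size=(2, 8, 100))
labels = np.eye(4)[np.random.randint(0, 4, size=2)]
model.fit(docs, labels, epochs=1, batch_size=2)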
Example 25: model_8

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def model_8(input_dim, output_dim):
    """
    model_8 summary ...
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #
    =================================================================
    lstm_1 (LSTM)                (None, 50, 256)           328704
    _________________________________________________________________
    dropout_1 (Dropout)          (None, 50, 256)           0
    _________________________________________________________________
    lstm_2 (LSTM)                (None, 256)               525312
    _________________________________________________________________
    dropout_2 (Dropout)          (None, 256)               0
    _________________________________________________________________
    dense_1 (Dense)              (None, 64)                16448
    _________________________________________________________________
    activation_1 (Activation)    (None, 64)                0
    =================================================================
    Total params: 870,464
    Trainable params: 870,464
    Non-trainable params: 0
    _________________________________________________________________
    model_8 count_params ... 870464

    :param input_dim:
    :param output_dim:
    :return:
    """
    model = Sequential()
    model.add(LSTM(256, input_shape=input_dim, return_sequences=True,
                   recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(LSTM(256, input_shape=input_dim, return_sequences=False,
                   recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_8'

# Bidirectional LSTM (Many to One)
Example 26: model_9

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def model_9(input_dim, output_dim):
    """
    model_9 summary ...
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #
    =================================================================
    bidirectional_1 (Bidirection (None, 256)               657408
    _________________________________________________________________
    dense_1 (Dense)              (None, 64)                16448
    _________________________________________________________________
    activation_1 (Activation)    (None, 64)                0
    =================================================================
    Total params: 673,856
    Trainable params: 673,856
    Non-trainable params: 0
    _________________________________________________________________
    model_9 count_params ... 673856

    :param input_dim:
    :param output_dim:
    :return:
    """
    model = Sequential()
    model.add(Bidirectional(LSTM(256, return_sequences=False),
                            input_shape=input_dim, merge_mode='sum'))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_9'

# Bidirectional Deep LSTM (Many to One)
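Examples 26 and 27 pass merge_mode='sum', which adds the forward and backward outputs elementwise and keeps the dimensionality at 256; the default 'concat' would produce 512 features and roughly double the parameters of the following Dense layer. A tiny comparison, with illustrative shapes of our choosing:

from keras.layers import Bidirectional, Input, LSTM
from keras.models import Model

inp = Input(shape=(50, 64))
summed = Bidirectional(LSTM(256), merge_mode='sum')(inp)     # output dim stays 256
concat = Bidirectional(LSTM(256), merge_mode='concat')(inp)  # output dim becomes 512
print(Model(inp, summed).output_shape)   # (None, 256)
print(Model(inp, concat).output_shape)   # (None, 512)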
Example 27: model_10

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def model_10(input_dim, output_dim):
    """
    model_10 summary ...
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #
    =================================================================
    bidirectional_1 (Bidirection (None, 50, 128)           197632
    _________________________________________________________________
    bidirectional_2 (Bidirection (None, 128)               263168
    _________________________________________________________________
    dense_1 (Dense)              (None, 64)                8256
    _________________________________________________________________
    activation_1 (Activation)    (None, 64)                0
    =================================================================
    Total params: 469,056
    Trainable params: 469,056
    Non-trainable params: 0
    _________________________________________________________________
    model_10 count_params ... 469056

    :param input_dim:
    :param output_dim:
    :return:
    """
    model = Sequential()
    model.add(Bidirectional(LSTM(128, return_sequences=True),
                            input_shape=input_dim, merge_mode='sum'))
    model.add(Bidirectional(LSTM(128, return_sequences=False), merge_mode='sum'))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    return model, 'model_10'
Example 28: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    # x = Reshape((self.len_max, self.embed_size, 1))(embedding)
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # stacked Bi-RNN layers
    for nrl in range(self.num_rnn_layers):
        x = Bidirectional(layer_cell(units=self.rnn_units,
                                     return_sequences=True,
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(0.32 * 0.1),
                                     recurrent_regularizer=regularizers.l2(0.32)))(x)
        x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)

Author: yongzhuo | Project: Keras-TextClassification | Lines: 37 | Source: graph.py
Example 29: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __init__(self, hyper_parameters):
    """
    Initialization.
    :param hyper_parameters: json, hyperparameters
    """
    self.rnn_type = hyper_parameters['model'].get('rnn_type', 'Bidirectional-LSTM')
    self.rnn_units = hyper_parameters['model'].get('rnn_units', 256)
    self.attention_units = hyper_parameters['model'].get('attention_units',
                                                         self.rnn_units * 2)
    # note: the key 'droupout_spatial' is misspelled in the upstream project;
    # kept as-is for compatibility
    self.dropout_spatial = hyper_parameters['model'].get('droupout_spatial', 0.2)
    self.len_max_sen = hyper_parameters['model'].get('len_max_sen', 50)
    super().__init__(hyper_parameters)

Author: yongzhuo | Project: Keras-TextClassification | Lines: 13 | Source: graph.py
Example 30: word_level

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def word_level(self):
    x_input_word = Input(shape=(self.len_max, self.embed_size))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_word)
    x = Bidirectional(GRU(units=self.rnn_units,
                          return_sequences=True,
                          activation='relu',
                          kernel_regularizer=regularizers.l2(self.l2),
                          recurrent_regularizer=regularizers.l2(self.l2)))(x_input_word)
    out_sent = AttentionSelf(self.rnn_units * 2)(x)
    model = Model(x_input_word, out_sent)
    return model

Author: yongzhuo | Project: Keras-TextClassification | Lines: 13 | Source: graph.py