This tutorial of Python layers.LSTM code examples is quite practical; we hope it helps you.
This article collects typical usage examples of the layers.LSTM attribute from the Python module keras.layers. If you are wondering how exactly to use layers.LSTM, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples from its parent module, keras.layers. A total of 29 code examples of layers.LSTM are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: create_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_model(time_window_size, metric):
    model = Sequential()
    model.add(Conv1D(filters=256, kernel_size=5, padding='same',
                     activation='relu', input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(64))
    model.add(Dense(units=time_window_size, activation='linear'))

    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # model.compile(optimizer="sgd", loss="mse", metrics=[metric])
    print(model.summary())
    return model
Developer: chen0040 | Project: keras-anomaly-detection | Lines: 20 | Source: recurrent.py
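A quick smoke test of the function above might look like the following sketch; the imports, window size, and random series are illustrative assumptions, not part of the original project:

import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, LSTM, Dense

window = 32                                      # hypothetical window size (divisible by the pool size)
model = create_model(time_window_size=window, metric='mae')
x = np.random.rand(100, window, 1)               # 100 synthetic time windows
model.fit(x, x.reshape(100, window), epochs=1)   # autoencoder-style reconstruction target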
Example 2: RNNModel

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector per image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector per image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
    image_model = Dense(embedding_size, activation='relu')(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length; the zero mask ignores the padding, which is an efficiency gain
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
    caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

    # Merge the two branches and create a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Developer: dabasajay | Project: Image-Caption-Generator | Lines: 27 | Source: model.py
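RNNModel reads its hyperparameters from the rnnConfig dictionary. A plausible configuration is sketched below; the concrete values are illustrative assumptions, not the project's published settings:

rnnConfig = {
    'embedding_size': 256,   # dimensionality of the shared embedding space
    'dropout': 0.5,          # dropout applied to the image and caption branches
    'LSTM_units': 256,       # hidden units of the caption LSTM
    'dense_units': 256,      # units of the pre-softmax dense layer
}
model = RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnnConfig, model_type='inceptionv3')
model.summary()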
Example 3: get_model_41

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_model_41(params):
    embedding_weights = pickle.load(
        open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk", "rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                        input_length=params['sequence_length'],
                        weights=embedding_weights))
    # model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    # model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(params["n_out"], kernel_initializer="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))

    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))

    return model

# CRNN Arch for audio
Developer: sergiooramas | Project: tartarus | Lines: 22 | Source: models.py
Example 4: train_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=np.float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True,
                  input_length=length, weights=[embedding_matrix], trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path)
Developer: jtyoui | Project: Jtyoui | Lines: 22 | Source: NER.py
Example 5: create_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
Example 6: __init__

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __init__(self, use_gpu: bool = False):
    import tensorflow as tf
    from keras.models import Sequential
    from keras.layers import Dense, Embedding
    from keras.layers import LSTM
    from keras.backend import set_session

    latent_dim = StructureModel.SEQUENCE_LENGTH * 8

    model = Sequential()
    model.add(Embedding(StructureFeatureAnalyzer.NUM_FEATURES,
                        StructureFeatureAnalyzer.NUM_FEATURES,
                        input_length=StructureModel.SEQUENCE_LENGTH))
    model.add(LSTM(latent_dim, dropout=0.2, return_sequences=False))
    model.add(Dense(StructureFeatureAnalyzer.NUM_FEATURES, activation='softmax'))
    model.summary()
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    self.model = model

    if use_gpu:
        # tf.ConfigProto / tf.Session are TensorFlow 1.x APIs
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
Developer: csvance | Project: armchair-expert | Lines: 25 | Source: structure.py
Example 7: get_audio_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_audio_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 50

    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")

    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4),
                         name="utter")(lstm)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

    model = Model(inputs, output)
    return model
Example 8: get_bimodal_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_bimodal_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 10

    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")

    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4),
                         name="utter")(masked)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

    model = Model(inputs, output)
    return model
Example 9: _build

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def _build(self):
    # the model that will be trained
    rnn_x = Input(shape=(None, Z_DIM + ACTION_DIM))
    lstm = LSTM(HIDDEN_UNITS, return_sequences=True, return_state=True)
    lstm_output, _, _ = lstm(rnn_x)
    mdn = Dense(Z_DIM)(lstm_output)
    rnn = Model(rnn_x, mdn)

    # the model used during prediction
    state_input_h = Input(shape=(HIDDEN_UNITS,))
    state_input_c = Input(shape=(HIDDEN_UNITS,))
    state_inputs = [state_input_h, state_input_c]
    _, state_h, state_c = lstm(rnn_x, initial_state=state_inputs)
    forward = Model([rnn_x] + state_inputs, [state_h, state_c])

    optimizer = Adam(lr=0.0001)
    # optimizer = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=True)
    rnn.compile(loss='mean_squared_error', optimizer=optimizer)

    return [rnn, forward]
Developer: marooncn | Project: navbot | Lines: 25 | Source: RNN.py
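Because both models share the same LSTM layer, weights learned by the trained rnn are reused by forward at prediction time. A minimal rollout sketch, with hypothetical sizes for Z_DIM, ACTION_DIM and HIDDEN_UNITS:

import numpy as np

# assume rnn, forward were obtained from _build() on a hypothetical instance
Z_DIM, ACTION_DIM, HIDDEN_UNITS = 32, 2, 256       # assumed sizes
h = np.zeros((1, HIDDEN_UNITS))                    # initial hidden state
c = np.zeros((1, HIDDEN_UNITS))                    # initial cell state
step = np.random.rand(1, 1, Z_DIM + ACTION_DIM)    # one latent+action step
h, c = forward.predict([step, h, c])               # advance the recurrent state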
Example 10: _build_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
    """Build a keras model and return a compiled model.

    :param max_history_len: The maximum number of historical
                            turns used to decide on next action
    """
    from keras.layers import LSTM, Activation, Masking, Dense
    from keras.models import Sequential

    n_hidden = 32  # size of hidden layer in LSTM
    # Build Model
    batch_shape = (None, max_history_len, num_features)

    model = Sequential()
    model.add(Masking(-1, batch_input_shape=batch_shape))
    model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
    model.add(Dense(input_dim=n_hidden, units=num_actions))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    logger.debug(model.summary())

    return model
Example 11: _build_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
    """Build a keras model and return a compiled model.

    :param max_history_len: The maximum number of historical
                            turns used to decide on next action
    """
    from keras.layers import LSTM, Activation, Masking, Dense
    from keras.models import Sequential

    n_hidden = 32  # Neural Net and training params
    batch_shape = (None, max_history_len, num_features)

    # Build Model
    model = Sequential()
    model.add(Masking(-1, batch_input_shape=batch_shape))
    model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
    model.add(Dense(input_dim=n_hidden, units=num_actions))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    logger.debug(model.summary())

    return model
Example 12: GeneratorPretraining

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def GeneratorPretraining(V, E, H):
    '''
    Model for Generator pretraining. This model's weights should be shared with Generator.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Returns:
        generator_pretraining: keras Model
            input: word ids, shape = (B, T)
            output: word probability, shape = (B, T, V)
    '''
    # in comments, B means batch size, T means length of time steps
    input = Input(shape=(None,), dtype='int32', name='Input')  # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H, return_sequences=True, name='LSTM')(out)  # (B, T, H)
    out = TimeDistributed(
        Dense(V, activation='softmax', name='DenseSoftmax'),
        name='TimeDenseSoftmax')(out)  # (B, T, V)
    generator_pretraining = Model(input, out)
    return generator_pretraining
Developer: tyo-yo | Project: SeqGAN | Lines: 24 | Source: models.py
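Pretraining then amounts to fitting per-timestep word probabilities on padded id sequences. A minimal smoke-test sketch with made-up sizes and dummy targets (not the repository's actual training pipeline):

import numpy as np

V, E, H = 1000, 64, 64                        # assumed vocabulary/embedding/hidden sizes
g = GeneratorPretraining(V, E, H)
g.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
x = np.random.randint(1, V, size=(8, 20))     # (B, T) word ids; 0 is reserved for padding
y = np.expand_dims(x, -1)                     # dummy sparse targets, shape (B, T, 1)
g.fit(x, y, epochs=1)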
Example 13: __init__

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __init__(self, sess, B, V, E, H, lr=1e-3):
    '''
    # Arguments:
        B: int, Batch size
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Optional Arguments:
        lr: float, learning rate, default is 0.001
    '''
    self.sess = sess
    self.B = B
    self.V = V
    self.E = E
    self.H = H
    self.lr = lr
    self._build_gragh()
    self.reset_rnn_state()
Developer: tyo-yo | Project: SeqGAN | Lines: 20 | Source: models.py
Example 14: Discriminator

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def Discriminator(V, E, H=64, dropout=0.1):
    '''
    Discriminator model.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
        dropout: float
    # Returns:
        discriminator: keras model
            input: word ids, shape = (B, T)
            output: probability of true data or not, shape = (B, 1)
    '''
    input = Input(shape=(None,), dtype='int32', name='Input')  # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H)(out)
    out = Highway(out, num_layers=1)
    out = Dropout(dropout, name='Dropout')(out)
    out = Dense(1, activation='sigmoid', name='FC')(out)

    discriminator = Model(input, out)
    return discriminator
Developer: tyo-yo | Project: SeqGAN | Lines: 24 | Source: models.py
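The discriminator scores a padded id sequence as real or generated. A minimal sketch, assuming the repository's Highway helper is importable and reusing the made-up sizes from above:

import numpy as np

d = Discriminator(V=1000, E=64, H=64, dropout=0.1)
d.compile(optimizer='adam', loss='binary_crossentropy')
real = np.random.randint(1, 1000, size=(8, 20))   # ids standing in for real data (hypothetical)
fake = np.random.randint(1, 1000, size=(8, 20))   # ids standing in for generator samples (hypothetical)
x = np.concatenate([real, fake])
y = np.concatenate([np.ones(8), np.zeros(8)])     # 1 = real, 0 = generated
d.fit(x, y, epochs=1)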
Example 15: test_lstm

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def test_lstm(self):
    x_train = np.random.random((100, 100, 100))
    y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
    x_test = np.random.random((20, 100, 100))
    y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model = Sequential()
    model.add(LSTM(32, return_sequences=True, input_shape=(100, 100)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    model.fit(x_train, y_train, batch_size=32, epochs=1)
    model.evaluate(x_test, y_test, batch_size=32)
Example 16: create_network

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    return model
Developer: Skuldur | Project: Classical-Piano-Composer | Lines: 24 | Source: lstm.py
Example 17: prepare_sequences

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def prepare_sequences(notes, pitchnames, n_vocab):
    """ Prepare the sequences used by the Neural Network """
    # map between notes and integers and back
    note_to_int = dict((note, number) for number, note in enumerate(pitchnames))

    sequence_length = 100
    network_input = []
    output = []
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        output.append(note_to_int[sequence_out])

    n_patterns = len(network_input)

    # reshape the input into a format compatible with LSTM layers
    normalized_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    # normalize input
    normalized_input = normalized_input / float(n_vocab)

    return (network_input, normalized_input)
Developer: Skuldur | Project: Classical-Piano-Composer | Lines: 24 | Source: predict.py
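The reshape at the end converts the list of integer windows into the (samples, timesteps, features) tensor expected by the LSTM stacks of Examples 16 and 18. A quick illustration with made-up note names (prepare_sequences itself assumes `import numpy` at module level):

notes = ['C4', 'E4', 'G4'] * 40              # hypothetical parsed note stream
pitchnames = sorted(set(notes))
n_vocab = len(pitchnames)
network_input, normalized_input = prepare_sequences(notes, pitchnames, n_vocab)
print(normalized_input.shape)                # (n_patterns, 100, 1), values scaled into [0, 1)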
Example 18: create_network

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_network(network_input, n_vocab):
    """ create the structure of the neural network """
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Load the weights to each node
    model.load_weights('weights.hdf5')

    return model
Developer: Skuldur | Project: Classical-Piano-Composer | Lines: 27 | Source: predict.py
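With the weights restored, generation proceeds by repeatedly predicting the next note id and sliding the seed window. The loop below is an illustrative reconstruction, reusing the variables from Example 17:

import numpy

# assume: model = create_network(normalized_input, n_vocab)
int_to_note = dict((number, note) for number, note in enumerate(pitchnames))
pattern = network_input[0]                   # seed sequence of note ids
prediction_output = []
for _ in range(50):                          # generate 50 notes
    x = numpy.reshape(pattern, (1, len(pattern), 1)) / float(n_vocab)
    index = numpy.argmax(model.predict(x, verbose=0))
    prediction_output.append(int_to_note[index])
    pattern = pattern[1:] + [index]          # slide the window forward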
Example 19: __init__

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __init__(self, config: ModelConfig) -> None:
    self.x_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
    # y_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
    self.grid_input = Input((config.obs_len, config.max_n_peds,
                             config.max_n_peds, config.grid_side_squared))
    self.zeros_input = Input((config.obs_len, config.max_n_peds,
                              config.lstm_state_dim))

    # Social LSTM layers
    self.lstm_layer = LSTM(config.lstm_state_dim, return_state=True)
    self.W_e_relu = Dense(config.emb_dim, activation="relu")
    self.W_a_relu = Dense(config.emb_dim, activation="relu")
    self.W_p = Dense(out_dim)

    self._build_model(config)
Example 20: __middle_hidden_layer

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __middle_hidden_layer(self, return_sequences):
    if self.current_params["layer_type"] == "GRU":
        layer = GRU(self.current_params["hidden_neurons"],
                    return_sequences=return_sequences,
                    kernel_initializer=self.current_params["kernel_initializer"],
                    recurrent_initializer=self.current_params["recurrent_initializer"],
                    recurrent_regularizer=self.__generate_regulariser(
                        self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                    bias_regularizer=self.__generate_regulariser(
                        self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                    dropout=self.current_params["dropout"],
                    recurrent_dropout=self.current_params["recurrent_dropout"])
    else:
        layer = LSTM(self.current_params["hidden_neurons"],
                     return_sequences=return_sequences,
                     kernel_initializer=self.current_params["kernel_initializer"],
                     recurrent_initializer=self.current_params["recurrent_initializer"],
                     recurrent_regularizer=self.__generate_regulariser(
                         self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                     bias_regularizer=self.__generate_regulariser(
                         self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                     dropout=self.current_params["dropout"],
                     recurrent_dropout=self.current_params["recurrent_dropout"])

    return layer
Developer: mprhode | Project: malware-prediction-rnn | Lines: 26 | Source: RNN.py
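Everything is read from the self.current_params dictionary, which makes the layer easy to drive from a hyperparameter search. One plausible, purely illustrative configuration:

current_params = {
    "layer_type": "LSTM",                # or "GRU"
    "hidden_neurons": 128,
    "kernel_initializer": "glorot_uniform",
    "recurrent_initializer": "orthogonal",
    "r_l1_reg": 0.0, "r_l2_reg": 0.01,   # recurrent-weight regularisation
    "b_l1_reg": 0.0, "b_l2_reg": 0.0,    # bias regularisation
    "dropout": 0.2,
    "recurrent_dropout": 0.1,
}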
Example 21: __build_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __build_model(self):
    model = Sequential()

    embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                output_dim=self.embedding_dim,
                                weights=[self.embedding_mat],
                                trainable=False)
    model.add(embedding_layer)

    bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
    model.add(bilstm_layer)

    model.add(TimeDistributed(Dense(256, activation="relu")))

    crf_layer = CRF(units=len(self.tags), sparse_target=True)
    model.add(crf_layer)

    model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
    model.summary()

    return model
Developer: fordai | Project: CCKS2019-Chinese-Clinical-NER | Lines: 23 | Source: model.py
Example 22: get_training_data

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_training_data(raw_dict, char_to_index):
    '''
    Generate data for training the LSTM from raw data.

    raw_dict: original data, e.g. {'title': "", 'strains': 'zzppz$ppzzp$...', 'paragraphs': "12345$67890$..."}
    char_to_index: dictionary mapping char to index

    return:
        X [input char sequences]
        Y [char labels]
    '''
    data_X = []
    data_Y = []
    for poem in raw_dict:
        n_chars = len(poem['paragraphs'])
        for i in range(0, n_chars - seq_len, 1):
            s_out = poem['paragraphs'][i + seq_len]
            # never output '$'
            if s_out == '$':
                continue
            s_in = poem['paragraphs'][i:i + seq_len]
            data_X.append([char_to_index[c] for c in s_in])
            data_Y.append(char_to_index[s_out])
    return data_X, data_Y
Developer: Clover27 | Project: ancient-Chinese-poem-generator | Lines: 27 | Source: model.py
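The function relies on a module-level seq_len and expects raw_dict to be a list of poem records. A tiny illustrative call (the poem data and sizes are made up):

seq_len = 5  # module-level window size assumed by the function

raw_dict = [{'title': '静夜思',
             'strains': 'zzppz$ppzzp$',
             'paragraphs': '床前明月光$疑是地上霜$'}]
chars = sorted(set(raw_dict[0]['paragraphs']))
char_to_index = {c: i for i, c in enumerate(chars)}
data_X, data_Y = get_training_data(raw_dict, char_to_index)
print(len(data_X), len(data_X[0]))  # number of windows, each of length seq_len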
Example 23: get_training_data2

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_training_data2(raw_dict, char_to_index):
    '''
    Generate data for training the LSTM from raw data, without considering '$'.

    raw_dict: original data, e.g. {'title': "", 'strains': 'zzppz$ppzzp$...', 'paragraphs': "12345$67890$..."}
    char_to_index: dictionary mapping char to index

    return:
        X [input char sequences]
        Y [char labels]
    '''
    data_X = []
    data_Y = []
    for poem in raw_dict:
        # str.replace returns a new string, so the result must be reassigned
        context = poem['paragraphs'].replace('$', '')
        n_chars = len(context)
        for i in range(0, n_chars - seq_len - 1, 1):
            s_out = context[i + seq_len - 1]
            s_in = context[i:i + seq_len - 1]
            data_X.append([char_to_index[c] for c in s_in])
            data_Y.append(char_to_index[s_out])
    return data_X, data_Y
Developer: Clover27 | Project: ancient-Chinese-poem-generator | Lines: 26 | Source: model.py
Example 24: train

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def train(X, Y, file, load_path):
    # define model
    model = Sequential()
    model.add(LSTM(n_mmu, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
    model.add(Dropout(dropout))
    model.add(LSTM(n_mmu, return_sequences=True))
    model.add(Dropout(dropout))
    if n_layer == 3:
        model.add(LSTM(n_mmu))
        model.add(Dropout(dropout))
    model.add(Dense(Y.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.save(file + "/model-{}-{}.h5".format(n_mmu, dropout))
    # define the checkpoint
    filepath = file + "/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1,
                                 save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    # loading
    if load_path != "":
        model.load_weights(load_path)
    # training
    model.fit(X, Y, epochs=epoch, batch_size=batch,
              callbacks=callbacks_list, validation_split=0.1)
Developer: Clover27 | Project: ancient-Chinese-poem-generator | Lines: 25 | Source: model.py
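train() pulls its hyperparameters from module-level globals. A sketch of plausible settings and a call; all values here are assumptions rather than the project's published configuration:

import os
import numpy as np

# module-level globals assumed by train()
n_mmu, n_layer = 256, 3      # memory units per LSTM, number of LSTM layers
dropout = 0.2
epoch, batch = 5, 64

os.makedirs("checkpoints", exist_ok=True)             # ModelCheckpoint needs the directory to exist
X = np.random.rand(500, 10, 1)                        # (samples, seq_len, features)
Y = np.eye(300)[np.random.randint(300, size=500)]     # one-hot char labels
train(X, Y, file="checkpoints", load_path="")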
Example 25: model_keras

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def model_keras(num_words=3000, num_units=128):
    '''
    Build the RNN model.
    :param num_words: vocabulary size
    :param num_units: word-vector dimension; the number of LSTM units defaults to the same value
    :return:
    '''
    data_input = Input(shape=[None])
    embedding = Embedding(input_dim=num_words, output_dim=num_units, mask_zero=True)(data_input)
    lstm = LSTM(units=num_units, return_sequences=True)(embedding)
    x = LSTM(units=num_units, return_sequences=True)(lstm)
    # Keras does not seem to support manipulating y internally, so we cannot reshape it the way TensorFlow does
    # x = Reshape(target_shape=[-1, num_units])(x)
    outputs = Dense(units=num_words, activation='softmax')(x)
    model = Model(inputs=data_input, outputs=outputs)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=0.01),
                  metrics=['accuracy'])
    return model
Example 26: model_0

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def model_0(input_dim, output_dim):
    """
    Total params: 127,584
    Trainable params: 127,584
    Non-trainable params: 0
    :param input_dim:
    :param output_dim:
    :return:
    """
    # build the model: a single LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(128, input_shape=input_dim))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))

    return model, 'model_0'

# summary of results for model_1 (deep 2):
Example 27: model_2

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def model_2(input_dim, output_dim):
    """
    Total params: 259,168
    Trainable params: 259,168
    Non-trainable params: 0
    :param input_dim:
    :param output_dim:
    :return:
    """
    model = Sequential()
    # model.add(LSTM(128, input_shape=(maxlen, len(chars))))
    model.add(LSTM(128, input_shape=input_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.1))
    model.add(LSTM(128, input_shape=input_dim, return_sequences=False, dropout=0.2, recurrent_dropout=0.1))
    # model.add(LSTM(128, activation='relu', dropout=0.2))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))

    return model, 'model_2'

# Summary of results for this model:
Example 28: model_6

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def model_6(input_dim, output_dim):
    model = Sequential()
    model.add(LSTM(128, input_shape=input_dim, return_sequences=True, recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(LSTM(128, input_shape=input_dim, return_sequences=False, recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))

    return model, 'model_6'

# ------------------------------------------------------------------------
# Unidirectional LSTM (Many to One)
#
# Summary of results for this model:
# Try 3: batch_size=128, lr=0.001
# With step=1 and neuron size 128 it was very bad; setting step=3 and neuron size=256 helped.
# With the Adam optimizer, lr=0.001 and step=3, the best model appears after epoch 61.
# Switching from RMSProp to Adam fixed the learning process.
Example 29: AlternativeRNNModel

# Required module: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector per image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector per image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dense(embedding_size, activation='relu')(image_input)
    image_model = RepeatVector(max_len)(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length; the zero mask ignores the padding, which is an efficiency gain
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    # Since we predict the next word from the previous words (whose number grows with every
    # iteration over the caption), we set return_sequences=True.
    caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merge the models and create a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
    final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
Developer: dabasajay | Project: Image-Caption-Generator | Lines: 34 | Source: model.py