This tutorial collects practical code examples of the Python layers.Embedding method; we hope it helps you.
This article gathers typical usage examples of the keras.layers.Embedding method in Python. If you are wondering how exactly to use layers.Embedding, or are looking for working examples, the curated code samples below should help. You can also explore further usage examples from the keras.layers module. A total of 29 code examples of layers.Embedding are shown below, sorted by popularity by default. Upvote the examples you find useful; your feedback helps the system recommend better Python code samples.

Example 1: RNNModel

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
    image_model = Dense(embedding_size, activation='relu')(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length; the zero mask ignores those positions (an efficiency optimization).
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
    caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

    # Merge the two branches and add a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Developer: dabasajay | Project: Image-Caption-Generator | Lines: 27 | Source: model.py
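A minimal usage sketch for RNNModel above, assuming a hypothetical rnnConfig dictionary and InceptionV3 image features; all values are illustrative and not taken from the Image-Caption-Generator repository:

import numpy as np

# Hypothetical configuration; the keys match those read inside RNNModel.
rnnConfig = {'embedding_size': 256, 'dropout': 0.5, 'LSTM_units': 256, 'dense_units': 256}
model = RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnnConfig, model_type='inceptionv3')

# One training step pairs an image feature vector and a padded caption prefix
# with a one-hot encoded next word as the target.
image_features = np.random.rand(1, 2048).astype('float32')        # InceptionV3 features
caption_prefix = np.random.randint(1, 5000, size=(1, 34))          # illustrative word ids
next_word = np.zeros((1, 5000)); next_word[0, 42] = 1.0            # one-hot target
model.train_on_batch([image_features, caption_prefix], next_word)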
Example 2: get_model_41

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def get_model_41(params):
    embedding_weights = pickle.load(open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk", "rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                        input_length=params['sequence_length'], weights=embedding_weights))
    # model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    # model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))
    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    return model

# CRNN Arch for audio
Developer: sergiooramas | Project: tartarus | Lines: 22 | Source: models.py
Example 3: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(input=[inputs], output=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
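The CRF layer used here (and again in Example 14) is not part of core Keras; it comes from the separate keras-contrib package. A minimal import sketch, assuming keras-contrib is installed. Example 3 only needs the CRF class itself, since it uses the layer's own loss_function and accuracy attributes, while Example 14 also uses the standalone loss and metric:

# Assumes the keras-contrib package is installed alongside Keras.
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy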
Example 4: add_glove_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def add_glove_model(self):
    """Read and save the pretrained embedding model."""
    embeddings_index = {}
    try:
        f = open(self.embedded_dir)
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            assert (coefs.shape[0] == self.embed_size)
            embeddings_index[word] = coefs
        f.close()
    except OSError:
        print('Embedding file not found')
        exit()
    except AssertionError:
        print("Embedding vector size does not match the given embed size")
    return embeddings_index
Developer: Hsankesara | Project: DeepResearch | Lines: 22 | Source: HAN.py
Example 5: get_embedding_matrix

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def get_embedding_matrix(self):
    """Returns the embedding matrix."""
    embedding_matrix = np.random.random((len(self.word_index) + 1, self.embed_size))
    absent_words = 0
    for word, i in self.word_index.items():
        embedding_vector = self.embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
        else:
            # words not found in the embedding index keep their random initialization
            absent_words += 1
    if self.verbose == 1:
        print('Total absent words are', absent_words, 'which is',
              "%0.2f" % (absent_words * 100 / len(self.word_index)), '% of total words')
    return embedding_matrix
Developer: Hsankesara | Project: DeepResearch | Lines: 19 | Source: HAN.py
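Examples 4 and 5 only build the lookup table; the matrix still has to be handed to an Embedding layer. A minimal sketch of that final step, assuming an instance of the surrounding class (called han here, hypothetically) and an illustrative sequence length:

from keras.layers import Embedding

# Hypothetical wiring: feed the matrix from get_embedding_matrix() into an Embedding layer.
embedding_matrix = han.get_embedding_matrix()          # shape: (vocab_size + 1, embed_size)
embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],
                            output_dim=embedding_matrix.shape[1],
                            weights=[embedding_matrix],
                            input_length=100,          # illustrative sequence length
                            trainable=False)           # freeze the pretrained GloVe vectors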
Example 6: GeneratorPretraining

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def GeneratorPretraining(V, E, H):
    '''
    Model for Generator pretraining. This model's weights should be shared with Generator.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Returns:
        generator_pretraining: keras Model
            input: word ids, shape = (B, T)
            output: word probability, shape = (B, T, V)
    '''
    # In the comments below, B means batch size and T means the number of time steps.
    input = Input(shape=(None,), dtype='int32', name='Input')  # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H, return_sequences=True, name='LSTM')(out)  # (B, T, H)
    out = TimeDistributed(
        Dense(V, activation='softmax', name='DenseSoftmax'),
        name='TimeDenseSoftmax')(out)  # (B, T, V)
    generator_pretraining = Model(input, out)
    return generator_pretraining
Developer: tyo-yo | Project: SeqGAN | Lines: 24 | Source: models.py
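A quick instantiation sketch for the pretraining model; the sizes and the loss choice are illustrative assumptions, not the SeqGAN repository's training setup:

# Illustrative hyperparameters
V, E, H = 5000, 64, 64
generator_pre = GeneratorPretraining(V, E, H)
# The output has shape (B, T, V), so sparse categorical crossentropy against
# next-word id targets of shape (B, T, 1) is one reasonable choice.
generator_pre.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
generator_pre.summary()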
Example 7: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def __init__(self, sess, B, V, E, H, lr=1e-3):
    '''
    # Arguments:
        B: int, Batch size
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Optional Arguments:
        lr: float, learning rate, default is 0.001
    '''
    self.sess = sess
    self.B = B
    self.V = V
    self.E = E
    self.H = H
    self.lr = lr
    self._build_gragh()
    self.reset_rnn_state()
Developer: tyo-yo | Project: SeqGAN | Lines: 20 | Source: models.py
Example 8: Discriminator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def Discriminator(V, E, H=64, dropout=0.1):
    '''
    Discriminator model.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
        dropout: float
    # Returns:
        discriminator: keras model
            input: word ids, shape = (B, T)
            output: probability that the input is real data, shape = (B, 1)
    '''
    input = Input(shape=(None,), dtype='int32', name='Input')  # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H)(out)
    out = Highway(out, num_layers=1)
    out = Dropout(dropout, name='Dropout')(out)
    out = Dense(1, activation='sigmoid', name='FC')(out)
    discriminator = Model(input, out)
    return discriminator
Developer: tyo-yo | Project: SeqGAN | Lines: 24 | Source: models.py
Example 9: DiscriminatorConv

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def DiscriminatorConv(V, E, filter_sizes, num_filters, dropout):
    '''
    Alternative Discriminator model, currently unused because Keras doesn't support
    masking for Conv1D, which has a large impact on training.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        filter_sizes: list of int, Conv1D filter sizes
        num_filters: list of int, number of filters for each Conv1D
        dropout: float
    # Returns:
        discriminator: keras model
            input: word ids, shape = (B, T)
            output: probability that the input is real data, shape = (B, 1)
    '''
    input = Input(shape=(None,), dtype='int32', name='Input')  # (B, T)
    out = Embedding(V, E, name='Embedding')(input)  # (B, T, E)
    out = VariousConv1D(out, filter_sizes, num_filters)
    out = Highway(out, num_layers=1)
    out = Dropout(dropout, name='Dropout')(out)
    out = Dense(1, activation='sigmoid', name='FC')(out)
    discriminator = Model(input, out)
    return discriminator
Developer: tyo-yo | Project: SeqGAN | Lines: 26 | Source: models.py
Example 10: parse_args

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def parse_args():
    parser = argparse.ArgumentParser(description="Run GMF.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--num_factors', type=int, default=8,
                        help='Embedding size.')
    parser.add_argument('--regs', nargs='?', default='[0,0]',
                        help="Regularization for user and item embeddings.")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args()
Developer: hexiangnan | Project: neural_collaborative_filtering | Lines: 27 | Source: GMF.py
Example 11: get_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  init=init_normal, W_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  init=init_normal, W_regularizer=l2(regs[1]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = merge([user_latent, item_latent], mode='mul')

    # Final prediction layer
    # prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name='prediction')(predict_vector)

    model = Model(input=[user_input, item_input], output=prediction)
    return model
Developer: hexiangnan | Project: neural_collaborative_filtering | Lines: 27 | Source: GMF.py
Example 12: fasttext_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def fasttext_model(max_len=300, vocabulary_size=20000, embedding_dim=128, num_classes=4):
    model = Sequential()
    # the embedding layer maps vocabulary indices into embedding dimensions
    model.add(Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=max_len))
    # pool the embeddings
    model.add(GlobalAveragePooling1D())
    # multi-class output over num_classes
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
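A minimal training sketch for the fastText-style model above; the data here is random and purely illustrative:

import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical

model = fasttext_model(max_len=300, vocabulary_size=20000, embedding_dim=128, num_classes=4)

# Illustrative data: 32 documents of word ids, padded/truncated to max_len.
docs = [np.random.randint(1, 20000, size=np.random.randint(10, 400)) for _ in range(32)]
x = pad_sequences(docs, maxlen=300)
y = to_categorical(np.random.randint(0, 4, size=32), num_classes=4)
model.fit(x, y, batch_size=8, epochs=1)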
Example 13: get_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_regularizer=l2(regs[1]), input_length=1)

    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    predict_vector = Multiply()([user_latent, item_latent])

    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name='prediction')(predict_vector)

    model = Model(inputs=[user_input, item_input], outputs=prediction)
    return model
Developer: wyl6 | Project: Recommender-Systems-Samples | Lines: 19 | Source: GMF.py
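Example 13 appears to be the Keras 2 rewrite of the GMF model in Example 11 (init/W_regularizer/merge become kernel_initializer/embeddings_regularizer/Multiply). A minimal training sketch with made-up implicit-feedback data:

import numpy as np

model = get_model(num_users=1000, num_items=2000, latent_dim=8)
model.compile(optimizer='adam', loss='binary_crossentropy')

# Illustrative batch: (user id, item id) pairs with 0/1 interaction labels.
user_ids = np.random.randint(0, 1000, size=(256, 1))
item_ids = np.random.randint(0, 2000, size=(256, 1))
labels = np.random.randint(0, 2, size=(256, 1))
model.fit([user_ids, item_ids], labels, batch_size=64, epochs=1)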
Example 14: __build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def __build_model(self):
    model = Sequential()

    embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                output_dim=self.embedding_dim,
                                weights=[self.embedding_mat],
                                trainable=False)
    model.add(embedding_layer)

    bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
    model.add(bilstm_layer)

    model.add(TimeDistributed(Dense(256, activation="relu")))

    crf_layer = CRF(units=len(self.tags), sparse_target=True)
    model.add(crf_layer)

    model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
    model.summary()

    return model
Developer: fordai | Project: CCKS2019-Chinese-Clinical-NER | Lines: 23 | Source: model.py
Example 15: CapsuleNet

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Developer: WeavingWong | Project: DigiX_HuaWei_Population_Age_Attribution_Predict | Lines: 24 | Source: models.py
Example 16: CapsuleNet_v2

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Developer: WeavingWong | Project: DigiX_HuaWei_Population_Age_Attribution_Predict | Lines: 24 | Source: models.py
Example 17: model_keras

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def model_keras(num_words=3000, num_units=128):
    '''
    Build the RNN model.
    :param num_words: vocabulary size
    :param num_units: word-vector dimension; the number of LSTM units defaults to the same value
    :return: the compiled model
    '''
    data_input = Input(shape=[None])
    embedding = Embedding(input_dim=num_words, output_dim=num_units, mask_zero=True)(data_input)
    lstm = LSTM(units=num_units, return_sequences=True)(embedding)
    x = LSTM(units=num_units, return_sequences=True)(lstm)
    # Keras does not seem to support operating on y inside the model,
    # so we cannot reshape the targets the way TensorFlow allows.
    # x = Reshape(target_shape=[-1, num_units])(x)
    outputs = Dense(units=num_words, activation='softmax')(x)
    model = Model(inputs=data_input, outputs=outputs)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.adam(lr=0.01),
                  metrics=['accuracy'])
    return model
Example 18: test_tiny_concat_seq_random

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def test_tiny_concat_seq_random(self):
    np.random.seed(1988)
    max_features = 10
    embedding_dims = 4
    seq_len = 5
    num_channels = 6

    # Define a model
    input_tensor = Input(shape=(seq_len,))
    x1 = Embedding(max_features, embedding_dims)(input_tensor)
    x2 = Embedding(max_features, embedding_dims)(input_tensor)
    x3 = concatenate([x1, x2], axis=1)

    model = Model(inputs=[input_tensor], outputs=[x3])

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model, one_dim_seq_flags=[True])
Example 19: test_conv_batch_1d

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def test_conv_batch_1d(self):
    np.random.seed(1988)
    vocabulary_size = 4
    embedding_dimension = 6
    input_length = 10

    model = Sequential()
    model.add(
        Embedding(
            vocabulary_size,
            embedding_dimension,
            input_length=input_length,
            trainable=True,
        )
    )
    model.add(Conv1D(5, 2))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling1D(2))

    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    self._test_model(model, one_dim_seq_flags=[True])
Example 20: test_tiny_image_captioning_feature_merge

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def test_tiny_image_captioning_feature_merge(self):
    img_input_1 = Input(shape=(16, 16, 3))
    x = Conv2D(2, (3, 3))(img_input_1)
    x = Flatten()(x)
    img_model = Model([img_input_1], [x])

    img_input = Input(shape=(16, 16, 3))
    x = img_model(img_input)
    x = Dense(8, name="cap_dense")(x)
    x = Reshape((1, 8), name="cap_reshape")(x)

    sentence_input = Input(shape=(5,))  # max_length = 5
    y = Embedding(8, 8, name="cap_embedding")(sentence_input)
    z = concatenate([x, y], axis=1, name="cap_merge")

    combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
    self._test_model(combined_model, one_dim_seq_flags=[False, True])
Example 21: test_tiny_image_captioning

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def test_tiny_image_captioning(self):
    # use a conv layer as an image feature branch
    img_input_1 = Input(shape=(16, 16, 3))
    x = Conv2D(2, (3, 3))(img_input_1)
    x = Flatten()(x)
    img_model = Model(inputs=[img_input_1], outputs=[x])

    img_input = Input(shape=(16, 16, 3))
    x = img_model(img_input)
    x = Dense(8, name="cap_dense")(x)
    x = Reshape((1, 8), name="cap_reshape")(x)

    sentence_input = Input(shape=(5,))  # max_length = 5
    y = Embedding(8, 8, name="cap_embedding")(sentence_input)
    z = concatenate([x, y], axis=1, name="cap_merge")
    z = LSTM(4, return_sequences=True, name="cap_lstm")(z)
    z = TimeDistributed(Dense(8), name="cap_timedistributed")(z)

    combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
    self._test_model(combined_model, one_dim_seq_flags=[False, True])
Example 22: AlternativeRNNModel

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dense(embedding_size, activation='relu')(image_input)
    image_model = RepeatVector(max_len)(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length; the zero mask ignores those positions (an efficiency optimization).
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    # Since we predict the next word from the previous words (whose number grows with
    # every iteration over the caption), return_sequences must be True.
    caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merge the two branches and add a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
    final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
Developer: dabasajay | Project: Image-Caption-Generator | Lines: 34 | Source: model.py
Example 23: _get_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def _get_model(X, cat_cols, num_cols, n_uniq, n_emb, output_activation):
    inputs = []
    num_inputs = []
    embeddings = []
    for i, col in enumerate(cat_cols):
        if not n_uniq[i]:
            n_uniq[i] = X[col].nunique()
        if not n_emb[i]:
            n_emb[i] = max(MIN_EMBEDDING, 2 * int(np.log2(n_uniq[i])))

        _input = Input(shape=(1,), name=col)
        _embed = Embedding(input_dim=n_uniq[i], output_dim=n_emb[i], name=col + EMBEDDING_SUFFIX)(_input)
        _embed = Dropout(.2)(_embed)
        _embed = Reshape((n_emb[i],))(_embed)

        inputs.append(_input)
        embeddings.append(_embed)

    if num_cols:
        num_inputs = Input(shape=(len(num_cols),), name='num_inputs')
        merged_input = Concatenate(axis=1)(embeddings + [num_inputs])
        inputs = inputs + [num_inputs]
    else:
        merged_input = Concatenate(axis=1)(embeddings)

    x = BatchNormalization()(merged_input)
    x = Dense(128, activation='relu')(x)
    x = Dropout(.5)(x)
    x = BatchNormalization()(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(.5)(x)
    x = BatchNormalization()(x)
    output = Dense(1, activation=output_activation)(x)

    model = Model(inputs=inputs, outputs=output)
    return model, n_emb, n_uniq
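The entity-embedding model above takes one integer input per categorical column plus one block of numeric features, so the arrays fed at training time must follow the order of cat_cols. A minimal sketch with a hypothetical pandas DataFrame; MIN_EMBEDDING and EMBEDDING_SUFFIX are module-level constants assumed to be defined elsewhere, and the values below are stand-ins:

import numpy as np
import pandas as pd

# Assumed module-level constants (not shown in the example above).
MIN_EMBEDDING = 4
EMBEDDING_SUFFIX = '_emb'

# Hypothetical data: two label-encoded categorical columns, one numeric column, a binary target.
X = pd.DataFrame({'city': np.random.randint(0, 50, 1000),
                  'device': np.random.randint(0, 5, 1000),
                  'age': np.random.rand(1000)})
y = np.random.randint(0, 2, 1000)

model, n_emb, n_uniq = _get_model(X, cat_cols=['city', 'device'], num_cols=['age'],
                                  n_uniq=[50, 5], n_emb=[0, 0], output_activation='sigmoid')
model.compile(optimizer='adam', loss='binary_crossentropy')

# One array per categorical input, then the numeric block, matching the model's input order.
model.fit([X['city'].values.reshape(-1, 1),
           X['device'].values.reshape(-1, 1),
           X[['age']].values],
          y, epochs=1, batch_size=64)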
Example 24: build_generator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def build_generator(self):

    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)
Developer: eriklindernoren | Project: Keras-GAN | Lines: 28 | Source: cgan.py
Example 25: build_discriminator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def build_discriminator(self):

    model = Sequential()

    model.add(Dense(512, input_dim=np.prod(self.img_shape)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))

    model.summary()

    img = Input(shape=self.img_shape)
    label = Input(shape=(1,), dtype='int32')

    label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
    flat_img = Flatten()(img)

    model_input = multiply([flat_img, label_embedding])

    validity = model(model_input)

    return Model([img, label], validity)
Developer: eriklindernoren | Project: Keras-GAN | Lines: 28 | Source: cgan.py
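In Examples 24 and 25 the Embedding layer turns a class label into a dense vector that conditions the GAN: it is multiplied element-wise with the noise vector (generator) or the flattened image (discriminator). A minimal sampling sketch, assuming a hypothetical trained CGAN object named cgan whose generator attribute holds the Model returned by build_generator(), with latent_dim=100 and 10 classes:

import numpy as np

# Hypothetical trained conditional GAN instance.
noise = np.random.normal(0, 1, size=(10, 100))              # one noise vector per class
labels = np.arange(10).reshape(-1, 1)                        # class ids 0..9
generated_images = cgan.generator.predict([noise, labels])   # shape: (10,) + img_shape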
Example 26: build_generator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)
Developer: eriklindernoren | Project: Keras-GAN | Lines: 30 | Source: acgan.py
Example 27: buildModel_RNN

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def buildModel_RNN(word_index, embeddings_index, nClasses, MAX_SEQUENCE_LENGTH, EMBEDDING_DIM):
    '''
    word_index: word index; embeddings_index: embedding index (see data_helper.py);
    nClasses: number of classes; MAX_SEQUENCE_LENGTH: maximum length of the text sequences;
    EMBEDDING_DIM: dimension of the word embedding (see data_helper.py).
    Output: RNN model
    '''
    model = Sequential()
    embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in the embedding index keep their random initialization
            embedding_matrix[i] = embedding_vector
    model.add(Embedding(len(word_index) + 1,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=True))
    model.add(GRU(100, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(nClasses, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    return model
Example 28: word2vec_embedding_layer

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def word2vec_embedding_layer(embedding_matrix, train=False):
    layer = Embedding(input_dim=embedding_matrix.shape[0],
                      output_dim=embedding_matrix.shape[1],
                      weights=[embedding_matrix],
                      trainable=train)
    return layer
Developer: GauravBh1010tt | Project: DeepLearn | Lines: 5 | Source: dl.py
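A quick sketch of using the helper above, assuming an embedding matrix already built from pretrained word2vec vectors; the matrix and classifier here are illustrative stand-ins:

import numpy as np
from keras.models import Sequential
from keras.layers import GlobalAveragePooling1D, Dense

# Stand-in for a (vocab_size, embedding_dim) matrix loaded from pretrained word2vec vectors.
embedding_matrix = np.random.rand(10000, 300).astype('float32')

model = Sequential()
model.add(word2vec_embedding_layer(embedding_matrix, train=False))  # frozen pretrained vectors
model.add(GlobalAveragePooling1D())
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')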
Example 29: word2vec_embedding_layer

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Embedding [as alias]
def word2vec_embedding_layer(embedding_matrix):
    # weights = np.load('Word2Vec_QA.syn0.npy')
    layer = Embedding(input_dim=embedding_matrix.shape[0],
                      output_dim=embedding_matrix.shape[1],
                      weights=[embedding_matrix])
    return layer
Developer: GauravBh1010tt | Project: DeepLearn | Lines: 6 | Source: p3_cnn.py