This tutorial on the Python layers.CuDNNLSTM method and its code examples is meant to be practical; we hope it helps you.
This article collects typical usage examples of the Python method keras.layers.CuDNNLSTM. If you have been wondering how layers.CuDNNLSTM is used, what it does, or what working code looks like, the hand-picked examples below should help. You can also explore other usage examples from its parent module, keras.layers. A total of 8 code examples of layers.CuDNNLSTM are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.

Example 1: create

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def create(inputtokens, vocabsize, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')

    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding,
                    input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)

    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units))(net)
    net = Dropout(dropout)(net)

    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)

    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)
    return model
Developer ID: albarji, Project: neurowriter, Lines of code: 25, Source file: models.py
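As a quick orientation, here is a minimal sketch of how this factory might be exercised. The single-GPU stand-ins for the project's get_available_gpus and make_parallel helpers and all of the sizes are assumptions for illustration, not neurowriter's actual training code; it presumes the create function above is defined in the same script.

import numpy as np
from keras.layers import (Input, Embedding, Dropout, BatchNormalization,
                          Bidirectional, CuDNNLSTM, Dense)
from keras.models import Model

# Single-GPU stand-ins for the project helpers used inside create()
def get_available_gpus():
    return ['/gpu:0']

def make_parallel(model, ngpus):
    return model

# Hypothetical sizes: 50-token context, 5000-word vocabulary
model = create(inputtokens=50, vocabsize=5000, units=64, dropout=0.2, embedding=128)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')

# Dummy next-token prediction batch
X = np.random.randint(0, 5000, size=(32, 50))
y = np.random.randint(0, 5000, size=(32, 1))
model.fit(X, y, epochs=1)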
Example 2: nnet

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def nnet(inputs, keep_prob, num_classes):
    """
    Deep network model for single-lead signals.
    :param inputs: keras tensor, the single-lead signal after slicing and stacking.
    :param keep_prob: float, dropout probability for random segment masking.
    :param num_classes: int, number of target classes.
    :return: keras tensors, the class probabilities and the features extracted
        automatically before the fully connected layer.
    """
    branches = []
    for i in range(int(inputs.shape[-1])):
        ld = Lambda(Net.__slice, output_shape=(int(inputs.shape[1]), 1),
                    arguments={'index': i})(inputs)
        ld = Reshape((int(inputs.shape[1]), 1))(ld)
        bch = Net.__backbone(ld)
        branches.append(bch)
    features = Concatenate(axis=1)(branches)
    features = Dropout(keep_prob, [1, int(inputs.shape[-1]), 1])(features)
    features = Bidirectional(CuDNNLSTM(1, return_sequences=True),
                             merge_mode='concat')(features)
    features = Flatten()(features)
    net = Dense(units=num_classes, activation='softmax')(features)
    return net, features
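The Lambda layer above wraps Net.__slice, a private helper that this listing does not include (nor does it include the Net.__backbone feature extractor). As a rough guess at what the slicer probably does — selecting one lead from a (batch, timesteps, leads) tensor — here is a hypothetical sketch; inside the Net class it would be a static method:

def __slice(x, index):
    # Hypothetical reconstruction, not the project's actual code: keep a
    # single lead, preserving the trailing channel axis to match the
    # declared output_shape=(timesteps, 1)
    return x[:, :, index:index + 1]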
Example 3: build_and_compile_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def build_and_compile_model():
    seq_input = Input(shape=(lookback_window, 1), name='seq_input',
                      batch_shape=(1, lookback_window, 1))
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=True)(seq_input)
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=False)(x)
    output_1 = Dense(1, activation='linear', name='output_1')(x)
    weathernet = Model(inputs=seq_input, outputs=output_1)
    weathernet.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='mse')
    weathernet.summary()
    return weathernet

# Load existing model
Developer ID: produvia, Project: ai-platform, Lines of code: 14, Source file: main.py
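build_and_compile_model reads two module-level globals, lookback_window and LSTMunits, that are defined elsewhere in main.py. A minimal sketch of driving it, with assumed values for both globals and random numbers standing in for the weather series:

import numpy as np
import keras
from keras.layers import Input, CuDNNLSTM, Dense
from keras.models import Model

lookback_window = 72   # assumed: timesteps of history per window
LSTMunits = 128        # assumed: width of each CuDNNLSTM layer

weathernet = build_and_compile_model()

# batch_shape=(1, ...) fixes the batch size, so feed one window at a time
window = np.random.rand(1, lookback_window, 1)
target = np.random.rand(1, 1)
weathernet.fit(window, target, batch_size=1, epochs=1)
next_value = weathernet.predict(window, batch_size=1)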
Example 4: build_and_compile_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def build_and_compile_model():
    seq_input = Input(shape=(lookback_window, 1), name='seq_input',
                      batch_shape=(1, lookback_window, 1))
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=True)(seq_input)
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform',
                  recurrent_initializer='glorot_uniform',
                  return_sequences=False)(x)
    output_1 = Dense(1, activation='linear', name='output_1')(x)
    weathernet = Model(inputs=seq_input, outputs=output_1)
    weathernet.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='mse')
    weathernet.summary()
    return weathernet

# Predict
Example 5: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    # x = Reshape((self.len_max, self.embed_size, 1))(embedding)
    # Select the recurrent cell type
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # Bi-LSTM
    # CuDNNLSTM/CuDNNGRU have a fixed tanh activation and reject the
    # `activation` keyword, so it is only passed to the plain cells
    rnn_kwargs = dict(units=self.rnn_units,
                      return_sequences=True,
                      kernel_regularizer=regularizers.l2(0.32 * 0.1),
                      recurrent_regularizer=regularizers.l2(0.32))
    if layer_cell in (LSTM, GRU):
        rnn_kwargs['activation'] = 'relu'
    for nrl in range(self.num_rnn_layers):
        x = Bidirectional(layer_cell(**rnn_kwargs))(x)
        x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # Finally, a softmax output layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Developer ID: yongzhuo, Project: Keras-TextClassification, Lines of code: 37, Source file: graph.py
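A switch like the rnn_type dispatch above matters because CuDNNLSTM and CuDNNGRU only run on a CUDA-capable GPU, while the plain LSTM and GRU run anywhere. One way to pick the cell automatically, sketched under the assumption of a TensorFlow 1.x backend (tf.test.is_gpu_available is deprecated in TF 2.x):

import tensorflow as tf
from keras.layers import LSTM, GRU, CuDNNLSTM, CuDNNGRU

def pick_rnn_cell(prefer_lstm=True):
    # CuDNN cells need a CUDA GPU; fall back to the portable cells otherwise
    on_gpu = tf.test.is_gpu_available(cuda_only=True)
    if prefer_lstm:
        return CuDNNLSTM if on_gpu else LSTM
    return CuDNNGRU if on_gpu else GRU

layer_cell = pick_rnn_cell()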
Example 6: lstm

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def lstm(maxlen, embed_size, recurrent_units, dropout_rate,
         recurrent_dropout_rate, dense_size, nb_classes):
    # inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    # x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #               trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
             recurrent_dropout=dropout_rate)(input_layer)
    # x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    # x_c = AttentionWeightedAverage()(x)
    # x_a = MaxPooling1D(pool_size=2)(x)
    # x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model

# bidirectional LSTM
Developer ID: kermitt2, Project: delft, Lines of code: 29, Source file: models.py
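Because this variant takes already-embedded sequences directly (note the commented-out Embedding layer), it can be smoke-tested with random vectors, assuming the function above and its Keras imports are in scope. Every number below is made up:

import numpy as np

model = lstm(maxlen=100, embed_size=300, recurrent_units=64,
             dropout_rate=0.3, recurrent_dropout_rate=0.3,
             dense_size=32, nb_classes=6)

X = np.random.rand(8, 100, 300)           # 8 documents x 100 tokens x 300-d embeddings
y = np.random.randint(0, 2, size=(8, 6))  # 6 independent binary labels
model.fit(X, y, epochs=1, batch_size=4)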
Example 7: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(x)
    # Select the recurrent cell type
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # CNN
    convs = []
    for kernel_size in self.filters:
        conv = Conv1D(self.filters_num,
                      kernel_size=kernel_size,
                      strides=1,
                      padding='SAME',
                      kernel_regularizer=regularizers.l2(self.l2),
                      bias_regularizer=regularizers.l2(self.l2),
                      )(embedding_output_spatial)
        convs.append(conv)
    x = Concatenate(axis=1)(convs)
    # Bi-LSTM; the paper uses a plain LSTM
    # CuDNN cells reject the `activation` keyword (tanh is fixed),
    # so it is only passed to the plain cells
    rnn_kwargs = dict(units=self.rnn_units,
                      return_sequences=True,
                      kernel_regularizer=regularizers.l2(self.l2),
                      recurrent_regularizer=regularizers.l2(self.l2))
    if layer_cell in (LSTM, GRU):
        rnn_kwargs['activation'] = 'relu'
    x = Bidirectional(layer_cell(**rnn_kwargs))(x)
    x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # Finally, a softmax output layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Developer ID: yongzhuo, Project: Keras-TextClassification, Lines of code: 48, Source file: graph.py
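A practical note when the CuDNN cells are selected: since Keras 2.2, weights saved from a CuDNNLSTM layer can be loaded into a plain LSTM of the same size (and vice versa), so a GPU-trained model can serve predictions on CPU. A hedged sketch with made-up shapes and file name; the LSTM uses recurrent_activation='sigmoid' to match CuDNN's gate activation:

from keras.layers import Input, LSTM, Dense
from keras.models import Model

# Rebuild the trained architecture with plain LSTM cells for CPU inference.
# Assumes a GPU model with one CuDNNLSTM(64) layer and matching Dense head
# was saved with save_weights('gpu_trained_weights.h5'); sizes illustrative.
inp = Input(shape=(100, 300))
x = LSTM(64, recurrent_activation='sigmoid')(inp)  # mirrors CuDNNLSTM gating
out = Dense(6, activation='softmax')(x)
cpu_model = Model(inp, out)
cpu_model.load_weights('gpu_trained_weights.h5')  # Keras converts the CuDNN layout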
Example 8: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]

def create_model(self, hyper_parameters):
    """
    Build the neural network; a bit like RCNN.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = Activation('tanh')(x)
    # entire embedding channels are dropped out instead of the
    # normal Keras embedding dropout, which drops all channels for entire words
    # many of the datasets contain so few words that losing one or more words can alter the emotions completely
    x = SpatialDropout1D(self.dropout_spatial)(x)
    # Select the recurrent cell type
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
    # ordering of the way the merge is done is important for consistency with the pretrained model
    # CuDNN cells reject the `activation` keyword (tanh is fixed),
    # so it is only passed to the plain cells
    rnn_kwargs = dict(units=self.rnn_units,
                      return_sequences=True,
                      kernel_regularizer=regularizers.l2(self.l2),
                      recurrent_regularizer=regularizers.l2(self.l2))
    if layer_cell in (LSTM, GRU):
        rnn_kwargs['activation'] = 'relu'
    lstm_0_output = Bidirectional(layer_cell(**rnn_kwargs), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(layer_cell(**rnn_kwargs), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])
    # if return_attention is True in AttentionWeightedAverage, an additional tensor
    # representing the weight at each timestep is returned
    weights = None
    x = AttentionWeightedAverage(name='attlayer', return_attention=self.return_attention)(x)
    if self.return_attention:
        x, weights = x
    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # Finally, a softmax output layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Developer ID: yongzhuo, Project: Keras-TextClassification, Lines of code: 59, Source file: graph.py
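AttentionWeightedAverage is a custom layer (it originates in the DeepMoji codebase) and its implementation is not part of this listing. To give a sense of the mechanism — learn one score per timestep, softmax the scores over time, and return the attention-weighted mean of the hidden states — here is a minimal sketch of such a layer, not the original implementation and without the return_attention option:

from keras import backend as K
from keras.layers import Layer

class SimpleAttentionAverage(Layer):
    """Single-vector attention pooling: softmax over timesteps, weighted mean."""

    def build(self, input_shape):
        # One learned projection vector scoring each timestep
        self.W = self.add_weight(name='att_W',
                                 shape=(int(input_shape[-1]), 1),
                                 initializer='glorot_uniform')
        super(SimpleAttentionAverage, self).build(input_shape)

    def call(self, x):
        # (batch, time, features) -> (batch, time) attention logits
        logits = K.squeeze(K.dot(x, self.W), axis=-1)
        weights = K.softmax(logits)                       # attention over time
        return K.sum(x * K.expand_dims(weights), axis=1)  # weighted average

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])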