This article collects and summarizes typical usage examples of Python's keras.layers.GRU. If you have been wondering what exactly layers.GRU does, how to use it, or what working code looks like, the curated examples below should help. You can also explore other usage examples from its parent module, keras.layers. The 24 code examples of layers.GRU below are ordered by popularity.

Example 1: __middle_hidden_layer

# Required import: from keras import layers
# or: from keras.layers import GRU
def __middle_hidden_layer(self, return_sequences):
    if self.current_params["layer_type"] == "GRU":
        layer = GRU(self.current_params["hidden_neurons"],
                    return_sequences=return_sequences,
                    kernel_initializer=self.current_params["kernel_initializer"],
                    recurrent_initializer=self.current_params["recurrent_initializer"],
                    recurrent_regularizer=self.__generate_regulariser(
                        self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                    bias_regularizer=self.__generate_regulariser(
                        self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                    dropout=self.current_params["dropout"],
                    recurrent_dropout=self.current_params["recurrent_dropout"])
    else:
        layer = LSTM(self.current_params["hidden_neurons"],
                     return_sequences=return_sequences,
                     kernel_initializer=self.current_params["kernel_initializer"],
                     recurrent_initializer=self.current_params["recurrent_initializer"],
                     recurrent_regularizer=self.__generate_regulariser(
                         self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                     bias_regularizer=self.__generate_regulariser(
                         self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                     dropout=self.current_params["dropout"],
                     recurrent_dropout=self.current_params["recurrent_dropout"])
    return layer
Author: mprhode, Project: malware-prediction-rnn, Lines: 26, Source: RNN.py
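Before the remaining examples, a minimal standalone sketch may help isolate the core GRU signature that they all build on. The unit count and input shape below are illustrative assumptions, not taken from any of the examples:

import numpy as np
from keras.models import Sequential
from keras.layers import GRU, Dense

# Illustrative shapes: 5 timesteps of 8 features each, 16 recurrent units
model = Sequential()
model.add(GRU(16, input_shape=(5, 8)))  # returns only the last hidden state
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
model.predict(np.random.rand(2, 5, 8))  # input is (batch, timesteps, features)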
Example 2: test_tiny_no_sequence_gru_random

# Required import: from keras import layers
# or: from keras.layers import GRU
def test_tiny_no_sequence_gru_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 1
    input_length = 1
    num_channels = 1
    num_samples = 1

    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example 3: test_small_no_sequence_gru_random

# Required import: from keras import layers
# or: from keras.layers import GRU
def test_small_no_sequence_gru_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 1

    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model)
Example 4: test_medium_no_sequence_gru_random

# Required import: from keras import layers
# or: from keras.layers import GRU
def test_medium_no_sequence_gru_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_length = 1
    num_channels = 10

    # Define a model
    model = Sequential()
    model.add(
        GRU(
            num_channels,
            input_shape=(input_length, input_dim),
            recurrent_activation="sigmoid",
        )
    )

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example 5: test_gru_seq

# Required import: from keras import layers
# or: from keras.layers import GRU
def test_gru_seq(self):
    np.random.seed(1988)
    input_dim = 11
    input_length = 5

    # Define a model
    model = Sequential()
    model.add(
        GRU(20, input_shape=(input_length, input_dim), return_sequences=False)
    )

    # Set some random weights
    model.set_weights(
        [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
    )

    # Test the keras model
    self._test_model(model)
Example 6: test_tiny_mcrnn_music_tagger

# Required import: from keras import layers
# or: from keras.layers import GRU
def test_tiny_mcrnn_music_tagger(self):
    x_in = Input(shape=(4, 6, 1))
    x = ZeroPadding2D(padding=(0, 1))(x_in)
    x = BatchNormalization(axis=2, name="bn_0_freq")(x)
    # Conv block 1
    x = Conv2D(2, (3, 3), padding="same", name="conv1")(x)
    x = BatchNormalization(axis=3, name="bn1")(x)
    x = Activation("elu")(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool1")(x)
    # Conv block 2
    x = Conv2D(4, (3, 3), padding="same", name="conv2")(x)
    x = BatchNormalization(axis=3, name="bn2")(x)
    x = Activation("elu")(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool2")(x)

    # Should get you (1, 1, 2, 4)
    x = Reshape((2, 4))(x)
    x = GRU(32, return_sequences=True, name="gru1")(x)
    x = GRU(32, return_sequences=False, name="gru2")(x)

    # Create model.
    model = Model(x_in, x)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    self._test_model(model, mode="random_zero_mean", delta=1e-2)
Example 7: interp_net

# Required import: from keras import layers
# or: from keras.layers import GRU
def interp_net():
    if gpu_num > 1:
        dev = "/cpu:0"
    else:
        dev = "/gpu:0"
    with tf.device(dev):
        main_input = Input(shape=(4 * num_features, timestamp), name='input')
        sci = single_channel_interp(ref_points, hours_look_ahead)
        cci = cross_channel_interp()
        interp = cci(sci(main_input))
        reconst = cci(sci(main_input, reconstruction=True),
                      reconstruction=True)
        aux_output = Lambda(lambda x: x, name='aux_output')(reconst)
        z = Permute((2, 1))(interp)
        z = GRU(hid, activation='tanh', recurrent_dropout=0.2, dropout=0.2)(z)
        main_output = Dense(1, activation='sigmoid', name='main_output')(z)
        orig_model = Model([main_input], [main_output, aux_output])
    if gpu_num > 1:
        model = multi_gpu_model(orig_model, gpus=gpu_num)
    else:
        model = orig_model
    print(orig_model.summary())
    return model
Example 8: test_temporal_regression

# Required import: from keras import layers
# or: from keras.layers import GRU
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of LSTM units
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    model.add(layers.LSTM(y_train.shape[-1],
                          input_shape=(x_train.shape[1], x_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(x_train, y_train, epochs=5, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert(history.history['loss'][-1] < 1.)
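The original docstring advertised GRU units while the code builds an LSTM. Since this article's topic is GRU, here is a hedged sketch of the same regression test with a GRU layer swapped in. This is an assumed variant, not part of the original test suite, and it assumes the get_test_data helper is importable from keras.utils.test_utils as in older Keras test modules:

import numpy as np
from keras import layers
from keras.models import Sequential
# Assumed import path for the test-data helper used in the example above
from keras.utils.test_utils import get_test_data

def test_temporal_regression_gru():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    # A single GRU layer mapping (timesteps, features) to the target width
    model.add(layers.GRU(y_train.shape[-1],
                         input_shape=(x_train.shape[1], x_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(x_train, y_train, epochs=5, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert history.history['loss'][-1] < 1.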
Example 9: bidLstm

# Required import: from keras import layers
# or: from keras.layers import GRU
def bidLstm(maxlen, embed_size, recurrent_units, dropout_rate,
            recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True,
                           dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    #x = Dropout(dropout_rate)(x)
    x = Attention(maxlen)(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    #print('len(x):', len(x))
    #x = AttentionWeightedAverage(maxlen)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
Author: kermitt2, Project: delft, Lines: 23, Source: models.py
Example 10: cnn

# Required import: from keras import layers
# or: from keras.layers import GRU
# conv+GRU with embeddings
def cnn(maxlen, embed_size, recurrent_units, dropout_rate,
        recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units)(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
Author: kermitt2, Project: delft, Lines: 21, Source: models.py
Example 11: cnn2_best

# Required import: from keras import layers
# or: from keras.layers import GRU
def cnn2_best(maxlen, embed_size, recurrent_units, dropout_rate,
              recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
Author: kermitt2, Project: delft, Lines: 22, Source: models.py
Example 12: cnn2

# Required import: from keras import layers
# or: from keras.layers import GRU
def cnn2(maxlen, embed_size, recurrent_units, dropout_rate,
         recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same',
               activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
Author: kermitt2, Project: delft, Lines: 22, Source: models.py
Example 13: ctpn

# Required import: from keras import layers
# or: from keras.layers import GRU
def ctpn(base_features, num_anchors, rnn_units=128, fc_units=512):
    """
    CTPN network
    :param base_features: (B, H, W, C)
    :param num_anchors: number of anchors
    :param rnn_units:
    :param fc_units:
    :return:
    """
    x = layers.Conv2D(512, kernel_size=(3, 3), padding='same',
                      name='pre_fc')(base_features)  # [B, H, W, 512]
    # Run the RNN along the width dimension
    rnn_forward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True,
                   kernel_initializer='he_normal'),
        name='gru_forward')(x)
    rnn_backward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True,
                   kernel_initializer='he_normal', go_backwards=True),
        name='gru_backward')(x)
    rnn_output = layers.Concatenate(name='gru_concat')(
        [rnn_forward, rnn_backward])  # (B, H, W, 256)

    # Fully-connected layer implemented as a 1x1 conv
    fc_output = layers.Conv2D(fc_units, kernel_size=(1, 1), activation='relu',
                              name='fc_output')(rnn_output)  # (B, H, W, 512)

    # Classification
    class_logits = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1),
                                 name='cls')(fc_output)
    class_logits = layers.Reshape(target_shape=(-1, 2),
                                  name='cls_reshape')(class_logits)
    # Regression of the vertical center coordinate and the height
    predict_deltas = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1),
                                   name='deltas')(fc_output)
    predict_deltas = layers.Reshape(target_shape=(-1, 2),
                                    name='deltas_reshape')(predict_deltas)
    # Side refinement (only the x offset needs to be predicted)
    predict_side_deltas = layers.Conv2D(num_anchors, kernel_size=(1, 1),
                                        name='side_deltas')(fc_output)
    predict_side_deltas = layers.Reshape(target_shape=(-1, 1),
                                         name='side_deltas_reshape')(predict_side_deltas)
    return class_logits, predict_deltas, predict_side_deltas
Author: yizt, Project: keras-ctpn, Lines: 36, Source: models.py
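As a rough usage sketch, ctpn() only needs a (B, H, W, C) feature map; the toy backbone and anchor count below are illustrative assumptions, not what the keras-ctpn project actually uses:

from keras import layers, models

# Any convolutional backbone producing a (B, H, W, C) feature map will do; this
# single conv stands in for a real trunk such as a pretrained network.
inputs = layers.Input(shape=(None, None, 3))
base_features = layers.Conv2D(64, (3, 3), padding='same',
                              activation='relu')(inputs)
class_logits, deltas, side_deltas = ctpn(base_features, num_anchors=10)
model = models.Model(inputs, [class_logits, deltas, side_deltas])
model.summary()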
Example 14: buildModel_RNN

# Required import: from keras import layers
# or: from keras.layers import GRU
def buildModel_RNN(word_index, embeddings_index, nClasses,
                   MAX_SEQUENCE_LENGTH, EMBEDDING_DIM):
    '''
    word_index: word index; embeddings_index: embeddings index (see data_helper.py)
    nClasses: number of classes
    MAX_SEQUENCE_LENGTH: maximum length of text sequences
    EMBEDDING_DIM: dimension of the word embedding (see data_helper.py)
    output: RNN model
    '''
    model = Sequential()
    embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    model.add(Embedding(len(word_index) + 1,
                        EMBEDDING_DIM,
                        weights=[embedding_matrix],
                        input_length=MAX_SEQUENCE_LENGTH,
                        trainable=True))
    model.add(GRU(100, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(nClasses, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    return model
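A hypothetical call might look like the following; the toy word_index and embeddings_index stand in for the real ones produced by the project's data_helper.py:

import numpy as np

# Toy inputs (assumptions for illustration only)
word_index = {"hello": 1, "world": 2}
embeddings_index = {"hello": np.random.rand(50), "world": np.random.rand(50)}

model = buildModel_RNN(word_index, embeddings_index,
                       nClasses=3, MAX_SEQUENCE_LENGTH=100, EMBEDDING_DIM=50)
model.summary()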
Example 15: __input_layer

# Required import: from keras import layers
# or: from keras.layers import GRU
def __input_layer(self, dims, return_sequences):
    """ Returns a GRU or LSTM input layer """
    if self.current_params["bidirectional"] == True:
        return Bidirectional(self.__middle_hidden_layer(return_sequences),
                             input_shape=dims)
    else:
        if self.current_params["layer_type"] == "GRU":
            return GRU(self.current_params["hidden_neurons"],
                       input_shape=dims,
                       return_sequences=return_sequences,
                       kernel_initializer=self.current_params["kernel_initializer"],
                       recurrent_initializer=self.current_params["recurrent_initializer"],
                       recurrent_regularizer=self.__generate_regulariser(
                           self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                       bias_regularizer=self.__generate_regulariser(
                           self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                       dropout=self.current_params["dropout"],
                       recurrent_dropout=self.current_params["recurrent_dropout"])
        return LSTM(self.current_params["hidden_neurons"],
                    input_shape=dims,
                    return_sequences=return_sequences,
                    kernel_initializer=self.current_params["kernel_initializer"],
                    recurrent_initializer=self.current_params["recurrent_initializer"],
                    recurrent_regularizer=self.__generate_regulariser(
                        self.current_params["r_l1_reg"], self.current_params["r_l2_reg"]),
                    bias_regularizer=self.__generate_regulariser(
                        self.current_params["b_l1_reg"], self.current_params["b_l2_reg"]),
                    dropout=self.current_params["dropout"],
                    recurrent_dropout=self.current_params["recurrent_dropout"])
Author: mprhode, Project: malware-prediction-rnn, Lines: 30, Source: RNN.py
Example 16: __hidden_layer

# Required import: from keras import layers
# or: from keras.layers import GRU
def __hidden_layer(self, return_sequences):
    """ Returns a GRU or LSTM hidden layer """
    layer = self.__middle_hidden_layer(return_sequences)
    if self.current_params["bidirectional"] == True:
        return Bidirectional(layer)
    return layer
Author: mprhode, Project: malware-prediction-rnn, Lines: 9, Source: RNN.py
Example 17: __init__

# Required import: from keras import layers
# or: from keras.layers import GRU
def __init__(self, layer, attention_vec, attn_activation='tanh',
             single_attention_param=False, **kwargs):
    assert isinstance(layer, LSTM) or isinstance(layer, GRU)
    super(AttentionWrapper, self).__init__(layer, **kwargs)
    self.supports_masking = True
    self.attention_vec = attention_vec
    self.attn_activation = activations.get(attn_activation)
    self.single_attention_param = single_attention_param
Example 18: Encoder

# Required import: from keras import layers
# or: from keras.layers import GRU
def Encoder(hidden_size, activation=None, return_sequences=True,
            bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _encoder(x):
            if bidirectional:
                branch_1 = GRU(int(hidden_size / 2), activation='linear',
                               return_sequences=return_sequences,
                               go_backwards=False)(x)
                branch_2 = GRU(int(hidden_size / 2), activation='linear',
                               return_sequences=return_sequences,
                               go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _encoder(x):
            if bidirectional:
                branch_1 = LSTM(int(hidden_size / 2), activation='linear',
                                return_sequences=return_sequences,
                                go_backwards=False)(x)
                branch_2 = LSTM(int(hidden_size / 2), activation='linear',
                                return_sequences=return_sequences,
                                go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _encoder
Example 19: AttentionDecoder

# Required import: from keras import layers
# or: from keras.layers import GRU
def AttentionDecoder(hidden_size, activation=None, return_sequences=True,
                     bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(
                    GRU(int(hidden_size / 2), activation='linear',
                        return_sequences=return_sequences, go_backwards=False),
                    attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(
                    GRU(int(hidden_size / 2), activation='linear',
                        return_sequences=return_sequences, go_backwards=True),
                    attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                return activation(x)
            else:
                x = AttentionWrapper(
                    GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences),
                    attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(
                    LSTM(int(hidden_size / 2), activation='linear',
                         return_sequences=return_sequences, go_backwards=False),
                    attention, single_attention_param=True)(x)
                # Note: the original passed hidden_size here, which would give the
                # two branches different widths; int(hidden_size / 2) keeps them
                # symmetric, matching the GRU path above.
                branch_2 = AttentionWrapper(
                    LSTM(int(hidden_size / 2), activation='linear',
                         return_sequences=return_sequences, go_backwards=True),
                    attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = AttentionWrapper(
                    LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences),
                    attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    return _decoder
Example 20: Decoder

# Required import: from keras import layers
# or: from keras.layers import GRU
def Decoder(hidden_size, activation=None, return_sequences=True,
            bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    GRU(int(hidden_size / 2), activation='linear',
                        return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    LSTM(int(hidden_size / 2), activation='linear',
                         return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _decoder
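Taken together, the Encoder (Example 18) and Decoder (Example 20) factories can be wired into a simple sequence autoencoder. The sketch below assumes those definitions (and their ELU/concatenate imports) are in scope; the sizes are illustrative, not from the source project:

from keras.layers import Input
from keras.models import Model

timesteps, features, hidden = 20, 8, 64
inp = Input(shape=(timesteps, features))
encoded = Encoder(hidden, bidirectional=True, use_gru=True)(inp)  # (timesteps, hidden)
decoded = Decoder(features, use_gru=True)(encoded)                # back to (timesteps, features)
autoencoder = Model(inp, decoded)
autoencoder.compile(optimizer='adam', loss='mse')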
Example 21: create_model

# Required import: from keras import layers
# or: from keras.layers import GRU
def create_model(self, hyper_parameters):
    """
    Build the neural network
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    # x = Reshape((self.len_max, self.embed_size, 1))(embedding)
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU
    # Bi-LSTM
    for nrl in range(self.num_rnn_layers):
        x = Bidirectional(layer_cell(units=self.rnn_units,
                                     return_sequences=True,
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(0.32 * 0.1),
                                     recurrent_regularizer=regularizers.l2(0.32)))(x)
        x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # Final softmax
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 37, Source: graph.py
Example 22: word_level

# Required import: from keras import layers
# or: from keras.layers import GRU
def word_level(self):
    x_input_word = Input(shape=(self.len_max, self.embed_size))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_word)
    x = Bidirectional(GRU(units=self.rnn_units,
                          return_sequences=True,
                          activation='relu',
                          kernel_regularizer=regularizers.l2(self.l2),
                          recurrent_regularizer=regularizers.l2(self.l2)))(x_input_word)
    out_sent = AttentionSelf(self.rnn_units * 2)(x)
    model = Model(x_input_word, out_sent)
    return model
Author: yongzhuo, Project: Keras-TextClassification, Lines: 13, Source: graph.py
Example 23: sentence_level

# Required import: from keras import layers
# or: from keras.layers import GRU
def sentence_level(self):
    x_input_sen = Input(shape=(self.len_max, self.rnn_units * 2))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_sen)
    output_doc = Bidirectional(GRU(units=self.rnn_units * 2,
                                   return_sequences=True,
                                   activation='relu',
                                   kernel_regularizer=regularizers.l2(self.l2),
                                   recurrent_regularizer=regularizers.l2(self.l2)))(x_input_sen)
    output_doc_att = AttentionSelf(self.word_embedding.embed_size)(output_doc)
    model = Model(x_input_sen, output_doc_att)
    return model
Author: yongzhuo, Project: Keras-TextClassification, Lines: 13, Source: graph.py
Example 24: create_model_gru

# Required import: from keras import layers
# or: from keras.layers import GRU
def create_model_gru(self, hyper_parameters):
    """
    Build the neural network: bi-GRU + capsule
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output
    embed_layer = SpatialDropout1D(self.dropout)(embedding)
    x_bi = Bidirectional(GRU(self.filters_num,
                             activation='relu',
                             dropout=self.dropout,
                             recurrent_dropout=self.dropout,
                             return_sequences=True))(embed_layer)
    # Single capsule layer
    capsule = Capsule_bojone(num_capsule=self.num_capsule,
                             dim_capsule=self.dim_capsule,
                             routings=self.routings,
                             kernel_size=(3, 1),
                             share_weights=True)(x_bi)
    # # Multi-scale pooling variant:
    # conv_pools = []
    # for filter in self.filters:
    #     capsule = Capsule_bojone(num_capsule=self.num_capsule,
    #                              dim_capsule=self.dim_capsule,
    #                              routings=self.routings,
    #                              kernel_size=(filter, 1),
    #                              share_weights=True)(x_bi)
    #     conv_pools.append(capsule)
    # capsule = Concatenate(axis=-1)(conv_pools)
    capsule = Flatten()(capsule)
    capsule = Dropout(self.dropout)(capsule)
    output = Dense(self.label, activation=self.activate_classify)(capsule)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 39, Source: graph.py