This tutorial collects practical code examples for the Python method keras.layers.TimeDistributed. Gathered below are 25 typical, real-world usages of layers.TimeDistributed drawn from open-source projects, ordered by popularity; together they show concretely how the method is used in practice. For more context, see the containing module, keras.layers. Unless noted otherwise, every example assumes the import from keras.layers import TimeDistributed (or from keras import layers). A minimal sketch of what the layer does follows.
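As a quick orientation (this sketch is ours, not one of the collected examples): TimeDistributed applies one copy of an inner layer, with shared weights, to every timestep of a sequence input.

from keras.models import Model
from keras.layers import Input, Dense, TimeDistributed

# Apply one shared Dense(8) to each of the 10 timesteps.
x = Input(shape=(10, 16))           # (batch, timesteps, features)
y = TimeDistributed(Dense(8))(x)    # same weights reused at every step
model = Model(x, y)
print(model.output_shape)           # (None, 10, 8)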
Example 1: create_model

def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM,
                            input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1,
                 padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    # the deprecated 'input'/'output' kwargs are replaced with 'inputs'/'outputs'
    model = Model(inputs=[inputs], outputs=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
Example 2: get_audio_model

def get_audio_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 50

    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True,
                              dropout=0.4))(masked)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True,
                              dropout=0.4), name="utter")(lstm)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

    model = Model(inputs, output)
    return model
Example 3: get_bimodal_model

def get_bimodal_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 10

    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]

    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True,
                              dropout=0.4), name="utter")(masked)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)

    model = Model(inputs, output)
    return model
Example 4: classifier

def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # compile times on theano tend to be very high, so we use smaller
    # ROI pooling regions to work around it
    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois, 7, 7, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 512, 7, 7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])

    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax',
                                      kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear',
                                     kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
Source: kbardool/keras-frcnn, vgg.py (26 lines)
Example 5: __build_model

def __build_model(self):
    model = Sequential()

    embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                output_dim=self.embedding_dim,
                                weights=[self.embedding_mat],
                                trainable=False)
    model.add(embedding_layer)

    bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
    model.add(bilstm_layer)

    model.add(TimeDistributed(Dense(256, activation="relu")))

    crf_layer = CRF(units=len(self.tags), sparse_target=True)
    model.add(crf_layer)

    model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
    model.summary()

    return model
Source: fordai/CCKS2019-Chinese-Clinical-NER, model.py (23 lines)
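If keras_contrib's CRF layer is not available, a simpler variant of the same tagging architecture replaces the CRF with a per-timestep softmax via TimeDistributed. This is a sketch with placeholder sizes, not the original model:

from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM, TimeDistributed, Dense

VOCAB_SIZE, NUM_TAGS, MAX_LEN = 5000, 10, 100   # hypothetical sizes

model = Sequential()
model.add(Embedding(VOCAB_SIZE, 128, input_length=MAX_LEN, mask_zero=True))
model.add(Bidirectional(LSTM(256, return_sequences=True)))
model.add(TimeDistributed(Dense(NUM_TAGS, activation='softmax')))
# targets: one-hot tag matrices of shape (n, MAX_LEN, NUM_TAGS)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])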
Example 6: classifier

def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # compile times on theano tend to be very high, so we use smaller
    # ROI pooling regions to work around it
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        # Changed the input shape to 1088 from 1024 because of nn_base's
        # output being 1088. Not sure if this is correct.
        input_shape = (num_rois, 14, 14, 1088)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 1024, 7, 7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    out = classifier_layers(out_roi_pool, input_shape=input_shape, trainable=True)

    out = TimeDistributed(Flatten())(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax',
                                      kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear',
                                     kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
Example 7: classifier

def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # compile times on theano tend to be very high, so we use smaller
    # ROI pooling regions to work around it
    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois, 7, 7, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 512, 7, 7)

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])

    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax',
                                      kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear',
                                     kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
Source: you359/Keras-FasterRCNN, vgg.py (24 lines)
Example 8: classifier_layers

def classifier_layers(x, input_shape, trainable=False):
    # compile times on theano tend to be very high, so we use smaller
    # ROI pooling regions to work around it (hence a smaller stride in
    # the region that follows the ROI pool)
    x = TimeDistributed(SeparableConv2D(1536, (3, 3), padding='same', use_bias=False),
                        name='block14_sepconv1')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = TimeDistributed(SeparableConv2D(2048, (3, 3), padding='same', use_bias=False),
                        name='block14_sepconv2')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    # the original snippet discarded this result; assign it so the pooling takes effect
    x = TimeDistributed(GlobalAveragePooling2D(), name='avg_pool')(x)

    return x
Source: you359/Keras-FasterRCNN, xception.py (23 lines)
Example 9: classifier

def classifier(base_layers, input_rois, num_rois, nb_classes, trainable=True):
    """
    The final classifier, matching the original VGG-16 implementation.
    The only difference is that the RoiPooling layer uses tensorflow's
    bilinear interpolation.
    """
    pooling_regions = 7

    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois,
                                  trainable=trainable)([base_layers, input_rois])

    out = TimeDistributed(Flatten(), name="flatten", trainable=trainable)(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', trainable=trainable),
                          name="fc1", trainable=trainable)(out)
    # add dropout to match the original implementation
    out = TimeDistributed(Dropout(0.5), name="drop_out1", trainable=trainable)(out)
    out = TimeDistributed(Dense(4096, activation='relu', trainable=trainable),
                          name="fc2", trainable=trainable)(out)
    # add dropout to match the original implementation
    out = TimeDistributed(Dropout(0.5), name="drop_out2", trainable=trainable)(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax',
                                      kernel_initializer='zero', trainable=trainable),
                                name='dense_class_{}'.format(nb_classes),
                                trainable=trainable)(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear',
                                     kernel_initializer='zero', trainable=trainable),
                               name='dense_regress_{}'.format(nb_classes),
                               trainable=trainable)(out)
    return [out_class, out_regr]
Example 10: create_model

def create_model(maxlen, chars, word_size, infer=False):
    """
    :param infer: if True, skip compiling the model
    :param maxlen: maximum sequence length
    :param chars: character vocabulary
    :param word_size: embedding dimension
    :return: the (optionally compiled) model
    """
    sequence = Input(shape=(maxlen,), dtype='int32')
    embedded = Embedding(len(chars) + 1, word_size, input_length=maxlen,
                         mask_zero=True)(sequence)
    blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(embedded)
    output = TimeDistributed(Dense(5, activation='softmax'))(blstm)
    # the deprecated 'input'/'output' kwargs are replaced with 'inputs'/'outputs'
    model = Model(inputs=sequence, outputs=output)
    if not infer:
        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
    return model
Source: stephen-v/zh-segmentation-keras, lstm_model.py (19 lines)
Example 11: create_lstm

def create_lstm(hidden_units=[50], dropout=0.05, bidirectional=True):
    model = Sequential()
    if bidirectional:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                model.add(Bidirectional(LSTM(unit, dropout=dropout,
                                             return_sequences=True),
                                        input_shape=(None, config.N_MELS)))
            else:
                model.add(Bidirectional(LSTM(unit, dropout=dropout,
                                             return_sequences=True)))
    else:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                # input_shape belongs to the layer, not to model.add()
                model.add(LSTM(unit, dropout=dropout, return_sequences=True,
                               input_shape=(None, config.N_MELS)))
            else:
                model.add(LSTM(unit, dropout=dropout, return_sequences=True))

    model.add(TimeDistributed(Dense(config.CLASSES, activation='sigmoid')))
    return model
Source: qlemaire22/speech-music-detection, lstm.py (25 lines)
Example 12: AlternativeRNNModel

def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector for each image,
        # which we'll feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector for each image,
        # which we'll feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dense(embedding_size, activation='relu')(image_input)
    image_model = RepeatVector(max_len)(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: we zero-pad inputs to the same length; the zero mask
    # ignores those padded inputs, which is an efficiency gain.
    caption_model_1 = Embedding(vocab_size, embedding_size,
                                mask_zero=True)(caption_input)
    # Since we are going to predict the next word using the previous words
    # (the length of previous words changes with every iteration over the
    # caption), we have to set return_sequences=True.
    caption_model_2 = LSTM(rnnConfig['LSTM_units'],
                           return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merging the models and creating a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
    final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'],
                                       return_sequences=False))(final_model_1)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
Source: dabasajay/Image-Caption-Generator, model.py (34 lines)
Example 13: build

def build(self, input_shape):
    self._validate_input_shape(input_shape)

    d_k = self._d_k if self._d_k else input_shape[1][-1]
    d_model = self._d_model if self._d_model else input_shape[1][-1]
    d_v = self._d_v

    if type(d_k) == tf.Dimension:
        d_k = d_k.value
    if type(d_model) == tf.Dimension:
        d_model = d_model.value

    self._q_layers = []
    self._k_layers = []
    self._v_layers = []
    self._sdp_layer = ScaledDotProductAttention(return_attention=self._return_attention)

    for _ in range(self._h):
        self._q_layers.append(
            TimeDistributed(
                Dense(d_k, activation=self._activation, use_bias=False)
            )
        )
        self._k_layers.append(
            TimeDistributed(
                Dense(d_k, activation=self._activation, use_bias=False)
            )
        )
        self._v_layers.append(
            TimeDistributed(
                Dense(d_v, activation=self._activation, use_bias=False)
            )
        )

    self._output = TimeDistributed(Dense(d_model))
    # if self._return_attention:
    #     self._output = Concatenate()
Source: zimmerrol/keras-utility-layer-collection, attention.py (39 lines)
Example 14: creat_model

def creat_model(input_shape, num_class):
    init = initializers.Orthogonal(gain=args.norm)
    sequence_input = Input(shape=input_shape)
    mask = Masking(mask_value=0.)(sequence_input)
    if args.aug:
        mask = augmentaion()(mask)
    X = Noise(0.075)(mask)
    if args.model[0:2] == 'VA':
        # VA: view-adaptation subnetwork predicting translation and rotation
        trans = LSTM(args.nhid, recurrent_activation='sigmoid',
                     return_sequences=True, implementation=2,
                     recurrent_initializer=init)(X)
        trans = Dropout(0.5)(trans)
        trans = TimeDistributed(Dense(3, kernel_initializer='zeros'))(trans)

        rot = LSTM(args.nhid, recurrent_activation='sigmoid',
                   return_sequences=True, implementation=2,
                   recurrent_initializer=init)(X)
        rot = Dropout(0.5)(rot)
        rot = TimeDistributed(Dense(3, kernel_initializer='zeros'))(rot)

        transform = Concatenate()([rot, trans])
        X = VA()([mask, transform])

    X = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True,
             implementation=2, recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True,
             implementation=2, recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)
    X = LSTM(args.nhid, recurrent_activation='sigmoid', return_sequences=True,
             implementation=2, recurrent_initializer=init)(X)
    X = Dropout(0.5)(X)

    X = TimeDistributed(Dense(num_class))(X)
    X = MeanOverTime()(X)
    X = Activation('softmax')(X)

    model = Model(sequence_input, X)
    return model
Source: microsoft/View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition, va-rnn.py (33 lines)
Example 15: set_trainable

def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
    """Sets model layers as trainable if their names match
    the given regular expression.
    """
    # Print message on the first call (but not on recursive calls)
    if verbose > 0 and keras_model is None:
        log("Selecting layers to train")

    keras_model = keras_model or self.keras_model

    # In multi-GPU training, we wrap the model. Get layers
    # of the inner model because they have the weights.
    layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model") \
        else keras_model.layers

    for layer in layers:
        # Is the layer a model?
        if layer.__class__.__name__ == 'Model':
            print("In model: ", layer.name)
            self.set_trainable(layer_regex, keras_model=layer, indent=indent + 4)
            continue

        if not layer.weights:
            continue
        # Is it trainable?
        trainable = bool(re.fullmatch(layer_regex, layer.name))
        # Update layer. If layer is a container, update inner layer.
        if layer.__class__.__name__ == 'TimeDistributed':
            layer.layer.trainable = trainable
        else:
            layer.trainable = trainable
        # Print trainable layer names
        if trainable and verbose > 0:
            log("{}{:20}   ({})".format(" " * indent, layer.name,
                                        layer.__class__.__name__))
Source: dataiku/dataiku-contrib, model.py (38 lines)
Example 16: find_trainable_layer

def find_trainable_layer(self, layer):
    """If a layer is encapsulated by another layer, this function
    digs through the encapsulation and returns the layer that holds
    the weights.
    """
    if layer.__class__.__name__ == 'TimeDistributed':
        return self.find_trainable_layer(layer.layer)
    return layer
Source: dataiku/dataiku-contrib, model.py (10 lines)
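A short usage sketch of the same unwrapping idea, written as a free function on hypothetical objects. It matters because a TimeDistributed wrapper's weights live on the wrapped inner layer, not on the wrapper itself:

from keras.layers import Dense, TimeDistributed

td = TimeDistributed(Dense(8), name='td_dense')

def unwrap(layer):
    # Same logic as find_trainable_layer above, outside a class.
    if layer.__class__.__name__ == 'TimeDistributed':
        return unwrap(layer.layer)
    return layer

inner = unwrap(td)
print(inner.__class__.__name__)   # Dense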
Example 17: ctpn

def ctpn(base_features, num_anchors, rnn_units=128, fc_units=512):
    """
    The CTPN network.
    :param base_features: (B, H, W, C) feature map
    :param num_anchors: number of anchors
    :param rnn_units: number of GRU units
    :param fc_units: number of FC units
    :return:
    """
    x = layers.Conv2D(512, kernel_size=(3, 3), padding='same',
                      name='pre_fc')(base_features)  # [B, H, W, 512]
    # Run the RNN along the width axis
    rnn_forward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal'),
        name='gru_forward')(x)
    rnn_backward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal',
                   go_backwards=True),
        name='gru_backward')(x)
    rnn_output = layers.Concatenate(name='gru_concat')([rnn_forward,
                                                        rnn_backward])  # (B, H, W, 256)

    # Implement the fully connected layer with a 1x1 conv
    fc_output = layers.Conv2D(fc_units, kernel_size=(1, 1), activation='relu',
                              name='fc_output')(rnn_output)  # (B, H, W, 512)

    # Classification
    class_logits = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1),
                                 name='cls')(fc_output)
    class_logits = layers.Reshape(target_shape=(-1, 2),
                                  name='cls_reshape')(class_logits)
    # Regress the vertical center coordinate and the height
    predict_deltas = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1),
                                   name='deltas')(fc_output)
    predict_deltas = layers.Reshape(target_shape=(-1, 2),
                                    name='deltas_reshape')(predict_deltas)
    # Side refinement (only the x offset needs to be predicted)
    predict_side_deltas = layers.Conv2D(num_anchors, kernel_size=(1, 1),
                                        name='side_deltas')(fc_output)
    predict_side_deltas = layers.Reshape(target_shape=(-1, 1),
                                         name='side_deltas_reshape')(predict_side_deltas)
    return class_logits, predict_deltas, predict_side_deltas
Source: yizt/keras-ctpn, models.py (36 lines)
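The idiom worth noting here: wrapping a GRU in TimeDistributed over a (B, H, W, C) feature map runs an independent RNN along the width axis of every row. A minimal sketch with made-up sizes:

from keras.models import Model
from keras.layers import Input, GRU, TimeDistributed

feat = Input(shape=(16, 24, 32))   # (rows H, cols W, channels C)
row_rnn = TimeDistributed(GRU(8, return_sequences=True))(feat)  # GRU along W, per row
m = Model(feat, row_rnn)
print(m.output_shape)              # (None, 16, 24, 8)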
Example 18: create_model

def create_model(self, ret_model=False):
    # base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # base_model.trainable = False

    image_model = Sequential()
    # image_model.add(base_model)
    # image_model.add(Flatten())
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_cap_len))

    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    model = Sequential()
    # note: Merge is the legacy Keras 1 layer; Keras 2 uses the functional API instead
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))

    print("Model created!")

    if ret_model:
        return model

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
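Because the snippet above relies on the legacy Keras 1 Merge layer, here is a hedged sketch of the same architecture in the Keras 2 functional API. EMBEDDING_DIM, vocab_size and max_cap_len stand in for the attributes used above and the values are illustrative:

from keras.models import Model
from keras.layers import (Input, Dense, RepeatVector, Embedding, LSTM,
                          TimeDistributed, concatenate)

EMBEDDING_DIM, vocab_size, max_cap_len = 128, 8000, 40   # hypothetical values

image_in = Input(shape=(4096,))
img = Dense(EMBEDDING_DIM, activation='relu')(image_in)
img = RepeatVector(max_cap_len)(img)            # tile the image vector per timestep

caption_in = Input(shape=(max_cap_len,))
cap = Embedding(vocab_size, 256, input_length=max_cap_len)(caption_in)
cap = LSTM(256, return_sequences=True)(cap)
cap = TimeDistributed(Dense(EMBEDDING_DIM))(cap)

merged = concatenate([img, cap])                # (batch, max_cap_len, 2*EMBEDDING_DIM)
out = LSTM(1000)(merged)
out = Dense(vocab_size, activation='softmax')(out)

model = Model([image_in, caption_in], out)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])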
Example 19: train

def train(self, text, nb_epoch=100, dropout_rate=0.01, optimizer='rmsprop'):
    """ Train the scRNN model.

    :param text: training corpus
    :param nb_epoch: number of epochs (Default: 100)
    :param dropout_rate: dropout rate (Default: 0.01)
    :param optimizer: optimizer (Default: "rmsprop")
    :type text: str
    :type nb_epoch: int
    :type dropout_rate: float
    :type optimizer: str
    """
    self.dictionary = Dictionary([nospace_tokenize(text),
                                  default_specialsignals.values()])
    self.onehotencoder.fit(np.arange(len(self.dictionary))
                             .reshape((len(self.dictionary), 1)))
    xylist = [(xvec.transpose(), yvec.transpose())
              for xvec, yvec in self.preprocess_text_train(text)]
    xtrain = np.array([item[0] for item in xylist])
    ytrain = np.array([item[1] for item in xylist])

    # neural network here
    model = Sequential()
    model.add(LSTM(self.nb_hiddenunits, return_sequences=True,
                   batch_input_shape=(None, self.batchsize,
                                      len(self.concatcharvec_encoder) * 3)))
    model.add(Dropout(dropout_rate))
    model.add(TimeDistributed(Dense(len(self.dictionary))))
    model.add(Activation('softmax'))

    # compile... more arguments
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    # training
    model.fit(xtrain, ytrain, epochs=nb_epoch)

    self.model = model
    self.trained = True
Source: stephenhky/PyShortTextCategorization, sakaguchi.py (35 lines)
Example 20: set_model

def set_model(self):
    """
    Set the HAN model according to the given hyperparameters
    """
    if self.hyperparameters['l2_regulizer'] is None:
        kernel_regularizer = None
    else:
        kernel_regularizer = regularizers.l2(self.hyperparameters['l2_regulizer'])
    if self.hyperparameters['dropout_regulizer'] is None:
        dropout_regularizer = 1
    else:
        dropout_regularizer = self.hyperparameters['dropout_regulizer']

    word_input = Input(shape=(self.max_senten_len,), dtype='float32')
    word_sequences = self.get_embedding_layer()(word_input)
    word_lstm = Bidirectional(
        self.hyperparameters['rnn'](self.hyperparameters['rnn_units'],
                                    return_sequences=True,
                                    kernel_regularizer=kernel_regularizer))(word_sequences)
    word_dense = TimeDistributed(
        Dense(self.hyperparameters['dense_units'],
              kernel_regularizer=kernel_regularizer))(word_lstm)
    word_att = AttentionWithContext()(word_dense)
    wordEncoder = Model(word_input, word_att)

    sent_input = Input(shape=(self.max_senten_num, self.max_senten_len),
                       dtype='float32')
    sent_encoder = TimeDistributed(wordEncoder)(sent_input)
    sent_lstm = Bidirectional(
        self.hyperparameters['rnn'](self.hyperparameters['rnn_units'],
                                    return_sequences=True,
                                    kernel_regularizer=kernel_regularizer))(sent_encoder)
    sent_dense = TimeDistributed(
        Dense(self.hyperparameters['dense_units'],
              kernel_regularizer=kernel_regularizer))(sent_lstm)
    sent_att = Dropout(dropout_regularizer)(AttentionWithContext()(sent_dense))
    preds = Dense(len(self.classes),
                  activation=self.hyperparameters['activation'])(sent_att)

    self.model = Model(sent_input, preds)
    self.model.compile(loss=self.hyperparameters['loss'],
                       optimizer=self.hyperparameters['optimizer'],
                       metrics=self.hyperparameters['metrics'])
Source: Hsankesara/DeepResearch, HAN.py (35 lines)
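The key TimeDistributed trick in this HAN example is wrapping an entire sub-model (the word encoder) so it runs once per sentence. Stripped to its core, with made-up shapes:

from keras.models import Model
from keras.layers import Input, LSTM, TimeDistributed

token_in = Input(shape=(20, 32))                 # 20 tokens, 32-dim vectors
sent_vec = LSTM(64)(token_in)                    # one 64-dim vector per sentence
sent_encoder = Model(token_in, sent_vec)

doc_in = Input(shape=(8, 20, 32))                # 8 sentences per document
doc_enc = TimeDistributed(sent_encoder)(doc_in)  # encoder applied to each sentence
print(Model(doc_in, doc_enc).output_shape)       # (None, 8, 64)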
Example 21: set_trainable

(The code is identical to Example 15 above: the same Mask R-CNN set_trainable helper, which unwraps TimeDistributed containers so the trainable flag is set on the inner layer.)

Source: SunskyF/EasyPR-python, model.py (37 lines)
Example 22: set_trainable

(Again identical to Example 15; this Mask R-CNN helper circulates verbatim across forks.)

Source: olgaliak/segmentation-unet-maskrcnn, model.py (38 lines)
Example 23: build_model

def build_model(batch_size, seq_len, vocab_size=VOCAB_SIZE, embedding_size=32,
                rnn_size=128, num_layers=2, drop_rate=0.0,
                learning_rate=0.001, clip_norm=5.0):
    """
    build character embeddings LSTM text generation model.
    """
    logger.info("building model: batch_size=%s, seq_len=%s, vocab_size=%s, "
                "embedding_size=%s, rnn_size=%s, num_layers=%s, drop_rate=%s, "
                "learning_rate=%s, clip_norm=%s.",
                batch_size, seq_len, vocab_size, embedding_size,
                rnn_size, num_layers, drop_rate, learning_rate, clip_norm)
    model = Sequential()
    # input shape: (batch_size, seq_len)
    model.add(Embedding(vocab_size, embedding_size,
                        batch_input_shape=(batch_size, seq_len)))
    model.add(Dropout(drop_rate))
    # shape: (batch_size, seq_len, embedding_size)
    for _ in range(num_layers):
        model.add(LSTM(rnn_size, return_sequences=True, stateful=True))
        model.add(Dropout(drop_rate))
    # shape: (batch_size, seq_len, rnn_size)
    model.add(TimeDistributed(Dense(vocab_size, activation="softmax")))
    # output shape: (batch_size, seq_len, vocab_size)
    optimizer = Adam(learning_rate, clipnorm=clip_norm)
    model.compile(loss="categorical_crossentropy", optimizer=optimizer)
    return model
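A hedged usage sketch for the builder above. Because the LSTMs are stateful, the batch size is fixed at build time and the recurrent state should be reset between passes over a continuous text stream:

model = build_model(batch_size=32, seq_len=64)   # vocab defaults to VOCAB_SIZE
model.summary()
# after each training epoch over one continuous corpus:
model.reset_states()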
Example 24: han_model

def han_model(max_len=400, vocabulary_size=20000, embedding_dim=128,
              hidden_dim=128, max_sentences=16, num_classes=4):
    """
    Implementation of the document classification model described in
    `Hierarchical Attention Networks for Document Classification (Yang et al., 2016)`
    (https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf)
    :param max_len:
    :param vocabulary_size:
    :param embedding_dim:
    :param hidden_dim:
    :param max_sentences:
    :param num_classes:
    :return:
    """
    print("Hierarchical Attention Network...")
    inputs = Input(shape=(max_len,), dtype='int32')
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    lstm_layer = Bidirectional(LSTM(hidden_dim))(embedding)
    # lstm_layer_att = AttLayer(hidden_dim)(lstm_layer)
    sent_encoder = Model(inputs, lstm_layer)

    doc_inputs = Input(shape=(max_sentences, max_len), dtype='int32',
                       name='doc_input')
    doc_encoder = TimeDistributed(sent_encoder)(doc_inputs)
    doc_layer = Bidirectional(LSTM(hidden_dim))(doc_encoder)
    # doc_layer_att = AttLayer(hidden_dim)(doc_layer)
    output = Dense(num_classes, activation='softmax')(doc_layer)

    model = Model(doc_inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    return model
Example 25: buildmodel

def buildmodel(model_type, num_labels, frame_width, timesteps, num_features,
               color_scale, lstm_cells, feature_map_filters, kernel_size,
               pool_size, dense_hidden_units, activation_output):
    if 'lstm' == model_type.lower():
        model = Sequential()
        model.add(LSTM(lstm_cells, return_sequences=True,
                       input_shape=(frame_width, num_features)))
        model.add(LSTM(lstm_cells, return_sequences=True))

    elif 'cnn' == model_type.lower():
        model = Sequential()
        # 4x8 time-frequency filter (goes along both time and frequency axes)
        model.add(Conv2D(feature_map_filters, kernel_size=kernel_size,
                         activation='relu',
                         input_shape=(frame_width * timesteps, num_features,
                                      color_scale)))
        # non-overlapping pool_size 3x3
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Dropout(0.25))
        model.add(Dense(dense_hidden_units))

    elif 'cnnlstm' == model_type.lower():
        cnn = Sequential()
        cnn.add(Conv2D(feature_map_filters, kernel_size=kernel_size,
                       activation='relu'))
        # non-overlapping pool_size 3x3
        cnn.add(MaxPooling2D(pool_size=pool_size))
        cnn.add(Dropout(0.25))
        cnn.add(Flatten())

        # prepare stacked LSTM: the whole CNN runs once per timestep
        model = Sequential()
        model.add(TimeDistributed(cnn, input_shape=(timesteps, frame_width,
                                                    num_features, color_scale)))
        model.add(LSTM(lstm_cells, return_sequences=True))
        model.add(LSTM(lstm_cells, return_sequences=True))
        model.add(Flatten())

    model.add(Dense(num_labels, activation=activation_output))

    return model
Source: a-n-rose/Build-CNN-or-LSTM-or-CNNLSTM-with-speech-features, build_model.py (35 lines)
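A hypothetical call for the 'cnnlstm' branch; every hyperparameter value below is illustrative, not taken from the original project:

model = buildmodel('cnnlstm', num_labels=5, frame_width=19, timesteps=5,
                   num_features=40, color_scale=1, lstm_cells=64,
                   feature_map_filters=32, kernel_size=(4, 8), pool_size=(3, 3),
                   dense_hidden_units=60, activation_output='softmax')
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])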