This article collects typical usage examples of the Python method keras.layers.AveragePooling1D. If you are unsure what layers.AveragePooling1D does or how to use it in practice, the curated examples below may help; you can also explore the other members of the keras.layers module. Below are 19 code examples of layers.AveragePooling1D, sorted by popularity by default.

Example 1: bidLstm_simple

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate,
                   recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# bidirectional LSTM with attention layer
Developer: kermitt2, Project: delft, Lines: 27, Source: models.py
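As a quick orientation before the remaining examples, here is a minimal standalone sketch (not taken from any of the projects above) contrasting AveragePooling1D, which averages within local windows along the time axis, with GlobalAveragePooling1D, which averages over all timesteps:

    import numpy as np
    from keras.layers import Input, AveragePooling1D, GlobalAveragePooling1D
    from keras.models import Model

    # Toy input: sequences of 4 timesteps with 2 features each.
    inp = Input(shape=(4, 2))
    local_avg = AveragePooling1D(pool_size=2)(inp)   # averages each pair of steps -> (None, 2, 2)
    global_avg = GlobalAveragePooling1D()(inp)       # averages over all steps     -> (None, 2)
    model = Model(inputs=inp, outputs=[local_avg, global_avg])

    x = np.arange(8, dtype='float32').reshape(1, 4, 2)
    local_out, global_out = model.predict(x)
    print(local_out.shape, global_out.shape)         # (1, 2, 2) (1, 2)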
Example 2: get_contextual_spatial_gated_input

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def get_contextual_spatial_gated_input(X, conf_dict):
    # X: input to be gated, (None, steps, x_dim)
    # return X' = X * sigmoid(Dense(Average(f(X)))), f is a non-linear function.
    assert len(X._keras_shape) == 3, [X._keras_shape]
    seq_len, x_dim = X._keras_shape[1], X._keras_shape[2]
    gating_hidden_dim = conf_dict['gating_hidden_dim']
    gating_hidden_actv = conf_dict['gating_hidden_actv']
    Xp = ReshapeBatchAdhoc()(X)
    Xp = Dense(gating_hidden_dim, activation=gating_hidden_actv)(Xp)
    #Xp = Lambda(lambda x: x * 0)(Xp)
    Xp = ReshapeBatchAdhoc(mid_dim=seq_len)(Xp)
    Xp = AveragePooling1D(seq_len)(Xp)  # (None, 1, x_dim)
    Xp = Reshape((Xp._keras_shape[-1], ))(Xp)
    Xp = Dense(x_dim, activation='sigmoid')(Xp)
    Xp = Reshape((1, x_dim))(Xp)
    X = DotMergeAdhoc()([X, Xp])
    return X
Developer: chentingpc, Project: NNCF, Lines: 20, Source: gatings.py
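ReshapeBatchAdhoc and DotMergeAdhoc are custom layers from the NNCF project, so the example above is not runnable on its own. A rough sketch of the same gating idea, X' = X * sigmoid(Dense(Average(f(X)))), built from standard Keras layers only (the dimensions here are illustrative assumptions):

    from keras.layers import (Input, Dense, TimeDistributed,
                              GlobalAveragePooling1D, RepeatVector, Multiply)
    from keras.models import Model

    seq_len, x_dim, hidden_dim = 10, 32, 64

    X = Input(shape=(seq_len, x_dim))
    h = TimeDistributed(Dense(hidden_dim, activation='tanh'))(X)  # f(X), applied per timestep
    h = GlobalAveragePooling1D()(h)               # average over timesteps -> (None, hidden_dim)
    gate = Dense(x_dim, activation='sigmoid')(h)  # per-dimension gate in [0, 1]
    gate = RepeatVector(seq_len)(gate)            # broadcast the gate to every timestep
    gated = Multiply()([X, gate])                 # X' = X * gate
    model = Model(inputs=X, outputs=gated)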
Example 3: lstm

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def lstm(maxlen, embed_size, recurrent_units, dropout_rate,
         recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
             recurrent_dropout=dropout_rate)(input_layer)
    #x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# bidirectional LSTM
Developer: kermitt2, Project: delft, Lines: 29, Source: models.py
Example 4: cnn3

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def cnn3(maxlen, embed_size, recurrent_units, dropout_rate,
         recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(input_layer)
    #x = Dropout(dropout_rate)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Developer: kermitt2, Project: delft, Lines: 29, Source: models.py
Example 5: gru

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def gru(maxlen, embed_size, recurrent_units, dropout_rate,
        recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  #optimizer='adam',
                  metrics=['accuracy'])
    return model
Developer: kermitt2, Project: delft, Lines: 31, Source: models.py
Example 6: gru_best

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def gru_best(maxlen, embed_size, recurrent_units, dropout_rate,
             recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  #optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

# 1 layer bid GRU
Developer: kermitt2, Project: delft, Lines: 34, Source: models.py
Example 7: Archi_3CONV2AP_1FC256_f33_17_9fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f33_17_9fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=128, kernel_size=33, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=192, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=256, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f33_17_9fd')

#-----------------------------------------------------------------------
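Examples 7 through 14 and 18 call conv_bn_relu, fc_bn_relu_drop, and softmax, which are helper functions defined elsewhere in their source project and not shown on this page. A plausible minimal reconstruction, inferred only from how they are called here (the originals may differ):

    from keras.layers import Conv1D, Dense, BatchNormalization, Activation, Dropout

    def conv_bn_relu(X, nbunits, kernel_size, **conv_kwargs):
        # Conv1D -> BatchNorm -> ReLU, as the helper's name suggests.
        X = Conv1D(nbunits, kernel_size, **conv_kwargs)(X)
        X = BatchNormalization()(X)
        return Activation('relu')(X)

    def fc_bn_relu_drop(X, nbunits, dropout_rate, **dense_kwargs):
        # Dense -> BatchNorm -> ReLU -> Dropout.
        X = Dense(nbunits, **dense_kwargs)(X)
        X = BatchNormalization()(X)
        X = Activation('relu')(X)
        return Dropout(dropout_rate)(X)

    def softmax(X, nbclasses, **dense_kwargs):
        # Final classification layer.
        return Dense(nbclasses, activation='softmax', **dense_kwargs)(X)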
Example 8: Archi_3CONV2AP_1FC256_f17_9_5fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f17_9_5fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=128, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=256, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=384, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f17_9_5fd')

#-----------------------------------------------------------------------
Example 9: Archi_3CONV2AP_1FC256_f9_5_3fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f9_5_3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**2, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f9_5_3fd')

#-----------------------------------------------------------------------
Example 10: Archi_3CONV2AP_1FC256_f5_3_1fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f5_3_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    #~ X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f5_3_1fd')

#-----------------------------------------------------------------------
Example 11: Archi_3CONV2AP_1FC256_f3_1_1fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f3_1_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f3_1_1fd')

#-----------------------------------------------------------------------
Example 12: Archi_3CONV2AP_1FC256_GAP_f17_9_5fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f17_9_5fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=256, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f17_9_5fd')

#-----------------------------------------------------------------------
Example 13: Archi_3CONV2AP_1FC256_GAP_f9_5_3fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f9_5_3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f9_5_3fd')

#-----------------------------------------------------------------------
Example 14: Archi_3CONV2AP_1FC256_GAP_f5_3_1fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f5_3_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f5_3_1fd')

#-----------------------------------------------------------------------
Example 15: get_contextual_temporal_gated_input

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def get_contextual_temporal_gated_input(X, conf_dict):
    # X: input to be gated, (None, steps, x_dim)
    # return X' = X * c * softmax(X.Average(f(X))), f is a non-linear function.
    assert len(X._keras_shape) == 3, [X._keras_shape]
    seq_len, x_dim = X._keras_shape[1], X._keras_shape[2]
    gating_hidden_dim = conf_dict['gating_hidden_dim']
    gating_hidden_actv = conf_dict['gating_hidden_actv']
    scale = conf_dict['scale']
    nl_choice = conf_dict['nl_choice']
    Xp = ReshapeBatchAdhoc()(X)
    Xp = Dense(gating_hidden_dim, activation=gating_hidden_actv)(Xp)
    Xp = ReshapeBatchAdhoc(mid_dim=seq_len)(Xp)
    Xp = AveragePooling1D(seq_len)(Xp)  # (None, 1, x_dim)
    Xp = Reshape((Xp._keras_shape[-1], ))(Xp)
    if nl_choice == 'nl':
        Xp = Dense(x_dim, activation='relu', bias=True)(Xp)  # note: `bias=` is the legacy Keras 1 keyword (use_bias in Keras 2)
    elif nl_choice == 'bn+nl':
        Xp = BatchNormalization()(Xp)
        Xp = Dense(x_dim, activation='relu', bias=True)(Xp)
    elif nl_choice == 'bn+l':
        Xp = BatchNormalization()(Xp)
        Xp = Dense(x_dim, activation='linear', bias=True)(Xp)
    else:
        assert False, 'nonononon'
    Xp = Reshape((1, x_dim))(Xp)  # (None, 1, x_dim)
    Xp = DotSumMergeAdhoc()([X, Xp])  # (None, steps, 1)
    if True:  # debug
        Xp = Activation('sigmoid')(Xp)  # (None, steps, 1)
    else:
        # the following can be uncommented to replace sigmoid with softmax
        Xp = Reshape((Xp._keras_shape[1], ))(Xp)  # (None, steps)
        Xp = Activation('softmax')(Xp)  # (None, steps)
        Xp = Reshape((Xp._keras_shape[-1], 1))(Xp)  # (None, steps, 1)
    X = DotMergeAdhoc(scale=scale)([X, Xp])  # (None, steps, x_dim)
    return X
Developer: chentingpc, Project: NNCF, Lines: 38, Source: gatings.py
Example 16: ResidualBlock1D_helper

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            #basic = BatchNormalization()(basic)  # triggers known keras bug w/ TimeDistributed: https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)
        # note that this strides without averaging
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))
    return f
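A brief usage sketch for this helper (my own illustration, not from the source project): the residual Add() requires the input to already have `filters` channels, and the trailing AveragePooling1D with pool_size=1 merely subsamples every final_stride-th step:

    from keras.layers import Input
    from keras.models import Model

    # Two stacked ELU + Conv1D layers, 64 filters, kernel size 3,
    # with the block output subsampled by a stride of 2.
    block = ResidualBlock1D_helper(layers=2, kernel_size=3, filters=64, final_stride=2)

    inp = Input(shape=(100, 64))  # channel count must equal `filters` for Add() to work
    out = block(inp)              # -> (None, 50, 64)
    model = Model(inputs=inp, outputs=out)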
Example 17: __backbone

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def __backbone(inp, C=0.001, initial='he_normal'):
    """
    Stack of convolutional layers for feature learning on signal segments.
    :param inp: keras tensor, a single signal-slice input
    :param C: double, regularization coefficient, default 0.001
    :param initial: str, weight initialization method, default he_normal
    :return: keras tensor, output of the slice after the convolutional layers
    """
    net = Conv1D(4, 31, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(inp)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = AveragePooling1D(5, 5)(net)
    net = Conv1D(8, 11, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = AveragePooling1D(5, 5)(net)
    net = Conv1D(8, 7, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = AveragePooling1D(5, 5)(net)
    net = Conv1D(16, 5, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = AveragePooling1D(int(net.shape[1]), int(net.shape[1]))(net)
    return net
Example 18: Archi_3CONV2AP_1FC256_GAP_f3_1_1fd

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f3_1_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f3_1_1fd')

#-----------------------------------------------------------------------
Example 19: pooling

# Required module: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def pooling(layer, layer_in, layerId, tensor=True):
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    out = {}
    layer_type = layer['params']['layer_type']
    pool_type = layer['params']['pool']
    padding = get_padding(layer)
    if (layer_type == '1D'):
        strides = layer['params']['stride_w']
        kernel = layer['params']['kernel_w']
        if (padding == 'custom'):
            p_w = layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding1D(padding=p_w)(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    elif (layer_type == '2D'):
        strides = (layer['params']['stride_h'], layer['params']['stride_w'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'])
        if (padding == 'custom'):
            p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    else:
        strides = (layer['params']['stride_h'], layer['params']['stride_w'],
                   layer['params']['stride_d'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'],
                  layer['params']['kernel_d'])
        if (padding == 'custom'):
            p_h, p_w, p_d = layer['params']['pad_h'], layer['params']['pad_w'], \
                layer['params']['pad_d']
            out[layerId + 'Pad'] = ZeroPadding3D(padding=(p_h, p_w, p_d))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    # Note - figure out a permanent fix for padding calculation of layers
    # in case padding is given in layer attributes
    # if ('padding' in layer['params']):
    #     padding = layer['params']['padding']
    out[layerId] = poolMap[(layer_type, pool_type)](
        pool_size=kernel, strides=strides, padding=padding)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out
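A hedged sketch of how this dispatcher might be called. The dict keys follow the ones read in the function body, but get_padding and the rest of the surrounding framework are not shown on this page, so this is illustrative only:

    from keras.layers import Input

    ip = Input(shape=(128, 16))
    layer = {'params': {
        'layer_type': '1D',  # selects the 1D branch
        'pool': 'AVE',       # maps to AveragePooling1D via poolMap
        'stride_w': 2,
        'kernel_w': 2,
        # plus whatever padding params the project's get_padding() expects
    }}
    out = pooling(layer, [ip], 'pool1')
    pooled = out['pool1']    # the AveragePooling1D output tensor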