This article collects typical usage examples of the Python method keras.layers.Multiply. If you are wondering what layers.Multiply does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its parent module, keras.layers. A total of 21 code examples of layers.Multiply are shown, ordered by popularity by default.

Example 1: get_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_regularizer=l2(regs[1]), input_length=1)

    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of the user and item latent factors
    predict_vector = Multiply()([user_latent, item_latent])
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name='prediction')(predict_vector)

    model = Model(inputs=[user_input, item_input], outputs=prediction)
    return model
Author: wyl6 | Project: Recommender-Systems-Samples | Source: GMF.py
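As a quick, hedged illustration of how the model above might be trained (the user/item counts, hyperparameters, and random training data are invented, and the snippet assumes the example's own imports: Input, Embedding, Flatten, Multiply, Dense, Model, and l2):

import numpy as np

# Hypothetical sizes and regularization, for illustration only.
num_users, num_items, latent_dim = 1000, 2000, 8
model = get_model(num_users, num_items, latent_dim, regs=[1e-6, 1e-6])
model.compile(optimizer='adam', loss='binary_crossentropy')

# Each training sample is a (user id, item id) pair with a 0/1 implicit-feedback label.
users = np.random.randint(0, num_users, size=(256,))
items = np.random.randint(0, num_items, size=(256,))
labels = np.random.randint(0, 2, size=(256,)).astype('float32')
model.fit([users, items], labels, epochs=1, batch_size=64)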
Example 2: _squeeze

# Required import: from keras import layers
# or: from keras.layers import Multiply
def _squeeze(self, inputs):
    """Squeeze and Excitation.

    This function defines a squeeze structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
    """
    input_channels = int(inputs.shape[-1])

    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)
    x = Multiply()([inputs, x])

    return x
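A minimal sketch of attaching this kind of squeeze-and-excitation gate to a convolutional feature map; the input size, channel count, and layer stack are illustrative, not taken from the original project:

from keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense, Reshape, Multiply
from keras.models import Model

inputs = Input(shape=(32, 32, 3))
x = Conv2D(64, 3, padding='same', activation='relu')(inputs)

# Squeeze: one global statistic per channel; Excitation: a per-channel gate in [0, 1].
se = GlobalAveragePooling2D()(x)
se = Dense(64, activation='relu')(se)
se = Dense(64, activation='hard_sigmoid')(se)
se = Reshape((1, 1, 64))(se)
x = Multiply()([x, se])   # reweight each channel of the conv output

model = Model(inputs, x)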
Example 3: test_merge_multiply

# Required import: from keras import layers
# or: from keras.layers import Multiply
def test_merge_multiply():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    o = layers.multiply([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    mul_layer = layers.Multiply()
    o2 = mul_layer([i1, i2, i3])
    assert mul_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 * x2 * x3, atol=1e-4)
Author: hello-sea | Project: DeepLearning_Wavelet-LSTM | Source: merge_test.py
Example 4: model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def model(self):
    inputs_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
    inputs_mask = Input(shape=(self.img_height, self.img_width, self.num_channels))
    inputs = Multiply()([inputs_img, inputs_mask])

    # Local discriminator
    l_dis = Conv2D(filters=64, kernel_size=5, strides=(2, 2), padding='same')(inputs)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=512, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Flatten()(l_dis)
    l_dis = Dense(units=1)(l_dis)

    model = Model(name=self.model_name, inputs=[inputs_img, inputs_mask], outputs=l_dis)
    return model
Example 5: joint_branch

# Required import: from keras import layers
# or: from keras.layers import Multiply
def joint_branch(self, trainable=True, softmax_trainable=False):
    """
    Joint branch of detection and classification.
    :param trainable: unfreeze detection branch layers if set to True
    """
    input_img = Input(shape=self.input_shape)
    x_future_det_one, x_future_cls_det_two = self.share_layer(input_img, trainable=trainable)
    x_detection = self.detection_branch_wrapper(x_future_det_one, x_future_cls_det_two,
                                                trainable=trainable,
                                                softmax_trainable=softmax_trainable)
    x_classification = self.classification_branch_wrapper(x_future_cls_det_two,
                                                          softmax_trainable=softmax_trainable)
    joint_x = Multiply(name='joint_multiply_layer')([x_detection, x_classification])

    joint_model = Model(inputs=input_img, outputs=joint_x)
    return joint_model
Author: zhuyiche | Project: sfcn-opi | Source: model.py
Example 6: GMF_get_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def GMF_get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_initializer='random_normal',
                                  embeddings_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_initializer='random_normal',
                                  embeddings_regularizer=l2(regs[1]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = Multiply()([user_latent, item_latent])

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name='prediction')(predict_vector)

    model = Model(inputs=[user_input, item_input], outputs=prediction)
    return model
Example 7: prepare_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def prepare_model(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10

    m1 = Dense(input_dim=ninputs, output_dim=out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)

    #m2 = Dense(input_dim=n_feats, output_dim=n_feats, activation='relu')(inp2)
    m2 = Dense(50, activation='relu')(inp2)
    #m2 = Dense(4, activation='relu')(m2)

    m3 = Dense(500, input_dim=n_tfidf, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)

    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2, activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2, activation='softmax')(m2)

    m = Merge(mode='concat')([m1, m2, m3])
    #mul = Multiply()([m1, m2])
    #add = Abs()([m1, m2])
    #m = Merge(mode='concat')([mul, add])

    score = Dense(output_dim=nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Author: GauravBh1010tt | Project: DeepLearn | Source: eval_fnc.py
Example 8: prepare_model2

# Required import: from keras import layers
# or: from keras.layers import Multiply
def prepare_model2(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10

    m1 = Dense(input_dim=ninputs, output_dim=out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)

    m2 = Dense(input_dim=n_feats, output_dim=n_feats, activation='relu')(inp2)
    m2 = Dense(4, activation='relu')(inp2)
    #m2 = Dense(4, activation='relu')(m2)

    m3 = Dense(500, input_dim=n_tfidf, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)

    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2, activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2, activation='softmax')(m2)

    m = Merge(mode='concat')([m1, m2, m3])
    #mul = Multiply()([m1, m2])
    #add = Abs()([m1, m2])
    #m = Merge(mode='concat')([mul, add])

    score = Dense(output_dim=nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Author: GauravBh1010tt | Project: DeepLearn | Source: eval_fnc.py
Example 9: attention_temporal

# Required import: from keras import layers
# or: from keras.layers import Multiply
def attention_temporal(self, input_data, sequence_length):
    """
    A temporal attention layer.
    :param input_data: network input
    :param sequence_length: length of the input sequence
    :return: the output of the attention layer
    """
    a = Permute((2, 1))(input_data)
    a = Dense(sequence_length, activation='sigmoid')(a)
    a_probs = Permute((2, 1))(a)
    output_attention_mul = Multiply()([input_data, a_probs])
    return output_attention_mul
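A hedged sketch of wiring such a temporal attention block onto an LSTM output; the sequence length, feature width, and surrounding layers are invented for illustration:

from keras.layers import Input, LSTM, Permute, Dense, Multiply, Flatten
from keras.models import Model

time_steps, features = 20, 32
inputs = Input(shape=(time_steps, features))
x = LSTM(64, return_sequences=True)(inputs)      # (batch, 20, 64)

# Score each time step, then gate the sequence element-wise.
a = Permute((2, 1))(x)                           # (batch, 64, 20)
a = Dense(time_steps, activation='sigmoid')(a)   # per-step attention weights
a = Permute((2, 1))(a)                           # back to (batch, 20, 64)
x = Multiply()([x, a])

out = Dense(1, activation='sigmoid')(Flatten()(x))
model = Model(inputs, out)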
Example 10: attention_element

# Required import: from keras import layers
# or: from keras.layers import Multiply
def attention_element(self, input_data, input_dim):
    """
    A self-attention unit.
    :param input_data: network input
    :param input_dim: the feature dimension of the input
    :return: the output of the attention network
    """
    input_data_probs = Dense(input_dim, activation='sigmoid')(input_data)  # sigmoid
    output_attention_mul = Multiply()([input_data, input_data_probs])  # name='att_mul'
    return output_attention_mul
Example 11: _to_normal2d

# Required import: from keras import layers
# or: from keras.layers import Multiply
def _to_normal2d(output_batch) -> ds.MultivariateNormalTriL:
    """
    :param output_batch: (n_samples, 5)
    :return: a 2D multivariate normal distribution built from the five outputs
    """
    # mean of x and y
    x_mean = Lambda(lambda o: o[:, 0])(output_batch)
    y_mean = Lambda(lambda o: o[:, 1])(output_batch)

    # std of x and y
    # std must be 0 or positive
    x_std = Lambda(lambda o: K.exp(o[:, 2]))(output_batch)
    y_std = Lambda(lambda o: K.exp(o[:, 3]))(output_batch)

    # correlation coefficient
    # correlation coefficient range is [-1, 1]
    cor = Lambda(lambda o: K.tanh(o[:, 4]))(output_batch)

    loc = Concatenate()([
        Lambda(lambda x_mean: K.expand_dims(x_mean, 1))(x_mean),
        Lambda(lambda y_mean: K.expand_dims(y_mean, 1))(y_mean)
    ])

    x_var = Lambda(lambda x_std: K.square(x_std))(x_std)
    y_var = Lambda(lambda y_std: K.square(y_std))(y_std)
    xy_cor = Multiply()([x_std, y_std, cor])

    cov = Lambda(lambda inputs: K.stack(inputs, axis=0))(
        [x_var, xy_cor, xy_cor, y_var])
    cov = Lambda(lambda cov: K.permute_dimensions(cov, (1, 0)))(cov)
    cov = Reshape((2, 2))(cov)

    scale_tril = Lambda(lambda cov: tf.cholesky(cov))(cov)
    mvn = ds.MultivariateNormalTriL(loc, scale_tril)

    return mvn
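Here the three-way Multiply builds the off-diagonal covariance term: with standard deviations sigma_x, sigma_y and correlation rho, the matrix assembled above is [[sigma_x^2, rho*sigma_x*sigma_y], [rho*sigma_x*sigma_y, sigma_y^2]], and its Cholesky factor parameterizes the bivariate normal returned by the function.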
Example 12: call

# Required import: from keras import layers
# or: from keras.layers import Multiply
def call(self, x):
    dim = K.int_shape(x)[-1]
    transform_gate = self.dense_1(x)
    transform_gate = Activation("sigmoid")(transform_gate)
    carry_gate = Lambda(lambda x: 1.0 - x, output_shape=(dim,))(transform_gate)
    transformed_data = self.dense_2(x)
    transformed_data = Activation(self.activation)(transformed_data)
    transformed_gated = Multiply()([transform_gate, transformed_data])
    identity_gated = Multiply()([carry_gate, x])
    value = Add()([transformed_gated, identity_gated])
    return value
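This is the standard highway-network gating, output = T(x)*H(x) + (1 - T(x))*x. For reference, a minimal functional-API sketch of the same gating outside a custom layer; the dimension, activations, and names below are illustrative:

from keras.layers import Input, Dense, Lambda, Multiply, Add
from keras.models import Model

dim = 64
x_in = Input(shape=(dim,))
transform_gate = Dense(dim, activation='sigmoid')(x_in)   # T(x), in [0, 1]
carry_gate = Lambda(lambda t: 1.0 - t)(transform_gate)    # 1 - T(x)
candidate = Dense(dim, activation='relu')(x_in)            # H(x)
out = Add()([Multiply()([transform_gate, candidate]),      # T(x) * H(x)
             Multiply()([carry_gate, x_in])])               # (1 - T(x)) * x
model = Model(x_in, out)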
Example 13: call

# Required import: from keras import layers
# or: from keras.layers import Multiply
def call(self, inputs, **kwargs):
    x = inputs[:, 1]
    # print('x.shape: ' + str(K.int_shape(x)))
    bool_mask = Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]),
                       output_shape=K.int_shape(x)[1:])(inputs)
    # print('bool_mask.shape: ' + str(K.int_shape(bool_mask)))
    mask = Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
    # print('mask.shape: ' + str(K.int_shape(mask)))
    x = Multiply()([mask, x])
    # print('x.shape: ' + str(K.int_shape(x)))
    return x
Example 14: attention

# Required import: from keras import layers
# or: from keras.layers import Multiply
def attention(inputs, single_attention_vector=False):
    # attention mechanism
    time_steps = k_keras.int_shape(inputs)[1]
    input_dim = k_keras.int_shape(inputs)[2]
    x = Permute((2, 1))(inputs)
    x = Dense(time_steps, activation='softmax')(x)
    if single_attention_vector:
        x = Lambda(lambda x: k_keras.mean(x, axis=1))(x)
        x = RepeatVector(input_dim)(x)
    a_probs = Permute((2, 1))(x)
    output_attention_mul = Multiply()([inputs, a_probs])
    return output_attention_mul
Example 15: build

# Required import: from keras import layers
# or: from keras.layers import Multiply
def build(self):
    # qd_input = Input((self.config.kernel_size,), name="qd_input")
    dd_input = Input((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input')

    # z = Dense(self.config.hidden_size, activation='tanh', name="qd_hidden")(qd_input)
    # qd_out = Dense(self.config.out_size, name="qd_out")(z)

    z = Dense(self.config.hidden_size, activation='tanh', name="dd_hidden")(dd_input)
    dd_init_out = Dense(self.config.out_size, name='dd_init_out')(z)

    dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
    dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
    # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)

    dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
    dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)

    if self.config.method in [1, 3]:  # no doc gating, with dense layer
        z = dd_init_out
    elif self.config.method == 2:
        logging.info("Apply doc gating")
        z = Multiply(name='dd_out')([dd_init_out, dd_w])
    else:
        raise ValueError("Method not initialized, please check config file")

    if self.config.method in [1, 2]:
        logging.info("Dense layer on top")
        z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
        out = Dense(self.config.merge_out, name='score')(z)
    else:
        logging.info("Apply doc gating, no dense layer on top, sum up scores")
        out = Dot(axes=[1, 1], name='score')([z, dd_w])

    model = Model(inputs=[dd_input, dd_gate], outputs=[out])
    print(model.summary())
    return model
Example 16: stateless_attention_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def stateless_attention_model(**kwargs):
    X = LSTM(kwargs['hidden_units'], kernel_initializer='he_normal', activation='tanh',
             dropout=kwargs['dropout'], return_sequences=True)(kwargs['embeddings'])
    attention_layer = Permute((2, 1))(X)
    attention_layer = Dense(kwargs['max_tweet_length'], activation='softmax')(attention_layer)
    attention_layer = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(attention_layer)
    attention_layer = RepeatVector(int(X.shape[2]))(attention_layer)
    attention_probabilities = Permute((2, 1), name='attention_probs')(attention_layer)
    attention_layer = Multiply()([X, attention_probabilities])
    attention_layer = Flatten()(attention_layer)
    return attention_layer
Author: MirunaPislar | Project: Sarcasm-Detection | Source: dl_models.py
Example 17: interaction

# Required import: from keras import layers
# or: from keras.layers import Multiply
def interaction(input_1, input_2):
    "Get the interaction features, then concatenate the results"
    mult = Multiply()([input_1, input_2])
    add = Add()([input_1, input_2])
    sub = substract(input_1, input_2)
    #distance = el_distance(input_1, input_2)

    out_ = Concatenate()([sub, mult, add])
    return out_
Author: zake7749 | Project: CIKM-AnalytiCup-2018 | Source: utils.py
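A hedged sketch of how such an interaction feature could feed a sentence-pair classifier; the encoder inputs and the substract stand-in below are assumptions, since the original project defines its own helpers:

from keras.layers import Input, Dense, Multiply, Add, Subtract, Concatenate
from keras.models import Model

def substract(a, b):
    # stand-in for the project's helper: element-wise difference
    return Subtract()([a, b])

dim = 128
q1 = Input(shape=(dim,))   # encoded sentence 1
q2 = Input(shape=(dim,))   # encoded sentence 2

mult = Multiply()([q1, q2])
add = Add()([q1, q2])
sub = substract(q1, q2)
features = Concatenate()([sub, mult, add])

out = Dense(1, activation='sigmoid')(Dense(64, activation='relu')(features))
model = Model([q1, q2], out)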
Example 18: NeuCF_get_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def NeuCF_get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0.0):
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # number of layers in the MLP

    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    # Embedding layers
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=mf_dim, name='mf_embedding_user',
                                  embeddings_initializer='random_normal',
                                  embeddings_regularizer=l2(reg_mf), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=mf_dim, name='mf_embedding_item',
                                  embeddings_initializer='random_normal',
                                  embeddings_regularizer=l2(reg_mf), input_length=1)

    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=int(layers[0]/2), name="mlp_embedding_user",
                                   embeddings_initializer='random_normal',
                                   embeddings_regularizer=l2(reg_layers[0]), input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=int(layers[0]/2), name='mlp_embedding_item',
                                   embeddings_initializer='random_normal',
                                   embeddings_regularizer=l2(reg_layers[0]), input_length=1)

    # MF part
    mf_user_latent = Flatten()(MF_Embedding_User(user_input))
    mf_item_latent = Flatten()(MF_Embedding_Item(item_input))
    mf_vector = Multiply()([mf_user_latent, mf_item_latent])  # element-wise multiply

    # MLP part
    mlp_user_latent = Flatten()(MLP_Embedding_User(user_input))
    mlp_item_latent = Flatten()(MLP_Embedding_Item(item_input))
    mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])
    for idx in range(1, num_layer):
        layer = Dense(layers[idx], kernel_regularizer=l2(reg_layers[idx]), activation='relu', name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)

    # Concatenate MF and MLP parts
    predict_vector = Concatenate()([mf_vector, mlp_vector])

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)

    model = Model(inputs=[user_input, item_input], outputs=prediction)
    return model
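In this NeuMF-style model, Multiply implements the GMF path (element-wise product of the user and item factors), while the MLP path learns a nonlinear interaction from the concatenated embeddings; the two are fused by the final Concatenate before the sigmoid prediction. A hedged call with made-up hyperparameters might look like:

# Illustrative hyperparameters only; layers[0] is split between the two MLP embeddings.
model = NeuCF_get_model(num_users=1000, num_items=2000, mf_dim=8,
                        layers=[64, 32, 16, 8], reg_layers=[0, 0, 0, 0], reg_mf=0.0)
model.compile(optimizer='adam', loss='binary_crossentropy')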
Example 19: __init__

# Required import: from keras import layers
# or: from keras.layers import Multiply
def __init__(self, env, args):
    super(Agent_ActorCritic, self).__init__(env)
    self.log_path = './actor_critic.log'
    self.env = env
    self.actions_avialbe = env.action_space.n
    self.feature_dim = env.observation_space.shape[0]
    self.t = 0
    self.prev_x = None
    self.actor_learning_rate = 1e-3
    self.critic_learning_rate = 1e-3
    self.gamma = 0.9
    self.dummy_act_picked = np.zeros((1, self.actions_avialbe))

    # Actor
    input_frame = Input(shape=(self.feature_dim,))
    act_picked = Input(shape=(self.actions_avialbe,))
    hidden_f = Dense(20, activation='relu')(input_frame)
    act_prob = Dense(self.actions_avialbe, activation='softmax')(hidden_f)
    selected_act_prob = Multiply()([act_prob, act_picked])
    selected_act_prob = Lambda(lambda x: K.sum(x, axis=-1, keepdims=True),
                               output_shape=(1,))(selected_act_prob)

    model = Model(inputs=[input_frame, act_picked], outputs=[act_prob, selected_act_prob])
    opt = Adam(lr=self.actor_learning_rate)
    model.compile(loss=['mse', categorical_crossentropy], loss_weights=[0.0, 1.0], optimizer=opt)
    self.actor = model

    # Critic
    model = Sequential()
    model.add(Dense(20, activation='relu', input_shape=(self.feature_dim,)))
    model.add(Dense(1))
    opt = Adam(lr=self.critic_learning_rate)
    model.compile(loss='mse', optimizer=opt)
    self.critic = model
Example 20: baseline_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def baseline_model():
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    base_model = VGGFace(model='resnet50', include_top=False)

    for x in base_model.layers[:-3]:
        x.trainable = True

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # x1_ = Reshape(target_shape=(7*7, 2048))(x1)
    # x2_ = Reshape(target_shape=(7*7, 2048))(x2)
    #
    # x_dot = Dot(axes=[2, 2], normalize=True)([x1_, x2_])
    # x_dot = Flatten()(x_dot)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])
    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)
    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00001))
    model.summary()

    return model
Author: CVxTz | Project: kinship_prediction | Source: vgg_face.py
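A note on the feature combination above: Multiply()([x3, x3]) squares the element-wise difference of the two pooled embeddings, and Multiply()([x1, x2]) takes their element-wise product. Both features are symmetric in the two inputs, so swapping the image pair does not change what the classifier sees, which is the desired behavior for a kinship-verification task.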
Example 21: baseline_model

# Required import: from keras import layers
# or: from keras.layers import Multiply
def baseline_model():
    input_1 = Input(shape=(224, 224, 3))
    input_2 = Input(shape=(224, 224, 3))

    base_model = ResNet50(weights='imagenet', include_top=False)

    for x in base_model.layers[:-3]:
        x.trainable = True

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    # x1_ = Reshape(target_shape=(7*7, 2048))(x1)
    # x2_ = Reshape(target_shape=(7*7, 2048))(x2)
    #
    # x_dot = Dot(axes=[2, 2], normalize=True)([x1_, x2_])
    # x_dot = Flatten()(x_dot)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])
    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)
    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00001))
    model.summary()

    return model
Author: CVxTz | Project: kinship_prediction | Source: baseline.py