This tutorial collects practical code examples for Python's keras.layers.Layer; hopefully you find them useful.
This article gathers typical usage examples of keras.layers.Layer from real Python projects. If you are unsure how keras.layers.Layer is used in practice, the curated examples below should help; you can also explore further usage examples from the keras.layers module. 23 code examples of layers.Layer are shown, sorted by popularity by default.

Example 1: modelDiscriminator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def modelDiscriminator(self, name=None):
    # Specify input
    input_img = Input(shape=self.img_shape)
    # Layer 1 (instance normalization is not used for this layer)
    x = self.ck(input_img, 64, False, 2)
    # Layer 2
    x = self.ck(x, 128, True, 2)
    # Layer 3
    x = self.ck(x, 256, True, 2)
    # Layer 4
    x = self.ck(x, 512, True, 1)
    # Output layer
    if self.use_patchgan:
        x = Conv2D(filters=1, kernel_size=4, strides=1, padding='same')(x)
    else:
        x = Flatten()(x)
        x = Dense(1)(x)
    # No sigmoid activation here, to keep the discriminator cost away from fp32 machine epsilon:
    # x = Activation('sigmoid')(x)
    return Model(inputs=input_img, outputs=x, name=name)
Developer ID: simontomaskarlsson, Project: CycleGAN-Keras, Lines: 21, Source: model.py
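The discriminator above delegates to a ck helper that this snippet does not include. In the CycleGAN paper's notation, Ck is a 4x4 Convolution-InstanceNorm-LeakyReLU block with k filters; the following is a minimal sketch under that assumption (InstanceNormalization taken from keras-contrib), not the repo's actual code:

# Hypothetical reconstruction of self.ck -- not part of the original snippet.
from keras.layers import Conv2D, LeakyReLU
from keras_contrib.layers import InstanceNormalization

def ck(self, x, k, use_normalization, stride):
    x = Conv2D(filters=k, kernel_size=4, strides=stride, padding='same')(x)
    if use_normalization:
        x = InstanceNormalization(axis=3)(x)  # per-sample, per-channel normalization
    x = LeakyReLU(alpha=0.2)(x)
    return x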
Example 2: custom_layer

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def custom_layer():
    class MyDense(Layer):
        def __init__(self, output_dim, **kwargs):
            self.output_dim = output_dim
            super(MyDense, self).__init__(**kwargs)

        def build(self, input_shape):
            self.kernel = self.add_weight(name='kernel',
                                          shape=(input_shape[1], self.output_dim),
                                          initializer='uniform',
                                          trainable=True)
            super(MyDense, self).build(input_shape)

        def call(self, x):
            return K.dot(x, self.kernel)

        def compute_output_shape(self, input_shape):
            return (input_shape[0], self.output_dim)

        def get_config(self):
            # Include the base config so the layer survives save/load round trips
            config = super(MyDense, self).get_config()
            config['output_dim'] = self.output_dim
            return config

    return MyDense
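A quick usage sketch for the factory above (assuming keras.backend as K and keras.layers.Layer are imported where the class is defined; shapes and data are illustrative):

import numpy as np
from keras.layers import Input
from keras.models import Model

MyDense = custom_layer()
inp = Input(shape=(8,))
out = MyDense(4)(inp)                      # project 8 features down to 4
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='sgd', loss='mse')
model.fit(np.random.rand(16, 8), np.random.rand(16, 4), epochs=1, verbose=0)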
Example 3: _validate_input_shape

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def _validate_input_shape(self, input_shape):
    if len(input_shape) != 3:
        raise ValueError("Layer received an input shape {0} but expected three inputs (Q, V, K).".format(input_shape))
    else:
        if input_shape[0][0] != input_shape[1][0] or input_shape[1][0] != input_shape[2][0]:
            raise ValueError("All three inputs (Q, V, K) have to have the same batch size; received batch sizes: {0}, {1}, {2}".format(input_shape[0][0], input_shape[1][0], input_shape[2][0]))
        if input_shape[0][1] != input_shape[1][1] or input_shape[1][1] != input_shape[2][1]:
            # Report the sequence lengths (index 1), not the batch sizes
            raise ValueError("All three inputs (Q, V, K) have to have the same length; received lengths: {0}, {1}, {2}".format(input_shape[0][1], input_shape[1][1], input_shape[2][1]))
        if input_shape[0][2] != input_shape[1][2]:
            raise ValueError("Input shapes of Q {0} and V {1} do not match.".format(input_shape[0], input_shape[1]))
Developer ID: zimmerrol, Project: keras-utility-layer-collection, Lines: 12, Source: attention.py
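To illustrate, the method expects a list of three shape tuples of the form (batch, length, dim); mismatched sequence lengths trigger the second error:

# Illustrative shapes (batch, length, dim) for the validation above:
ok = [(32, 10, 64), (32, 10, 64), (32, 10, 64)]   # passes validation
bad = [(32, 10, 64), (32, 12, 64), (32, 12, 64)]  # fails: sequence lengths differ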
Example 4: layernorm

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def layernorm(x, axis, epsilon, gamma, beta):
    # assert self.built, 'Layer must be built before being called'
    input_shape = K.shape(x)
    reduction_axes = list(range(K.ndim(x)))
    del reduction_axes[axis]
    del reduction_axes[0]
    broadcast_shape = [1] * K.ndim(x)
    broadcast_shape[axis] = input_shape[axis]
    broadcast_shape[0] = K.shape(x)[0]

    # Perform normalization: centering and reduction
    mean = K.mean(x, axis=reduction_axes)
    broadcast_mean = K.reshape(mean, broadcast_shape)
    x_centred = x - broadcast_mean
    variance = K.mean(x_centred ** 2, axis=reduction_axes) + epsilon
    broadcast_variance = K.reshape(variance, broadcast_shape)
    x_normed = x_centred / K.sqrt(broadcast_variance)

    # Perform scaling and shifting
    broadcast_shape_params = [1] * K.ndim(x)
    broadcast_shape_params[axis] = K.shape(x)[axis]
    broadcast_gamma = K.reshape(gamma, broadcast_shape_params)
    broadcast_beta = K.reshape(beta, broadcast_shape_params)
    x_LN = broadcast_gamma * x_normed + broadcast_beta
    return x_LN
Developer ID: ChihebTrabelsi, Project: deep_complex_networks, Lines: 32, Source: norm.py
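A usage sketch for the layernorm function above, assuming it is in scope. Gamma and beta are defined on the last (feature) axis; the statistics are computed over the remaining non-batch axes. Values are illustrative:

import numpy as np
from keras import backend as K

x = K.variable(np.random.rand(2, 5, 8))   # (batch, time, features)
gamma = K.variable(np.ones((8,)))          # scale, one per feature
beta = K.variable(np.zeros((8,)))          # shift, one per feature
y = layernorm(x, axis=-1, epsilon=1e-5, gamma=gamma, beta=beta)
print(K.eval(y).shape)                     # (2, 5, 8): shape is preserved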
Example 5: call

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def call(self, x, mask=None):
    assert self.built, 'Layer must be built before being called'
    return layernorm(x, self.axis, self.epsilon, self.gamma, self.beta)
Developer ID: ChihebTrabelsi, Project: deep_complex_networks, Lines: 5, Source: norm.py
Example 6: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def __init__(self,
             center=True,
             scale=True,
             epsilon=None,
             gamma_initializer='ones',
             beta_initializer='zeros',
             gamma_regularizer=None,
             beta_regularizer=None,
             gamma_constraint=None,
             beta_constraint=None,
             **kwargs):
    """Layer normalization layer

    See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)

    :param center: Add an offset parameter if it is True.
    :param scale: Add a scale parameter if it is True.
    :param epsilon: Epsilon for calculating variance.
    :param gamma_initializer: Initializer for the gamma weight.
    :param beta_initializer: Initializer for the beta weight.
    :param gamma_regularizer: Optional regularizer for the gamma weight.
    :param beta_regularizer: Optional regularizer for the beta weight.
    :param gamma_constraint: Optional constraint for the gamma weight.
    :param beta_constraint: Optional constraint for the beta weight.
    :param kwargs:
    """
    super(LayerNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.center = center
    self.scale = scale
    if epsilon is None:
        epsilon = K.epsilon() * K.epsilon()
    self.epsilon = epsilon
    self.gamma_initializer = keras.initializers.get(gamma_initializer)
    self.beta_initializer = keras.initializers.get(beta_initializer)
    self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
    self.beta_regularizer = keras.regularizers.get(beta_regularizer)
    self.gamma_constraint = keras.constraints.get(gamma_constraint)
    self.beta_constraint = keras.constraints.get(beta_constraint)
    self.gamma, self.beta = None, None
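A usage sketch, assuming the accompanying build() and call() methods of this LayerNormalization class (not shown in this example) are defined. The layer drops into a functional model like any other:

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(10, 32))
x = LayerNormalization()(inp)           # normalize each timestep's features
out = Dense(16, activation='relu')(x)
model = Model(inputs=inp, outputs=out)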
Example 7: modelGenerator

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def modelGenerator(self, name=None):
    # Specify input
    input_img = Input(shape=self.img_shape)
    # Layer 1
    x = ReflectionPadding2D((3, 3))(input_img)
    x = self.c7Ak(x, 32)
    # Layer 2
    x = self.dk(x, 64)
    # Layer 3
    x = self.dk(x, 128)

    if self.use_multiscale_discriminator:
        # Layer 3.5
        x = self.dk(x, 256)

    # Layers 4-12: residual blocks
    for _ in range(4, 13):
        x = self.Rk(x)

    if self.use_multiscale_discriminator:
        # Layer 12.5
        x = self.uk(x, 128)

    # Layer 13
    x = self.uk(x, 64)
    # Layer 14
    x = self.uk(x, 32)
    x = ReflectionPadding2D((3, 3))(x)
    x = Conv2D(self.channels, kernel_size=7, strides=1)(x)
    x = Activation('tanh')(x)  # the paper says ReLU here, but tanh is what is actually used
    return Model(inputs=input_img, outputs=x, name=name)
Developer ID: simontomaskarlsson, Project: CycleGAN-Keras, Lines: 36, Source: model.py
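Like the discriminator, the generator leans on helper blocks (c7Ak, dk, Rk, uk) that are not shown here. As one example, Rk in the CycleGAN paper's notation is a residual block of two 3x3 convolutions that preserves the channel count; the following is a hypothetical sketch, not the repo's actual code:

# Hypothetical reconstruction of self.Rk -- not part of the original snippet.
from keras.layers import Conv2D, Activation, add
from keras_contrib.layers import InstanceNormalization

def Rk(self, x0):
    k = int(x0.shape[-1])  # keep the channel count of the input
    x = Conv2D(filters=k, kernel_size=3, strides=1, padding='same')(x0)
    x = InstanceNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=k, kernel_size=3, strides=1, padding='same')(x)
    x = InstanceNormalization(axis=3)(x)
    return add([x, x0])    # residual connection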
Example 8: build

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    F = input_shape[0][-1]

    # Initialize weights for each attention head
    for head in range(self.attn_heads):
        # Layer kernel
        kernel = self.add_weight(shape=(F, self.F_),
                                 initializer=self.kernel_initializer,
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint,
                                 name='kernel_{}'.format(head))
        self.kernels.append(kernel)

        # Layer bias
        if self.use_bias:
            bias = self.add_weight(shape=(self.F_, ),
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint,
                                   name='bias_{}'.format(head))
            self.biases.append(bias)

        # Attention kernels
        attn_kernel_self = self.add_weight(shape=(self.F_, 1),
                                           initializer=self.attn_kernel_initializer,
                                           regularizer=self.attn_kernel_regularizer,
                                           constraint=self.attn_kernel_constraint,
                                           name='attn_kernel_self_{}'.format(head))
        attn_kernel_neighs = self.add_weight(shape=(self.F_, 1),
                                             initializer=self.attn_kernel_initializer,
                                             regularizer=self.attn_kernel_regularizer,
                                             constraint=self.attn_kernel_constraint,
                                             name='attn_kernel_neigh_{}'.format(head))
        self.attn_kernels.append([attn_kernel_self, attn_kernel_neighs])
    self.built = True
Example 9: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def __init__(self, inner_layer_arg, **kwargs):
    # Initialise based on one of the three initialisation methods
    # Case 1: inner_layer_arg is the convolution width
    # (NOTE: `long` exists only in Python 2; this is Python 2-era code)
    if isinstance(inner_layer_arg, (int, long)):
        self.conv_width = inner_layer_arg
        dense_layer_kwargs, kwargs = filter_func_args(layers.Dense.__init__, kwargs,
                                                      overrule_args=['name'])
        self.create_inner_layer_fn = lambda: layers.Dense(self.conv_width, **dense_layer_kwargs)

    # Case 2: an initialised (but not yet built) keras layer is given
    elif isinstance(inner_layer_arg, layers.Layer):
        assert inner_layer_arg.built == False, 'When initialising with a keras layer, it cannot be built.'
        _, self.conv_width = inner_layer_arg.get_output_shape_for((None, None))
        # layer_from_config will mutate the config dict, therefore create a get fn
        self.create_inner_layer_fn = lambda: layer_from_config(dict(
            class_name=inner_layer_arg.__class__.__name__,
            config=inner_layer_arg.get_config()))

    # Case 3: a function is provided that returns an initialised keras layer
    elif callable(inner_layer_arg):
        example_instance = inner_layer_arg()
        assert isinstance(example_instance, layers.Layer), 'When initialising with a function, the function has to return a keras layer'
        assert example_instance.built == False, 'When initialising with a keras layer, it cannot be built.'
        _, self.conv_width = example_instance.get_output_shape_for((None, None))
        self.create_inner_layer_fn = inner_layer_arg

    else:
        raise ValueError('NeuralGraphHidden has to be initialised with 1). int conv_width, 2). a keras layer instance, or 3). a function returning a keras layer instance.')

    super(NeuralGraphHidden, self).__init__(**kwargs)
Developer ID: keiserlab, Project: keras-neural-graph-fingerprint, Lines: 33, Source: layers.py
Example 10: copy_weights

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def copy_weights(src_model, dst_model, must_exist=True):
    """Copy weights from `src_model` to `dst_model`.

    Parameters
    ----------
    src_model
        Keras source model.
    dst_model
        Keras destination model.
    must_exist: bool
        If `True`, raises `ValueError` if a layer in `dst_model` does not exist
        in `src_model`.

    Returns
    -------
    list
        Names of layers that were copied.
    """
    copied = []
    for dst_layer in dst_model.layers:
        # Search by name; src_layer stays None when no match exists
        src_layer = None
        for layer in src_model.layers:
            if layer.name == dst_layer.name:
                src_layer = layer
                break
        if not src_layer:
            if must_exist:
                raise ValueError('Layer "%s" not found!' % dst_layer.name)
            else:
                continue
        dst_layer.set_weights(src_layer.get_weights())
        copied.append(dst_layer.name)
    return copied
Developer ID: cangermueller, Project: deepcpg, Lines: 34, Source: utils.py
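A usage sketch with two models that share layer names (the Input layer matches by name too, but carries no weights):

from keras.layers import Input, Dense
from keras.models import Model

def make_model():
    inp = Input(shape=(4,), name='in')
    return Model(inp, Dense(2, name='out')(inp))

src, dst = make_model(), make_model()
print(copy_weights(src, dst))  # ['in', 'out']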
Example 11: embed_input

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def embed_input(self,
                input_layer: Layer,
                embed_function: Callable[[Layer, str, str], Layer],
                text_trainer,
                embedding_suffix: str=''):
    return embed_function(input_layer,
                          embedding_name='characters' + embedding_suffix,
                          vocab_name='words')
Example 12: embed_input

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def embed_input(self,
                input_layer: Layer,
                embed_function: Callable[[Layer, str, str], Layer],
                text_trainer,
                embedding_suffix: str=""):
    # pylint: disable=protected-access
    return embed_function(input_layer,
                          embedding_name='words' + embedding_suffix,
                          vocab_name='words')
Example 13: get_custom_objects

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def get_custom_objects(self) -> Dict[str, 'Layer']:  # pylint: disable=no-self-use
    """
    If you use any custom ``Layers`` in your ``embed_input`` method, you need to return them
    here, so that the ``TextTrainer`` can correctly load models.
    """
    return {}
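A usage sketch: pass the returned dictionary to Keras model loading so the custom layers can be deserialized (`tokenizer` and the file name are placeholders):

from keras.models import load_model

model = load_model('my_model.h5', custom_objects=tokenizer.get_custom_objects())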
Example 14: embed_input

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def embed_input(self,
                input_layer: Layer,
                embed_function: Callable[[Layer, str, str], Layer],
                text_trainer,
                embedding_suffix: str=''):
    """
    Applies embedding layers to the input_layer.  See :func:`TextTrainer._embed_input
    <deep_qa.training.text_trainer.TextTrainer._embed_input>` for a more detailed comment on
    what this method does.

    Parameters
    ----------
    input_layer: Keras ``Input()`` layer
        The layer to embed.
    embed_function: Callable[['Layer', str, str], 'Tensor']
        This should be the __get_embedded_input method from your instantiated ``TextTrainer``.
        This function actually applies an ``Embedding`` layer (and maybe also a projection and
        dropout) to the input layer.
    text_trainer: TextTrainer
        Simple ``Tokenizers`` will just need to use the ``embed_function`` that gets passed as
        a parameter here, but complex ``Tokenizers`` might need more than just an embedding
        function.  So that you can get an encoder or other things from the ``TextTrainer``
        here if you need them, we take this object as a parameter.
    embedding_suffix: str, optional (default="")
        A suffix to add to embedding keys that we use, so that, e.g., you could specify
        several different word embedding matrices, for whatever reason.
    """
    raise NotImplementedError
Example 15: reset_states

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
Developer ID: DingKe, Project: qrnn, Lines: 13, Source: qrnn.py
Example 16: reset_states

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def reset_states(self, states=None):
    if not self.stateful:
        raise AttributeError('Layer must be stateful.')
    if not self.input_spec:
        raise RuntimeError('Layer has never been called '
                           'and thus has no states.')
    batch_size = self.input_spec.shape[0]
    if not batch_size:
        raise ValueError('If a QRNN is stateful, it needs to know '
                         'its batch size. Specify the batch size '
                         'of your input tensors:\n'
                         '- If using a Sequential model, '
                         'specify the batch size by passing '
                         'a `batch_input_shape` '
                         'argument to your first layer.\n'
                         '- If using the functional API, specify '
                         'the time dimension by passing a '
                         '`batch_shape` argument to your Input layer.')
    if self.states[0] is None:
        self.states = [K.zeros((batch_size, self.units))
                       for _ in self.states]
    elif states is None:
        for state in self.states:
            K.set_value(state, np.zeros((batch_size, self.units)))
    else:
        if not isinstance(states, (list, tuple)):
            states = [states]
        if len(states) != len(self.states):
            raise ValueError('Layer ' + self.name + ' expects ' +
                             str(len(self.states)) + ' states, '
                             'but it received ' + str(len(states)) +
                             ' state values. Input received: ' + str(states))
        for index, (value, state) in enumerate(zip(states, self.states)):
            if value.shape != (batch_size, self.units):
                raise ValueError('State ' + str(index) +
                                 ' is incompatible with layer ' +
                                 self.name + ': expected shape=' +
                                 str((batch_size, self.units)) +
                                 ', found shape=' + str(value.shape))
            K.set_value(state, value)
Developer ID: amansrivastava17, Project: embedding-as-service, Lines: 45, Source: qrnn.py
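The reset pattern above mirrors Keras's built-in recurrent layers. A minimal stateful sketch, shown with the built-in LSTM for concreteness (a stateful QRNN would be wired the same way):

import numpy as np
from keras.layers import Input, LSTM
from keras.models import Model

inp = Input(batch_shape=(8, 20, 32))   # stateful layers need a fixed batch size
out = LSTM(64, stateful=True)(inp)
model = Model(inp, out)

x = np.random.rand(8, 20, 32)
model.predict(x)        # state is carried over to the next call
model.reset_states()    # clear it between independent sequences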
Example 17: call

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def call(self, inputs, mask=None, initial_state=None, training=None):
    # input shape: `(samples, time (padded with zeros), input_dim)`
    # note that the .build() method of subclasses MUST define
    # self.input_spec and self.state_spec with complete input shapes.
    if isinstance(inputs, list):
        initial_states = inputs[1:]
        inputs = inputs[0]
    elif initial_state is not None:
        # normalize a user-provided initial_state into a list
        initial_states = initial_state if isinstance(initial_state, (list, tuple)) else [initial_state]
    elif self.stateful:
        initial_states = self.states
    else:
        initial_states = self.get_initial_states(inputs)

    if len(initial_states) != len(self.states):
        raise ValueError('Layer has ' + str(len(self.states)) +
                         ' states but was passed ' +
                         str(len(initial_states)) +
                         ' initial states.')
    input_shape = K.int_shape(inputs)
    if self.unroll and input_shape[1] is None:
        raise ValueError('Cannot unroll a RNN if the '
                         'time dimension is undefined.\n'
                         '- If using a Sequential model, '
                         'specify the time dimension by passing '
                         'an `input_shape` or `batch_input_shape` '
                         'argument to your first layer. If your '
                         'first layer is an Embedding, you can '
                         'also use the `input_length` argument.\n'
                         '- If using the functional API, specify '
                         'the time dimension by passing a `shape` '
                         'or `batch_shape` argument to your Input layer.')
    constants = self.get_constants(inputs, training=None)
    preprocessed_input = self.preprocess_input(inputs, training=None)

    last_output, outputs, states = K.rnn(self.step,
                                         preprocessed_input,
                                         initial_states,
                                         go_backwards=self.go_backwards,
                                         mask=mask,
                                         constants=constants,
                                         unroll=self.unroll,
                                         input_length=input_shape[1])
    if self.stateful:
        updates = []
        for i in range(len(states)):
            updates.append((self.states[i], states[i]))
        self.add_update(updates, inputs)

    # Properly set learning phase
    if 0 < self.dropout < 1:
        last_output._uses_learning_phase = True
        outputs._uses_learning_phase = True

    if self.return_sequences:
        return outputs
    return last_output
Developer ID: amansrivastava17, Project: embedding-as-service, Lines: 59, Source: qrnn.py
Example 18: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def __init__(self,
             F_,
             attn_heads=1,
             attn_heads_reduction='concat',  # {'concat', 'average'}
             dropout_rate=0.5,
             activation='relu',
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             attn_kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             bias_regularizer=None,
             attn_kernel_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             attn_kernel_constraint=None,
             **kwargs):
    if attn_heads_reduction not in {'concat', 'average'}:
        raise ValueError('Possible reduction methods: concat, average')

    self.F_ = F_                                      # Number of output features (F' in the paper)
    self.attn_heads = attn_heads                      # Number of attention heads (K in the paper)
    self.attn_heads_reduction = attn_heads_reduction  # Eq. 5 and 6 in the paper
    self.dropout_rate = dropout_rate                  # Internal dropout rate
    self.activation = activations.get(activation)     # Eq. 4 in the paper
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
    self.supports_masking = False

    # Populated by build()
    self.kernels = []       # Layer kernels for attention heads
    self.biases = []        # Layer biases for attention heads
    self.attn_kernels = []  # Attention kernels for attention heads

    if attn_heads_reduction == 'concat':
        # Output will have shape (..., K * F')
        self.output_dim = self.F_ * self.attn_heads
    else:
        # Output will have shape (..., F')
        self.output_dim = self.F_

    super(GraphAttention, self).__init__(**kwargs)
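For context, a GraphAttention layer built this way (see also Example 8's build method) is typically applied to a pair of inputs: node features X and an adjacency matrix A. A usage sketch, assuming the two-input call convention of the keras-gat reference implementation:

from keras.layers import Input
from keras.models import Model

N, F = 1000, 50                      # number of nodes, input features per node
X_in = Input(shape=(F,))             # node features (batch dimension = nodes)
A_in = Input(shape=(N,))             # adjacency rows
out = GraphAttention(F_=8, attn_heads=8, attn_heads_reduction='concat')([X_in, A_in])
model = Model(inputs=[X_in, A_in], outputs=out)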
Example 19: get_lm_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def get_lm_model(self):
    """construct language model for pretraining"""
    config = self.config
    positions_input = Input(shape=(self.max_predictions_per_seq,),
                            dtype='int32', name='masked_lm_positions')
    cur_inputs = self.inputs + [positions_input]
    sequence_output = Lambda(
        function=lambda x: gather_indexes(x[0], x[1]),
        output_shape=lambda x: (x[0][0], x[1][1], x[0][2])
    )([self.sequence_output, positions_input])
    sequence_output = Dense(
        units=config.hidden_size,
        activation=get_activation(config.hidden_act),
        kernel_initializer=initializers.truncated_normal(stddev=config.initializer_range),
    )(sequence_output)
    sequence_output = LayerNormalization()(sequence_output)
    sequence_att = Lambda(
        function=lambda x: K.dot(x[0], K.permute_dimensions(x[1], pattern=(1, 0))),
        output_shape=lambda x: (x[0][0], x[0][1], x[1][0]),
    )([sequence_output, self.embedding_table])

    class AddBiasSoftmax(Layer):
        def __init__(self, **kwargs):
            super(AddBiasSoftmax, self).__init__(**kwargs)
            self.supports_masking = True

        def build(self, input_shape):
            self.bias = self.add_weight(shape=(input_shape[-1],),
                                        name='output_bias',
                                        initializer=initializers.get('zeros'))
            super(AddBiasSoftmax, self).build(input_shape)

        def call(self, inputs, **kwargs):
            output = K.bias_add(inputs, self.bias)
            output = K.softmax(output, axis=-1)
            return output

        def compute_output_shape(self, input_shape):
            return input_shape

    sequence_softmax = AddBiasSoftmax()(sequence_att)

    self.lm_model = Model(inputs=cur_inputs, outputs=sequence_softmax, name='lm_model')
    return self.lm_model
Developer ID: miroozyx, Project: BERT_with_keras, Lines: 48, Source: modeling.py
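The first Lambda in Example 19 relies on a gather_indexes helper that this snippet does not include. Below is a sketch of how such a helper is commonly written, following the TensorFlow BERT reference implementation; the repo's actual helper may differ:

# Sketch of gather_indexes, assumed from the TF BERT reference implementation.
import tensorflow as tf

def gather_indexes(sequence_tensor, positions):
    """Pick out the vectors at `positions` from (batch, seq_len, width)."""
    shape = tf.shape(sequence_tensor)
    batch_size, seq_length = shape[0], shape[1]
    # Offset each row's positions so they index into the flattened batch
    flat_offsets = tf.reshape(tf.range(batch_size) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence = tf.reshape(sequence_tensor, [batch_size * seq_length, -1])
    gathered = tf.gather(flat_sequence, flat_positions)
    return tf.reshape(gathered, [batch_size, tf.shape(positions)[1], -1])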
Example 20: reset_states -- the code is identical, line for line, to Example 16 above; see that example for the full listing.
Developer ID: DingKe, Project: nn_playground, Lines: 45, Source: qrnn.py
Example 21: call -- the code is identical, line for line, to Example 17 above; see that example for the full listing.
Developer ID: DingKe, Project: nn_playground, Lines: 59, Source: qrnn.py
Example 22: _embed_input

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def _embed_input(self, input_layer: Layer, embedding_suffix: str=""):
    """
    This function embeds a word sequence input, using an embedding defined by
    ``embedding_suffix``.  You should call this function in your ``_build_model`` method any
    time you want to convert word indices into word embeddings.  Note that if this is used in
    conjunction with ``_get_sentence_shape``, we will do the correct thing for whatever
    :class:`~deep_qa.data.tokenizers.tokenizer.Tokenizer` you use.  The actual input to this
    might be words and characters, and we might actually do a concatenation of a word
    embedding and a character-level encoder.  All of this is handled transparently to your
    concrete model subclass, if you use the API correctly, calling ``_get_sentence_shape()``
    to get the shape for your ``Input`` layer, and passing that input layer into this
    ``_embed_input()`` method.

    We need to take the input Layer here, instead of just returning a Layer that you can use
    as you wish, because we might have to apply several layers to the input, depending on the
    parameters you specified for embedding things.  So we return, essentially,
    ``embedding(input_layer)``.

    The input layer can have arbitrary shape, as long as it ends with a word sequence.  For
    example, you could pass in a single sentence, a set of sentences, or a set of sets of
    sentences, and we will handle them correctly.

    Internally, we will create a dictionary mapping embedding names to embedding layers, so
    if you have several things you want to embed with the same embedding layer, be sure you
    use the same name each time (or just don't pass a name, which accomplishes the same
    thing).  If for some reason you want to have different embeddings for different inputs,
    use a different name for the embedding.

    In this function, we pass the work off to self.tokenizer, which might need to do some
    additional processing to actually give you a word embedding (e.g., if your text encoder
    uses both words and characters, we need to run the character encoder and concatenate the
    result with a word embedding).

    Note that the ``embedding_suffix`` parameter is a `suffix` to whatever name the tokenizer
    will give to the embeddings it creates.  Typically, the tokenizer will use the name
    ``words``, though it could also use ``characters``, or something else.  So if you pass
    ``_A`` for ``embedding_suffix``, you will end up with actual embedding names like
    ``words_A`` and ``characters_A``.  These are the keys you need to specify in your
    parameter file, for embedding sizes etc.  When constructing actual ``Embedding`` layers,
    we will further append the string ``_embedding``, so the layer would be named
    ``words_A_embedding``.
    """
    return self.tokenizer.embed_input(input_layer, self.__get_embedded_input, self, embedding_suffix)
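To make the calling convention concrete, here is a hypothetical _build_model of a TextTrainer subclass; the encoder call and the softmax head are illustrative, not taken from the library:

from keras.layers import Input, Dense
from keras.models import Model

def _build_model(self):
    # The shape comes from the tokenizer via _get_sentence_shape(), so word- and
    # character-level inputs are handled transparently.
    sentence_input = Input(shape=self._get_sentence_shape(), dtype='int32',
                           name='sentence_input')
    embedded = self._embed_input(sentence_input)   # indices -> embeddings
    encoded = self._get_encoder()(embedded)        # collapse the word sequence
    predictions = Dense(2, activation='softmax')(encoded)
    return Model(inputs=sentence_input, outputs=predictions)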
Example 23: __get_new_embedding

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def __get_new_embedding(self, name: str, vocab_name: str='words'):
    """
    Creates an Embedding Layer (and possibly also a Dense projection Layer) based on the
    parameters you've passed to the TextTrainer.  These could be pre-trained embeddings or
    not, could include a projection or not, and so on.

    Parameters
    ----------
    name : ``str``
        The name of the embedding.  This needs to correspond to one of the keys in the
        ``embeddings`` parameter dictionary passed to the constructor.
    """
    embedding_params = self.embedding_params.pop(name)
    with tensorflow.device("/cpu:0"):
        pretrained_file = embedding_params.pop('pretrained_file', None)
        projection_layer = None
        if pretrained_file:
            embedding_layer = PretrainedEmbeddings.get_embedding_layer(
                    pretrained_file,
                    self.data_indexer,
                    embedding_params.pop('fine_tune', False),
                    name=name + '_embedding')
            if embedding_params.pop('project', False):
                # This projection layer is not time distributed, because we handle it later
                # in __get_embedded_input - this allows us to more easily reuse embeddings
                # for inputs with different shapes, as Keras sets layer attributes such as
                # input shape the first time the layer is called, which is overly restrictive
                # in the case of sharing embedding lookup tables.
                projection_layer = Dense(units=embedding_params.pop('dimension'),
                                         name=name + "_projection")
            else:
                embedding_dimension = embedding_params.pop('dimension', None)
                if embedding_dimension is not None and embedding_dimension != embedding_layer.output_dim:
                    raise ConfigurationError("You have specified both 'pretrained_file' "
                                             "and 'dimension' in your embedding parameters, but "
                                             "the 'project' argument was either False or unset and the "
                                             "dimension you specified was not equal to the pretrained "
                                             "embedding size. Refusing to continue without clarification "
                                             "of parameters.")
        else:
            embedding_layer = Embedding(
                    input_dim=self.data_indexer.get_vocab_size(vocab_name),
                    output_dim=embedding_params.pop('dimension'),
                    mask_zero=True,  # this handles padding correctly
                    name=name + '_embedding')
            if embedding_params.pop('project', False):
                raise ConfigurationError("You are projecting randomly initialised embeddings. Change "
                                         "'project' to false or add pretrained_file to your config.")
        dropout = embedding_params.pop('dropout', 0.5)

    # We now should have popped all parameters from this embedding scope,
    # so we check for any which remain.
    embedding_params.assert_empty("embedding with name {}".format(name))
    return embedding_layer, projection_layer, dropout
|