Python source code examples: tensorflow.contrib.rnn.LSTMCell()
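The examples below show how open-source projects use tensorflow.contrib.rnn.LSTMCell(). As a minimal sketch of the common pattern (assuming TensorFlow 1.x, where tf.contrib is still available; the input shape is illustrative):

import tensorflow as tf
from tensorflow.contrib import rnn

# Illustrative input batch: [batch, time, features]
inputs = tf.placeholder(tf.float32, [None, 20, 64])

cell = rnn.LSTMCell(num_units=128)
# outputs: [batch, time, 128]; state is an LSTMStateTuple (c, h)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)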

Example 1
def build_permutation(self):

        with tf.variable_scope("encoder"):
            
            with tf.variable_scope("embedding"):
                # Embed input sequence
                W_embed = tf.get_variable("weights", [1, self.input_dimension+2, self.input_embed], initializer=self.initializer)  # +2 for TW feat. here too
                embedded_input = tf.nn.conv1d(self.input_, W_embed, 1, "VALID", name="embedded_input")
                # Batch Normalization
                embedded_input = tf.layers.batch_normalization(embedded_input, axis=2, training=self.is_training, name='layer_norm', reuse=None)

            with tf.variable_scope("dynamic_rnn"):
                # Encode input sequence
                cell1 = LSTMCell(self.num_neurons, initializer=self.initializer)  # BNLSTMCell(self.num_neurons, self.training) or cell1 = DropoutWrapper(cell1, output_keep_prob=0.9)
                # Return the output activations [Batch size, Sequence Length, Num_neurons] and last hidden state as tensors.
                encoder_output, encoder_state = tf.nn.dynamic_rnn(cell1, embedded_input, dtype=tf.float32)

        with tf.variable_scope('decoder'):
            # Ptr-net returns permutations (self.positions), with their log-probability for backprop
            self.ptr = Pointer_decoder(encoder_output, self.config)
            self.positions, self.log_softmax, self.attending, self.pointing = self.ptr.loop_decode(encoder_state)
            variable_summaries('log_softmax', self.log_softmax, with_max_min=True)
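Note: the width-1 conv1d above acts as a position-wise embedding, applying the same [input_dimension+2, input_embed] linear projection independently at every time step.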
Example 2
def __init__(self,
               num_units,
               use_peepholes=False,
               forget_bias=1.0,
               state_is_tuple=True,
               output_is_tuple=True):

    def cell_fn(n):
      return rnn.LSTMCell(
          num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)

    super(Grid1LSTMCell, self).__init__(
        num_units=num_units,
        num_dims=1,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        cell_fn=cell_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple) 
Example 3
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0,
               state_is_tuple=True,
               output_is_tuple=True):

    def cell_fn(n):
      return rnn.LSTMCell(
          num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)

    super(Grid2LSTMCell, self).__init__(
        num_units=num_units,
        num_dims=2,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=cell_fn,
        non_recurrent_fn=non_recurrent_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple) 
Example 4
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0,
               state_is_tuple=True,
               output_is_tuple=True):

    def cell_fn(n):
      return rnn.LSTMCell(
          num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)

    super(Grid3LSTMCell, self).__init__(
        num_units=num_units,
        num_dims=3,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=cell_fn,
        non_recurrent_fn=non_recurrent_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple) 
Example 5
def get_rnn_cell_list(config, name, reuse=False, seed=123, dtype=tf.float32):
    cell_list = []
    for i, units in enumerate(config['num_units']):
        cell = None
        if config['cell_type'] == 'clstm':
            cell = CustomLSTMCell(units, layer_norm=config['layer_norm'], activation=config['activation'], seed=seed,
                                  reuse=reuse, dtype=dtype, name='{}_{}'.format(name, i))
        elif config['cell_type'] == 'tflstm':

            act = get_activation(config['activation'])

            if config['layer_norm']:
                cell = LayerNormBasicLSTMCell(num_units=units, activation=act, layer_norm=config['layer_norm'],
                                              reuse=reuse)
            elif not config['layer_norm'] and config['activation'] != 'tanh':
                cell = LSTMCell(num_units=units, activation=act, reuse=reuse)
            else:
                cell = LSTMBlockCell(num_units=units)
        cell_list.append(cell)

    return cell_list 
Example 6
def _create_rnn_cell(self):
        """
        Creates a single RNN cell according to the architecture of this RNN.
        
        Returns
        -------
        rnn cell
            A single RNN cell according to the architecture of this RNN
        """
        keep_prob = 1.0 if self.keep_prob is None else self.keep_prob

        if self.cell_type == CellType.GRU:
            return DropoutWrapper(GRUCell(self.num_units), keep_prob, keep_prob)
        elif self.cell_type == CellType.LSTM:
            return DropoutWrapper(LSTMCell(self.num_units), keep_prob, keep_prob)
        else:
            raise ValueError("unknown cell type: {}".format(self.cell_type)) 
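Note on the positional arguments above: in DropoutWrapper(cell, keep_prob, keep_prob), the second and third positional parameters are input_keep_prob and output_keep_prob, so dropout is applied to both the cell's inputs and its outputs.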
Example 7
def __init__(self,
               cell,
               attention_mechanism,
               attention_layer_size=None,
               alignment_history=False,
               cell_input_fn=None,
               output_attention=True,
               initial_cell_state=None,
               name=None):
    if not isinstance(cell, (rnn.LSTMCell, rnn.GRUCell)):
      raise ValueError('SyncAttentionWrapper only supports LSTMCell and GRUCell, '
                       'Got: {}'.format(cell))
    super(SyncAttentionWrapper, self).__init__(
      cell,
      attention_mechanism,
      attention_layer_size=attention_layer_size,
      alignment_history=alignment_history,
      cell_input_fn=cell_input_fn,
      output_attention=output_attention,
      initial_cell_state=initial_cell_state,
      name=name
    ) 
Example 8
def build_cell(self, name=None):
        if self.hparams.cell_type == 'linear':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.identity, name=name)
        elif self.hparams.cell_type == 'tanh':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.tanh, name=name)
        elif self.hparams.cell_type == 'relu':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.nn.relu, name=name)
        elif self.hparams.cell_type == 'gru':
            cell = GRUCell(self.hparams.hidden_units, name=name)
        elif self.hparams.cell_type == 'lstm':
            cell = LSTMCell(self.hparams.hidden_units, name=name, state_is_tuple=False)
        else:
            raise ValueError('Provided cell type not supported.')
        return cell 
Example 9
def separable_rnn(images, num_filters_out, scope=None, keep_prob=1.0, cellType='LSTM'):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    scope: optional scope name
    keep_prob: dropout keep probability
    cellType: cell type, 'LSTM' or 'GRU'

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with tf.variable_scope(scope, "SeparableLstm", [images]):
    with tf.variable_scope("horizontal"):
      if 'LSTM' in cellType:
        cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
        cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
      elif 'GRU' in cellType:
        cell_fw = GRUCell(num_filters_out)
        cell_bw = GRUCell(num_filters_out)
      hidden = horizontal_cell(images, num_filters_out, cell_fw, cell_bw, keep_prob=keep_prob, scope=scope)
    with tf.variable_scope("vertical"):
      transposed = tf.transpose(hidden, [0, 2, 1, 3])
      if 'LSTM' in cellType:
        cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
        cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
      elif 'GRU' in cellType:
        cell_fw = GRUCell(num_filters_out)
        cell_bw = GRUCell(num_filters_out)
      output_transposed = horizontal_cell(transposed, num_filters_out, cell_fw, cell_bw, keep_prob=keep_prob, scope=scope)
    output = tf.transpose(output_transposed, [0, 2, 1, 3])
    return output 
Example 10
def cell(self):
        """ Return the cell """
        with tf.variable_scope(self.variable_scope, reuse=self.reuse):
            if self.num_layers > 1:
                # Build a fresh LSTMCell per layer: reusing one instance across
                # layers shares weights and raises a variable-scope reuse error.
                cell = rnn.MultiRNNCell([rnn.LSTMCell(self.num_units, reuse=self.reuse)
                                         for _ in range(self.num_layers)])
            else:
                cell = rnn.LSTMCell(self.num_units, reuse=self.reuse)

        return cell 
Example 11
def __init__(self, num_units, use_peepholes=False, forget_bias=1.0):
    super(Grid1LSTMCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0,
        cell_fn=lambda n, i: rnn.LSTMCell(
            num_units=n, input_size=i, use_peepholes=use_peepholes,
            forget_bias=forget_bias, state_is_tuple=False)) 
Example 12
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0):
    super(Grid2LSTMCell, self).__init__(
        num_units=num_units, num_dims=2,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn.LSTMCell(
            num_units=n, input_size=i, forget_bias=forget_bias,
            use_peepholes=use_peepholes, state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn) 
Example 13
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               use_peepholes=False,
               forget_bias=1.0):
    super(Grid3LSTMCell, self).__init__(
        num_units=num_units, num_dims=3,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn.LSTMCell(
            num_units=n, input_size=i, forget_bias=forget_bias,
            use_peepholes=use_peepholes, state_is_tuple=False),
        non_recurrent_fn=non_recurrent_fn) 
Example 14
def baseline_forward(self, X, size, n_class):
        shape = X.get_shape()
        seq = tf.transpose(X, [1, 0, 2]) 

        with tf.name_scope("LSTM"):
            lstm_cell = LSTMCell(size, forget_bias=1.0)
            outputs, states = tf.nn.dynamic_rnn(time_major=True, cell=lstm_cell, inputs=seq, dtype=tf.float32)

        with tf.name_scope("LSTM-Classifier"):
            W = tf.Variable(tf.random_normal([size, n_class]), name="W")
            b = tf.Variable(tf.random_normal([n_class]), name="b")
            output = tf.matmul(outputs[-1], W) + b

        return output 
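Because dynamic_rnn runs with time_major=True, outputs has shape [time, batch, size], so outputs[-1] above selects the final time step for the classifier.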
Example 15
def cell_create(self, scope_name):
         with tf.variable_scope(scope_name):
             if self.cell_type == 'tanh':
                 cells = rnn.MultiRNNCell([rnn.BasicRNNCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             elif self.cell_type == 'LSTM': 
                 cells = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             elif self.cell_type == 'GRU':
                 cells = rnn.MultiRNNCell([rnn.GRUCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             elif self.cell_type == 'LSTMP':
                 cells = rnn.MultiRNNCell([rnn.LSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             cells = rnn.DropoutWrapper(cells, input_keep_prob=self.dropout_ph,output_keep_prob=self.dropout_ph) 
         return cells 
Example 16
def biLSTM_layer_op(self):
        with tf.variable_scope("bi-lstm"):
            cell_fw = LSTMCell(self.hidden_dim)
            cell_bw = LSTMCell(self.hidden_dim)
            (output_fw_seq, output_bw_seq), _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=self.word_embeddings,
                sequence_length=self.sequence_lengths,
                dtype=tf.float32)
            output = tf.concat([output_fw_seq, output_bw_seq], axis=-1)
            output = tf.nn.dropout(output, self.dropout_pl)

        with tf.variable_scope("proj"):
            W = tf.get_variable(name="W",
                                shape=[2 * self.hidden_dim, self.num_tags],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                dtype=tf.float32)

            b = tf.get_variable(name="b",
                                shape=[self.num_tags],
                                initializer=tf.zeros_initializer(),
                                dtype=tf.float32)

            s = tf.shape(output)
            output = tf.reshape(output, [-1, 2*self.hidden_dim])
            pred = tf.matmul(output, W) + b

            self.logits = tf.reshape(pred, [-1, s[1], self.num_tags]) 
Example 17
def test_lstm():
    """
    Test an LSTM model.
    """
    run_ac_test(partial(RNNCellAC, make_cell=lambda: LSTMCell(32))) 
Example 18
def test_multi_rnn():
    """
    Test a stacked LSTM with nested tuple state.
    """
    def make_cell():
        return MultiRNNCell([LSTMCell(16), LSTMCell(32)])

    run_ac_test(partial(RNNCellAC, make_cell=make_cell)) 
Example 19
def __call__(self, inputs, training=False):
        """
        Runs the bidirectional LSTM, produces outputs and saves both forward and backward states as well as gradients.
        :param inputs: The inputs should be a list of shape [sequence_length, batch_size, 64]
        :param training: Flag that indicates if this is a training or evaluation stage
        :return: Returns the LSTM outputs, as well as the forward and backward hidden states.
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            with tf.variable_scope("encoder"):

                fw_lstm_cells_encoder = [rnn.LSTMCell(num_units=self.layer_sizes[i], activation=tf.nn.tanh)
                                         for i in range(len(self.layer_sizes))]
                bw_lstm_cells_encoder = [rnn.LSTMCell(num_units=self.layer_sizes[i], activation=tf.nn.tanh)
                                         for i in range(len(self.layer_sizes))]

                outputs, output_state_fw, output_state_bw = rnn.stack_bidirectional_rnn(
                    fw_lstm_cells_encoder,
                    bw_lstm_cells_encoder,
                    inputs,
                    dtype=tf.float32
                )

            print("g out shape", tf.stack(outputs, axis=1).get_shape().as_list())

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        return outputs 
Example 20
def __call__(self, support_set_embeddings, target_set_embeddings, K, training=False):
        """
        Runs an attentional LSTM read over the support set for K steps.
        :param support_set_embeddings: Tensor of shape [batch_size, k, h_g_dim]
        :param target_set_embeddings: Tensor of shape [batch_size, h_f_dim]
        :param K: Number of attention read steps
        :param training: Flag that indicates if this is a training or evaluation stage
        :return: Returns the refined target-set embeddings
        """
        b, k, h_g_dim = support_set_embeddings.get_shape().as_list()
        b, h_f_dim = target_set_embeddings.get_shape().as_list()
        with tf.variable_scope(self.name, reuse=self.reuse):
            fw_lstm_cells_encoder = rnn.LSTMCell(num_units=self.layer_size, activation=tf.nn.tanh)
            attentional_softmax = tf.ones(shape=(b, k)) * (1.0/k)
            h = tf.zeros(shape=(b, h_g_dim))
            c_h = (h, h)
            c_h = (c_h[0], c_h[1] + target_set_embeddings)
            for i in range(K):
                attentional_softmax = tf.expand_dims(attentional_softmax, axis=2)
                attented_features = support_set_embeddings * attentional_softmax
                attented_features_summed = tf.reduce_sum(attented_features, axis=1)
                c_h = (c_h[0], c_h[1] + attented_features_summed)
                x, h_c = fw_lstm_cells_encoder(inputs=target_set_embeddings, state=c_h)
                attentional_softmax = tf.layers.dense(x, units=k, activation=tf.nn.softmax, reuse=self.reuse)
                self.reuse = True

        outputs = x
        print("out shape", tf.stack(outputs, axis=0).get_shape().as_list())
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        print(self.variables)
        return outputs 
Example 21
def define_rnn_cell(cell_class, num_units, num_layers=1, keep_prob=1.0,
                    input_keep_prob=None, output_keep_prob=None):
    if input_keep_prob is None:
        input_keep_prob = keep_prob
    if output_keep_prob is None:
        output_keep_prob = keep_prob

    cells = []
    for _ in range(num_layers):
        if cell_class == 'GRU':
            cell = GRUCell(num_units=num_units)
        elif cell_class == 'LSTM':
            cell = LSTMCell(num_units=num_units)
        else:
            cell = BasicRNNCell(num_units=num_units)  # plain RNNCell is abstract

        if keep_prob < 1.0:
            cell = DropoutWrapper(cell=cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)
        cells.append(cell)

    if len(cells) > 1:
        final_cell = MultiRNNCell(cells)
    else:
        final_cell = cells[0]

    return final_cell 
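A hedged usage sketch for define_rnn_cell above (the inputs tensor and hyperparameters are illustrative):

# Two dropout-wrapped LSTM layers, unrolled over a [batch, time, features] tensor.
cell = define_rnn_cell('LSTM', num_units=256, num_layers=2, keep_prob=0.8)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)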
Example 22
def build_cell(units, cell_type='lstm', num_layers=1):
	if num_layers > 1:
		cell = rnn.MultiRNNCell([
			build_cell(units, cell_type, 1) for _ in range(num_layers)
		])
	else:
		if cell_type == "lstm":
			cell = rnn.LSTMCell(units)
		elif cell_type == "gru":
			cell = rnn.GRUCell(units)
		else:
			raise ValueError('Do not support %s' % cell_type)
	return cell 
Example 23
def lstm_cell(self):
        cell = rnn.LSTMCell(self.cfg.getint('net_work', 'hidden_size'), reuse=tf.get_variable_scope().reuse)
        return rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob) 
Example 24
def _single_cell(unit_type,
                 num_units,
                 forget_bias,
                 dropout,
                 mode,
                 residual_connection=False,
                 residual_fn=None,
                 trainable=True):
  """Create an instance of a single RNN cell."""
  # dropout (= 1 - keep_prob) is set to 0 during eval and infer
  dropout = dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0

  # Cell Type
  if unit_type == "lstm":
    single_cell = contrib_rnn.LSTMCell(
        num_units, forget_bias=forget_bias, trainable=trainable)
  elif unit_type == "gru":
    single_cell = contrib_rnn.GRUCell(num_units, trainable=trainable)
  elif unit_type == "layer_norm_lstm":
    single_cell = contrib_rnn.LayerNormBasicLSTMCell(
        num_units,
        forget_bias=forget_bias,
        layer_norm=True,
        trainable=trainable)
  elif unit_type == "nas":
    single_cell = contrib_rnn.NASCell(num_units, trainable=trainable)
  else:
    raise ValueError("Unknown unit type %s!" % unit_type)

  # Dropout (= 1 - keep_prob).
  if dropout > 0.0:
    single_cell = contrib_rnn.DropoutWrapper(
        cell=single_cell, input_keep_prob=(1.0 - dropout))

  # Residual.
  if residual_connection:
    single_cell = contrib_rnn.ResidualWrapper(
        single_cell, residual_fn=residual_fn)

  return single_cell 
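A hedged usage sketch for _single_cell above (unit type, size, and mode are illustrative): stacking several cells into a multi-layer encoder.

cells = [_single_cell("lstm", num_units=512, forget_bias=1.0,
                      dropout=0.2, mode=tf.estimator.ModeKeys.TRAIN)
         for _ in range(2)]
encoder_cell = contrib_rnn.MultiRNNCell(cells)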
Example 25
def stacked_blstm(inputs, num_layers=1):
    """Builds a stack_bidirectional_dynamic_rnn layer, which has a slightly
    different architecture than the deep_lstm function."""
    def get_cell():
        return tf.nn.rnn_cell.LSTMCell(128)

    outputs, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
        [get_cell() for _ in range(num_layers)],
        [get_cell() for _ in range(num_layers)],
        inputs,
        dtype=tf.float32)
    return outputs 
Example 26
def _biLSTMBlock(self, inputs, num_units, scope, seq_len=None, isReuse=False):
        with tf.variable_scope(scope, reuse=isReuse):
            # Each direction needs its own cell instance; sharing one LSTMCell
            # between fw and bw raises a variable-scope reuse error.
            dropLSTMCell = lambda: DropoutWrapper(LSTMCell(num_units=num_units),
                                                  output_keep_prob=self.dropout_keep_prob)
            fwLSTMCell, bwLSTMCell = dropLSTMCell(), dropLSTMCell()
            output = tf.nn.bidirectional_dynamic_rnn(cell_fw=fwLSTMCell,
                                                     cell_bw=bwLSTMCell,
                                                     inputs=inputs,
                                                     # sequence_length=seq_len,
                                                     dtype=tf.float32)
            return output

    # data encoding block ("3.1 Input Encoding" in paper) 
Example 27
def _inference(self):
        logging.info('...create inference')

        fw_state_tuple = self.unstack_fw_states(self.fw_state)

        fw_cells   = list()
        for i in range(0, self.num_layers):
            if (self.cell_type == 'lstm'):
                cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
            elif (self.cell_type == 'gru'):
                # change to GRU
                cell = rnn.GRUCell(num_units=self.cell_sizes[i])
            else:
                cell = rnn.BasicRNNCell(num_units=self.cell_sizes[i])

            cell = rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
            fw_cells.append(cell)
        self.fw_cells = rnn.MultiRNNCell(fw_cells, state_is_tuple=True)

        rnn_outputs, states = tf.nn.dynamic_rnn(
            self.fw_cells, 
            self.inputs, 
            initial_state=fw_state_tuple,
            sequence_length=self.seq_lengths,
            dtype=tf.float32, time_major=True)

        # project output from rnn output size to OUTPUT_SIZE. Sometimes it is worth adding
        # an extra layer here.
        self.projection = lambda x: layers.linear(x, 
            num_outputs=self.label_classes, activation_fn=tf.nn.sigmoid)

        self.logits = tf.map_fn(self.projection, rnn_outputs, name="logits")
        self.probs  = tf.nn.softmax(self.logits, name="probs")
        self.states = states

        tf.add_to_collection('probs',  self.probs) 