Python source examples: tensorflow.models.rnn.rnn.LSTMCell()
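The examples below come from legacy TensorFlow (0.x) code, where LSTM cells were imported from tensorflow.models.rnn rather than today's tf.keras API. As a minimal orientation sketch (the import path and the concrete sizes here are assumptions; the snippets below do not show their own imports), constructing a cell with the legacy LSTMCell(num_units, input_size, ...) signature looks roughly like this:

import tensorflow as tf
from tensorflow.models.rnn import rnn_cell  # assumed legacy import path

# Illustrative sizes only; the legacy signature takes num_units and input_size.
cell = rnn_cell.LSTMCell(num_units=128,        # size of the hidden/cell state
                         input_size=64,        # per-step input dimension
                         use_peepholes=True,   # peephole connections
                         num_proj=32)          # optional output projection
state = cell.zero_state(batch_size=16, dtype=tf.float32)  # initial zero state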
Example 1
def bi_lstm_layer(in_layer, config, reuse=False, name='Bi_LSTM'):
    num_units = config.rnn_hidden_units
    output_size = config.rnn_output_size
    # Static shapes are required: in_layer is [batch, steps, features].
    batch_size = int(in_layer.get_shape()[0])
    num_steps = int(in_layer.get_shape()[1])
    input_size = int(in_layer.get_shape()[2])
    initializer = tf.random_uniform_initializer(-0.1, 0.1)
    # Forward and backward cells share the same configuration
    # (legacy LSTMCell(num_units, input_size, ...) signature).
    lstm_cell_f = rnn_cell.LSTMCell(num_units, input_size, use_peepholes=True,
                                    num_proj=output_size, cell_clip=1.0,
                                    initializer=initializer)
    lstm_cell_b = rnn_cell.LSTMCell(num_units, input_size, use_peepholes=True,
                                    num_proj=output_size, cell_clip=1.0,
                                    initializer=initializer)
    initial_state_f = lstm_cell_f.zero_state(batch_size, tf.float32)
    # Split the [batch, steps, features] tensor into a per-step list of
    # [batch, features] tensors (legacy tf.split(axis, num, value) order).
    inputs_list = [tf.reshape(x, [batch_size, input_size])
                   for x in tf.split(1, num_steps, in_layer)]
    rnn_out, rnn_states = bi_rnn(lstm_cell_f, lstm_cell_b, inputs_list,
                                 initial_state=initial_state_f, scope=name,
                                 reuse=reuse)
    # Stack the per-step outputs back into [batch, steps, output]
    # (tf.pack is the legacy name for tf.stack).
    out_layer = tf.transpose(tf.pack(rnn_out), perm=[1, 0, 2])
    return out_layer
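A hypothetical call to this layer (the placeholder shape and variable names are illustrative, not from the source): the input must have a fully defined [batch, steps, features] shape, because the function converts get_shape() entries to int, and config must expose rnn_hidden_units and rnn_output_size.

# Illustrative only: names and sizes are assumptions.
sentence = tf.placeholder(tf.float32, [32, 20, 100])           # [batch, steps, features], fully static
encoded = bi_lstm_layer(sentence, config, name='Bi_LSTM_1')    # -> [batch, steps, output]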
Example 2
def model():
    # Start at a random location in [-1, 1] and take the first glimpse there.
    initial_loc = tf.random_uniform((batch_size, 2), minval=-1, maxval=1)
    initial_glimpse = get_glimpse(initial_loc)
    lstm_cell = rnn_cell.LSTMCell(cell_size, g_size, num_proj=cell_out_size)
    initial_state = lstm_cell.zero_state(batch_size, tf.float32)
    # Only the first input is real; the remaining steps are placeholders that
    # rnn_decoder overrides via loop_function (get_next_input).
    inputs = [initial_glimpse]
    inputs.extend([0] * (glimpses - 1))
    outputs, _ = seq2seq.rnn_decoder(inputs, initial_state, lstm_cell,
                                     loop_function=get_next_input)
    get_next_input(outputs[-1], 0)
    return outputs
Example 3
def __init__(self, rnn_size, rnn_layer, batch_size, input_embedding_size,
             dim_image, dim_hidden, max_words_q, vocabulary_size, drop_out_rate):
    self.rnn_size = rnn_size
    self.rnn_layer = rnn_layer
    self.batch_size = batch_size
    self.input_embedding_size = input_embedding_size
    self.dim_image = dim_image
    self.dim_hidden = dim_hidden
    self.max_words_q = max_words_q
    self.vocabulary_size = vocabulary_size
    self.drop_out_rate = drop_out_rate
    # question-embedding
    self.embed_ques_W = tf.Variable(
        tf.random_uniform([self.vocabulary_size, self.input_embedding_size], -0.08, 0.08),
        name='embed_ques_W')
    # encoder: RNN body
    self.lstm_1 = rnn_cell.LSTMCell(rnn_size, input_embedding_size, use_peepholes=True)
    self.lstm_dropout_1 = rnn_cell.DropoutWrapper(self.lstm_1, output_keep_prob=1 - self.drop_out_rate)
    self.lstm_2 = rnn_cell.LSTMCell(rnn_size, rnn_size, use_peepholes=True)
    self.lstm_dropout_2 = rnn_cell.DropoutWrapper(self.lstm_2, output_keep_prob=1 - self.drop_out_rate)
    self.stacked_lstm = rnn_cell.MultiRNNCell([self.lstm_dropout_1, self.lstm_dropout_2])
    # state-embedding
    self.embed_state_W = tf.Variable(
        tf.random_uniform([2 * rnn_size * rnn_layer, self.dim_hidden], -0.08, 0.08),
        name='embed_state_W')
    self.embed_state_b = tf.Variable(tf.random_uniform([self.dim_hidden], -0.08, 0.08), name='embed_state_b')
    # image-embedding
    self.embed_image_W = tf.Variable(
        tf.random_uniform([dim_image, self.dim_hidden], -0.08, 0.08),
        name='embed_image_W')
    self.embed_image_b = tf.Variable(tf.random_uniform([dim_hidden], -0.08, 0.08), name='embed_image_b')
    # score-embedding
    self.embed_scor_W = tf.Variable(
        tf.random_uniform([dim_hidden, num_output], -0.08, 0.08),
        name='embed_scor_W')
    self.embed_scor_b = tf.Variable(tf.random_uniform([num_output], -0.08, 0.08), name='embed_scor_b')
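A hypothetical instantiation of this constructor (the class name and every value below are illustrative, not from the source; note that num_output is referenced as a module-level name in the snippet and is not a constructor argument):

# Illustrative only: class name and sizes are assumptions.
model = Answer_Generator(rnn_size=512, rnn_layer=2, batch_size=500,
                         input_embedding_size=200, dim_image=4096,
                         dim_hidden=1024, max_words_q=26,
                         vocabulary_size=15000, drop_out_rate=0.5)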