Python source code examples: tensorflow.contrib.rnn.MultiRNNCell()
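All examples below target TensorFlow 1.x, where MultiRNNCell lives in tf.contrib.rnn and stacks several RNNCell instances into one multi-layer cell. As a minimal sketch of the basic pattern (the sizes and placeholder shapes here are illustrative, not taken from any example below):

import tensorflow as tf
from tensorflow.contrib import rnn

# Each layer must be its own cell instance.
cells = [rnn.BasicLSTMCell(num_units=64) for _ in range(2)]
multi_cell = rnn.MultiRNNCell(cells, state_is_tuple=True)

# Run the stacked cell over a batch of sequences: [batch, time, features].
inputs = tf.placeholder(tf.float32, [None, 10, 32])
outputs, final_state = tf.nn.dynamic_rnn(multi_cell, inputs, dtype=tf.float32)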
Example 1
def RNN(x, weights, biases):
    # reshape to [-1, n_input]
    x = tf.reshape(x, [-1, n_input])

    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    x = tf.split(x, n_input, 1)

    # 2-layer LSTM, each layer has n_hidden units.
    # Average Accuracy = 95.20% at 50k iter
    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden), rnn.BasicLSTMCell(n_hidden)])

    # 1-layer LSTM with n_hidden units, but with lower accuracy.
    # Average Accuracy = 90.60% at 50k iter
    # Uncomment the line below to test it, but comment out the 2-layer rnn.MultiRNNCell above
    # rnn_cell = rnn.BasicLSTMCell(n_hidden)

    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # there are n_input outputs but we only want the last one
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
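A hypothetical driver for the function above, assuming the tutorial-style globals it relies on (n_input, n_hidden and the imports are assumptions; all sizes are illustrative):

import tensorflow as tf
from tensorflow.contrib import rnn

n_input = 3       # words per input window (assumed global)
n_hidden = 512    # LSTM units per layer (assumed global)
vocab_size = 112  # output vocabulary size (illustrative)

x = tf.placeholder(tf.float32, [None, n_input])
weights = {'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))}
biases = {'out': tf.Variable(tf.random_normal([vocab_size]))}
logits = RNN(x, weights, biases)  # shape [batch, vocab_size]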
Example 2
def __init__(self, state_size, num_layers, dropout_prob, base_cell):
    """Define the cell by composing/wrapping with tf.contrib.rnn functions.
    Args:
      state_size: number of units in the cell.
      num_layers: how many cells to include in the MultiRNNCell.
      dropout_prob: probability of a node being dropped.
      base_cell: (str) name of underlying cell to use (e.g. 'GRUCell')
    """
    self._state_size = state_size
    self._num_layers = num_layers
    self._dropout_prob = dropout_prob
    self._base_cell = base_cell

    def single_cell():
        """Convert cell name (str) to class, and create it."""
        return getattr(tf.contrib.rnn, base_cell)(num_units=state_size)

    if num_layers == 1:
        self._cell = single_cell()
    else:
        self._cell = MultiRNNCell(
            [single_cell() for _ in range(num_layers)])
Example 3
def __init__(self,
             state_size,
             embed_size,
             dropout_prob,
             num_layers,
             base_cell="GRUCell",
             state_wrapper=None):
    """
    Args:
      state_size: number of units in underlying rnn cell.
      embed_size: dimension size of word-embedding space.
      dropout_prob: probability of a node being dropped.
      num_layers: how many cells to include in the MultiRNNCell.
      base_cell: (str) name of underlying cell to use (e.g. 'GRUCell')
      state_wrapper: allow states to store their wrapper class. See the
        wrapper method docstring below for more info.
    """
    self.state_size = state_size
    self.embed_size = embed_size
    self.num_layers = num_layers
    self.dropout_prob = dropout_prob
    self.base_cell = base_cell
    self._wrapper = state_wrapper
Example 4
def build_lstm(self):
    def build_cell():
        cell = rnn.BasicLSTMCell(self._hidden_size, forget_bias=1.0, state_is_tuple=True)
        cell = rnn.DropoutWrapper(cell, output_keep_prob=self._keep_prob)
        return cell

    mul_cell = rnn.MultiRNNCell([build_cell() for _ in range(self._num_layer)],
                                state_is_tuple=True)
    self._init_state = mul_cell.zero_state(self._num_seq, dtype=tf.float32)
    outputs, self._final_state = tf.nn.dynamic_rnn(mul_cell, self._inputs,
                                                   initial_state=self._init_state)
    outputs = tf.reshape(outputs, [-1, self._hidden_size])
    W = tf.Variable(tf.truncated_normal([self._hidden_size, self._corpus.word_num],
                                        stddev=0.1, dtype=tf.float32))
    bias = tf.Variable(tf.zeros([1, self._corpus.word_num],
                                dtype=tf.float32), dtype=tf.float32)
    self._prediction = tf.nn.softmax(tf.matmul(outputs, W) + bias)
Example 5
def create_model(self):
    features = tf.placeholder(tf.int32, [None, self.seq_len])
    embedding = tf.get_variable(
        'embedding', [self.vocab_size + 1, self.n_hidden], dtype=tf.float32)
    x = tf.cast(tf.nn.embedding_lookup(embedding, features), tf.float32)
    labels = tf.placeholder(tf.float32, [None, self.num_classes])
    stacked_lstm = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(self.n_hidden) for _ in range(2)])
    outputs, _ = tf.nn.dynamic_rnn(stacked_lstm, x, dtype=tf.float32)
    fc1 = tf.layers.dense(inputs=outputs[:, -1, :], units=128)
    pred = tf.layers.dense(inputs=fc1, units=self.num_classes)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels))
    train_op = self.optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(labels, 1))
    eval_metric_ops = tf.count_nonzero(correct_pred)
    return features, labels, train_op, eval_metric_ops, loss
Example 6
def create_model(self):
    features = tf.placeholder(tf.int32, [None, self.seq_len])
    embedding = tf.get_variable("embedding", [self.num_classes, 8])
    x = tf.nn.embedding_lookup(embedding, features)
    # One-hot labels must be float for softmax_cross_entropy_with_logits_v2.
    labels = tf.placeholder(tf.float32, [None, self.num_classes])
    stacked_lstm = rnn.MultiRNNCell(
        [rnn.BasicLSTMCell(self.n_hidden) for _ in range(2)])
    outputs, _ = tf.nn.dynamic_rnn(stacked_lstm, x, dtype=tf.float32)
    pred = tf.layers.dense(inputs=outputs[:, -1, :], units=self.num_classes)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels))
    train_op = self.optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(labels, 1))
    eval_metric_ops = tf.count_nonzero(correct_pred)
    return features, labels, train_op, eval_metric_ops, loss
Example 7
def __init__(self, nlayers, num_units, input_size=None,
             use_peepholes=False, cell_clip=None, initializer=None,
             num_proj=None, proj_clip=None, num_unit_shards=1,
             num_proj_shards=1, forget_bias=1.0, state_is_tuple=True,
             activation=tanh):
    # Forward the caller's arguments rather than re-hard-coding the defaults.
    super(MultiInputLSTM, self).__init__(
        num_units, input_size=input_size, use_peepholes=use_peepholes,
        cell_clip=cell_clip, initializer=initializer, num_proj=num_proj,
        proj_clip=proj_clip, num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards, forget_bias=forget_bias,
        state_is_tuple=state_is_tuple, activation=activation)
    self.cell = super(MultiInputLSTM, self).__call__
    if nlayers > 1:
        # Caution: MultiRNNCell expects RNNCell instances, not bound
        # __call__ methods, and each layer should be a distinct cell object.
        self.cell = MultiRNNCell([self.cell] * nlayers)
    self.nlayers = nlayers
Example 8
def __build(self):
    w_fc_in = self.__weight_variable([self.nClasses + 1, 128], 'w_fc_in')
    b_fc_in = self.__bias_variable([128], 'b_fc_in')
    w_fc_o = self.__weight_variable([self.rnn_size, 128], 'w_fc_o')
    b_fc_o = self.__bias_variable([128], 'b_fc_o')
    # Give the output-layer variables their own names instead of reusing
    # 'w_fc_in'/'b_fc_in'.
    w_output_action = self.__weight_variable([128, self.nClasses], 'w_output_action')
    b_output_action = self.__bias_variable([self.nClasses], 'b_output_action')
    w_output_len = self.__weight_variable([128, 2], 'w_output_len')
    b_output_len = self.__bias_variable([2], 'b_output_len')

    x = tf.reshape(self.input_seq, [-1, self.nClasses + 1])
    h1 = tf.nn.relu(tf.matmul(x, w_fc_in) + b_fc_in)
    h1 = tf.reshape(h1, [-1, self.max_seq_sz, 128])

    # rnn
    h1 = tf.unstack(h1, axis=1)

    def get_cell():
        return rnn.GRUCell(self.rnn_size)

    gru_cell = rnn.MultiRNNCell([get_cell() for _ in range(self.num_layers)])
    outputs, states = rnn.static_rnn(gru_cell, h1, dtype=tf.float32)

    # fc_o
    h2 = tf.nn.relu(tf.matmul(outputs[-1], w_fc_o) + b_fc_o)

    # output
    output_label = tf.matmul(h2, w_output_action) + b_output_action
    output_len = tf.nn.relu(tf.matmul(h2, w_output_len) + b_output_len)

    self.prediction = tf.concat([output_label, output_len], 1)
    self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep=100)
Example 9
def cell(self):
    """Return the cell."""
    with tf.variable_scope(self.variable_scope, reuse=self.reuse):
        cell = rnn.LSTMCell(self.num_units, reuse=self.reuse)
        if self.num_layers > 1:
            # Each layer needs its own cell instance; [cell] * n would put
            # the same object (and therefore the same weights) in every layer.
            cell = rnn.MultiRNNCell(
                [rnn.LSTMCell(self.num_units, reuse=self.reuse)
                 for _ in range(self.num_layers)])
        return cell
Example 10
def construct_rnn_cell(num_units, cell_type='basic_rnn',
                       dropout_keep_probabilities=None):
    """Constructs cells, applies dropout and assembles a `MultiRNNCell`.

    The cell type chosen by DynamicRNNEstimator.__init__() is the same as
    returned by this function when called with the same arguments.

    Args:
      num_units: A single `int` or a list/tuple of `int`s. The size of the
        `RNNCell`s.
      cell_type: A string identifying the `RNNCell` type or a subclass of
        `RNNCell`.
      dropout_keep_probabilities: a list of dropout keep probabilities or
        `None`. If a list is given, it must have length `len(num_units) + 1`.

    Returns:
      An initialized `RNNCell`.
    """
    if not isinstance(num_units, (list, tuple)):
        num_units = (num_units,)

    cells = [_get_single_cell(cell_type, n) for n in num_units]
    if dropout_keep_probabilities:
        cells = apply_dropout(cells, dropout_keep_probabilities)
    if len(cells) == 1:
        return cells[0]
    return contrib_rnn.MultiRNNCell(cells)
Example 11
def _to_rnn_cell(cell_or_type, num_units, num_layers):
    """Constructs and returns an `RNNCell`.

    Args:
      cell_or_type: Either a string identifying the `RNNCell` type, a subclass of
        `RNNCell` or an instance of an `RNNCell`.
      num_units: The number of units in the `RNNCell`.
      num_layers: The number of layers in the RNN.

    Returns:
      An initialized `RNNCell`.

    Raises:
      ValueError: `cell_or_type` is an invalid `RNNCell` name.
      TypeError: `cell_or_type` is not a string or a subclass of `RNNCell`.
    """
    if isinstance(cell_or_type, contrib_rnn.RNNCell):
        return cell_or_type
    if isinstance(cell_or_type, str):
        cell_or_type = _CELL_TYPES.get(cell_or_type)
        if cell_or_type is None:
            raise ValueError('The supported cell types are {}; got {}'.format(
                list(_CELL_TYPES.keys()), cell_or_type))
    if not issubclass(cell_or_type, contrib_rnn.RNNCell):
        raise TypeError(
            'cell_or_type must be a subclass of RNNCell or one of {}.'.format(
                list(_CELL_TYPES.keys())))
    cell = cell_or_type(num_units=num_units)
    if num_layers > 1:
        # Build one fresh cell per layer; repeating a single instance with
        # [cell] * num_layers would share weights across layers.
        cell = contrib_rnn.MultiRNNCell(
            [cell_or_type(num_units=num_units) for _ in range(num_layers)],
            state_is_tuple=True)
    return cell
Example 12
def cell_create(self, scope_name):
    with tf.variable_scope(scope_name):
        if self.cell_type == 'tanh':
            cells = rnn.MultiRNNCell([rnn.BasicRNNCell(self.n_hidden[i])
                                      for i in range(self.n_layers)], state_is_tuple=True)
        elif self.cell_type == 'LSTM':
            cells = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden[i])
                                      for i in range(self.n_layers)], state_is_tuple=True)
        elif self.cell_type == 'GRU':
            cells = rnn.MultiRNNCell([rnn.GRUCell(self.n_hidden[i])
                                      for i in range(self.n_layers)], state_is_tuple=True)
        elif self.cell_type == 'LSTMP':
            cells = rnn.MultiRNNCell([rnn.LSTMCell(self.n_hidden[i])
                                      for i in range(self.n_layers)], state_is_tuple=True)
        else:
            # Fail loudly instead of hitting an UnboundLocalError below.
            raise ValueError('Unknown cell_type: %s' % self.cell_type)
        cells = rnn.DropoutWrapper(cells, input_keep_prob=self.dropout_ph,
                                   output_keep_prob=self.dropout_ph)
        return cells
Example 13
def getLayeredCell(layer_size, num_units, input_keep_prob,
                   output_keep_prob=1.0):
    return rnn.MultiRNNCell([rnn.DropoutWrapper(rnn.BasicLSTMCell(num_units),
                                                input_keep_prob, output_keep_prob)
                             for _ in range(layer_size)])
Example 14
def test_multi_rnn():
    """
    Test a stacked LSTM with nested tuple state.
    """
    def make_cell():
        return MultiRNNCell([LSTMCell(16), LSTMCell(32)])

    run_ac_test(partial(RNNCellAC, make_cell=make_cell))
Example 15
def define_rnn_cell(cell_class, num_units, num_layers=1, keep_prob=1.0,
                    input_keep_prob=None, output_keep_prob=None):
    if input_keep_prob is None:
        input_keep_prob = keep_prob
    if output_keep_prob is None:
        output_keep_prob = keep_prob

    cells = []
    for _ in range(num_layers):
        if cell_class == 'GRU':
            cell = GRUCell(num_units=num_units)
        elif cell_class == 'LSTM':
            cell = LSTMCell(num_units=num_units)
        else:
            # RNNCell itself is abstract; fall back to the basic RNN cell.
            cell = BasicRNNCell(num_units=num_units)
        if keep_prob < 1.0:
            cell = DropoutWrapper(cell=cell, input_keep_prob=input_keep_prob,
                                  output_keep_prob=output_keep_prob)
        cells.append(cell)

    if len(cells) > 1:
        final_cell = MultiRNNCell(cells)
    else:
        final_cell = cells[0]
    return final_cell
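A hypothetical call, assuming the cell classes were imported from tf.contrib.rnn:

from tensorflow.contrib.rnn import (GRUCell, LSTMCell, BasicRNNCell,
                                    DropoutWrapper, MultiRNNCell)

# Two LSTM layers, each wrapped with input/output dropout (keep prob 0.8).
cell = define_rnn_cell('LSTM', num_units=256, num_layers=2, keep_prob=0.8)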
Example 16
def _make_encoder(self):
    """Create the encoder"""
    inputs = layers.embed_sequence(
        self.X,
        vocab_size=self.vocab_size,
        embed_dim=self.embed_dim,
        scope='embed')

    # Project to correct dimensions:
    # b/c the bidirectional RNN's forward and backward
    # outputs are concatenated, the size will be 2x,
    # so halve the hidden sizes here to compensate
    inputs = tf.layers.dense(inputs, self.hidden_size//2)

    cell_fw = rnn.MultiRNNCell([
        self._make_cell(self.hidden_size//2) for _ in range(self.depth)
    ])
    cell_bw = rnn.MultiRNNCell([
        self._make_cell(self.hidden_size//2) for _ in range(self.depth)
    ])
    encoder_outputs, encoder_final_state = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw, cell_bw=cell_bw, sequence_length=self.sequence_length,
        inputs=inputs, dtype=tf.float32)

    # Concat forward and backward outputs
    encoder_outputs = tf.concat(encoder_outputs, 2)

    # Concat forward and backward layer states
    encoder_fw_states, encoder_bw_states = encoder_final_state
    encoder_final_state = []
    for fw, bw in zip(encoder_fw_states, encoder_bw_states):
        c = tf.concat([fw.c, bw.c], 1)
        h = tf.concat([fw.h, bw.h], 1)
        encoder_final_state.append(rnn.LSTMStateTuple(c=c, h=h))
    return encoder_outputs, encoder_final_state
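This example depends on a self._make_cell helper that is not shown. Because the loop above reads fw.c and fw.h from the final states, the helper must return LSTM-style cells whose state is an LSTMStateTuple; a plausible minimal version (an assumption, not the original code; the keep_prob attribute is a guess) is:

def _make_cell(self, hidden_size):
    # LSTMCell so the final state is an LSTMStateTuple with .c and .h.
    cell = rnn.LSTMCell(hidden_size)
    return rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)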
Example 17
def maybe_multirnn(lst):
    if len(lst) == 1:
        return lst[0]
    else:
        return MultiRNNCell(lst)
Example 18
def build_encoder_cell(self):
    """Build a single encoder cell.
    """
    return MultiRNNCell([
        self.build_single_cell(
            self.hidden_units,
            use_residual=self.use_residual
        )
        for _ in range(self.depth)
    ])
Example 19
def build_cell(units, cell_type='lstm', num_layers=1):
    if num_layers > 1:
        cell = rnn.MultiRNNCell([
            build_cell(units, cell_type, 1) for _ in range(num_layers)
        ])
    else:
        if cell_type == "lstm":
            cell = rnn.LSTMCell(units)
        elif cell_type == "gru":
            cell = rnn.GRUCell(units)
        else:
            raise ValueError('Do not support %s' % cell_type)
    return cell
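The recursion bottoms out at num_layers == 1, so a stacked cell is assembled from single-layer calls. For instance (sizes illustrative):

# Three stacked GRU layers of 128 units each.
cell = build_cell(128, cell_type='gru', num_layers=3)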
Example 20
def __call__(self, inputs, mask):
    '''
    inputs: the embeddings of a batch of sequences. (batch_size, seq_length, emb_size)
    mask: mask for incomplete sequences. (batch_size, seq_length, 1)
    '''
    cells = []
    for _ in range(self.layers):
        cell = rnn.BasicLSTMCell(self.hidden_units, activation=self.hidden_activation)
        cell = rnn.DropoutWrapper(cell, output_keep_prob=1. - self.dropout)
        cells.append(cell)
    self.cell = cell = rnn.MultiRNNCell(cells)

    zero_state = cell.zero_state(tf.shape(inputs)[0], dtype=tf.float32)
    sequence_length = tf.count_nonzero(tf.squeeze(mask, [-1]), -1)
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, sequence_length=sequence_length,
                                       initial_state=zero_state)
    return outputs
Example 21
def _create_cells(self) -> List[MultiRNNCell]:
    """
    Creates the multilayer-RNN cells required by the architecture of this RNN.

    Returns
    -------
    list of MultiRNNCell
        A list of MultiRNNCells containing one entry if the RNN is unidirectional,
        and two identical entries if the RNN is bidirectional.
    """
    cells = [[self._create_rnn_cell()
              for _ in range(self.num_layers)]
             for _ in range(2 if self.bidirectional else 1)]
    return [MultiRNNCell(x) for x in cells]
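A sketch of how the returned cells could be consumed (the inputs tensor and attribute names here are assumptions):

cells = self._create_cells()
if self.bidirectional:
    outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
        cells[0], cells[1], inputs, dtype=tf.float32)
else:
    outputs, final_state = tf.nn.dynamic_rnn(cells[0], inputs, dtype=tf.float32)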
Example 22
def blstm_layer(self, embedding_chars):
    """Bidirectional LSTM layer over the embedded characters.
    :return: concatenated forward/backward outputs
    """
    with tf.variable_scope('rnn_layer'):
        cell_fw, cell_bw = self._bi_dir_rnn()
        if self.num_layers > 1:
            # Note: repeating one cell instance shares weights across layers;
            # newer TF versions warn about this. A fresh cell per layer is safer.
            cell_fw = rnn.MultiRNNCell([cell_fw] * self.num_layers, state_is_tuple=True)
            cell_bw = rnn.MultiRNNCell([cell_bw] * self.num_layers, state_is_tuple=True)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedding_chars,
                                                     dtype=tf.float32)
        outputs = tf.concat(outputs, axis=2)
        return outputs
Example 23
def _build_language_model_rnn_cell(config):
    if not isinstance(config, predictor_pb2.LanguageModelRnnCell):
        raise ValueError('config not of type predictor_pb2.LanguageModelRnnCell')
    rnn_cell_list = [
        rnn_cell_builder.build(rnn_cell_config) for rnn_cell_config in config.rnn_cell
    ]
    lm_rnn_cell = rnn.MultiRNNCell(rnn_cell_list)
    return lm_rnn_cell
Example 24
def __init__(self, out_units, attention_cell: AttentionRNN,
             is_training, zoneout_factor_cell=0.0, zoneout_factor_output=0.0,
             lstm_impl=LSTMImpl.LSTMCell,
             trainable=True, name=None, dtype=None, **kwargs):
    super(DecoderRNNV2, self).__init__(name=name, trainable=trainable, **kwargs)

    self._cell = MultiRNNCell([
        attention_cell,
        ZoneoutLSTMCell(out_units, is_training, zoneout_factor_cell, zoneout_factor_output,
                        lstm_impl=lstm_impl, dtype=dtype),
        ZoneoutLSTMCell(out_units, is_training, zoneout_factor_cell, zoneout_factor_output,
                        lstm_impl=lstm_impl, dtype=dtype),
    ], state_is_tuple=True)
Example 25
def rnn_cell(rnn_cell_size, dropout_keep_prob, residual, is_training=True):
    """Builds an LSTMBlockCell based on the given parameters."""
    dropout_keep_prob = dropout_keep_prob if is_training else 1.0
    cells = []
    for i in range(len(rnn_cell_size)):
        cell = rnn.LSTMBlockCell(rnn_cell_size[i])
        if residual:
            cell = rnn.ResidualWrapper(cell)
            if i == 0 or rnn_cell_size[i] != rnn_cell_size[i - 1]:
                cell = rnn.InputProjectionWrapper(cell, rnn_cell_size[i])
        cell = rnn.DropoutWrapper(
            cell,
            input_keep_prob=dropout_keep_prob)
        cells.append(cell)
    return rnn.MultiRNNCell(cells)
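A hypothetical call: two residual LSTM-block layers of 256 units each, with dropout applied only during training:

cell = rnn_cell([256, 256], dropout_keep_prob=0.75, residual=True, is_training=True)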
Example 26
def _build_cell(self, m, n_stack=1, wrappers=[]):
    if n_stack == 1:
        cell = self.c(m)
    else:
        cell = rnn.MultiRNNCell([self.c(m) for _ in range(n_stack)])

    # Apply wrappers; use functools.partial to bind other arguments
    for wrapper in wrappers:
        cell = wrapper(cell)
    return cell
Example 27
def build_cell(units, cell_type='lstm', num_layers=1):
    if num_layers > 1:
        cell = rnn.MultiRNNCell([
            build_cell(units, cell_type, 1) for _ in range(num_layers)
        ])
    else:
        if cell_type == "lstm":
            cell = rnn.LSTMCell(units)
        elif cell_type == "gru":
            cell = rnn.GRUCell(units)
        else:
            raise ValueError('Do not support %s' % cell_type)
    return cell
Example 28
def blstm_layer(self, embedding_chars):
    """Bidirectional LSTM layer over the embedded characters.
    :return: concatenated forward/backward outputs
    """
    with tf.variable_scope('rnn_layer'):
        cell_fw, cell_bw = self._bi_dir_rnn()
        if self.num_layers > 1:
            # Note: repeating one cell instance shares weights across layers;
            # newer TF versions warn about this. A fresh cell per layer is safer.
            cell_fw = rnn.MultiRNNCell([cell_fw] * self.num_layers, state_is_tuple=True)
            cell_bw = rnn.MultiRNNCell([cell_bw] * self.num_layers, state_is_tuple=True)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedding_chars,
                                                     dtype=tf.float32)
        outputs = tf.concat(outputs, axis=2)
        return outputs
Example 29
def _inference(self):
    logging.info('...create inference')
    fw_state_tuple = self.unstack_fw_states(self.fw_state)

    fw_cells = list()
    for i in range(0, self.num_layers):
        if self.cell_type == 'lstm':
            cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
        elif self.cell_type == 'gru':
            # change to GRU
            cell = rnn.GRUCell(num_units=self.cell_sizes[i])
        else:
            cell = rnn.BasicRNNCell(num_units=self.cell_sizes[i])
        cell = rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        fw_cells.append(cell)
    self.fw_cells = rnn.MultiRNNCell(fw_cells, state_is_tuple=True)

    rnn_outputs, states = tf.nn.dynamic_rnn(
        self.fw_cells,
        self.inputs,
        initial_state=fw_state_tuple,
        sequence_length=self.seq_lengths,
        dtype=tf.float32, time_major=True)

    # project output from rnn output size to OUTPUT_SIZE. Sometimes it is worth adding
    # an extra layer here.
    self.projection = lambda x: layers.linear(x,
                                              num_outputs=self.label_classes,
                                              activation_fn=tf.nn.sigmoid)
    self.logits = tf.map_fn(self.projection, rnn_outputs, name="logits")
    self.probs = tf.nn.softmax(self.logits, name="probs")
    self.states = states
    tf.add_to_collection('probs', self.probs)
Example 30
def build(self, input_number, sequence_length, layers_number, units_number, output_number):
    self.x = tf.placeholder("float", [None, sequence_length, input_number])
    self.y = tf.placeholder("float", [None, output_number])
    self.sequence_length = sequence_length

    self.weights = {
        'out': tf.Variable(tf.random_normal([units_number, output_number]))
    }
    self.biases = {
        'out': tf.Variable(tf.random_normal([output_number]))
    }

    x = tf.transpose(self.x, [1, 0, 2])
    x = tf.reshape(x, [-1, input_number])
    x = tf.split(x, sequence_length, 0)

    lstm_layers = []
    for i in range(0, layers_number):
        lstm_layer = rnn.BasicLSTMCell(units_number)
        lstm_layers.append(lstm_layer)
    deep_lstm = rnn.MultiRNNCell(lstm_layers)

    self.outputs, states = rnn.static_rnn(deep_lstm, x, dtype=tf.float32)

    print("Build model with input_number: {}, sequence_length: {}, layers_number: {}, "
          "units_number: {}, output_number: {}".format(input_number, sequence_length,
                                                       layers_number, units_number,
                                                       output_number))

    self.save(input_number, sequence_length, layers_number, units_number, output_number)