Python source code examples: entropy.Bias()
Example 1
def __init__(self, code_depth, name=None):
  super(BrnnPredictor, self).__init__(name)

  with self._BlockScope():
    hidden_depth = 2 * code_depth

    # What is coming from the previous layer/iteration
    # is going through a regular Conv2D layer as opposed to the binary codes
    # of the current layer/iteration which are going through a masked
    # convolution.
    self._adaptation0 = blocks.RasterScanConv2D(
        hidden_depth, [7, 7], [1, 1], 'SAME',
        strict_order=True,
        bias=blocks.Bias(0), act=tf.tanh)
    self._adaptation1 = blocks.Conv2D(
        hidden_depth, [3, 3], [1, 1], 'SAME',
        bias=blocks.Bias(0), act=tf.tanh)
    self._predictor = blocks.CompositionOperator([
        blocks.LineOperator(
            blocks.RasterScanConv2DLSTM(
                depth=hidden_depth,
                filter_size=[1, 3],
                hidden_filter_size=[1, 3],
                strides=[1, 1],
                padding='SAME')),
        blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME',
                      bias=blocks.Bias(0), act=tf.tanh),
        blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME',
                      bias=blocks.Bias(0), act=tf.tanh)
    ])
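In this example, Bias(0) appears only as the bias= argument of a convolutional block: it tells the block to start its bias variable at zero. As a purely hypothetical illustration of that pattern (a simplified stand-in, not the entropy_coder library's actual implementation), such a bias-initializer wrapper could be as small as:

# Hypothetical, simplified stand-in for blocks.Bias; the real library's
# implementation is not shown on this page and may differ.
class Bias(object):
  """Records the constant a block should use to initialize its bias."""

  def __init__(self, initial_value):
    self.value = float(initial_value)

  def __repr__(self):
    return 'Bias(%g)' % self.value


# Mirrors the usage above: bias=Bias(0) starts the convolution bias at zero.
print(Bias(0))  # -> Bias(0)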
Example 2
def __init__(self, layer_count, code_depth, name=None):
  super(LayerPrediction, self).__init__(name)

  self._layer_count = layer_count

  # No previous layer.
  self._layer_state = None
  self._current_layer = 0

  with self._BlockScope():
    # Layers used to do the conditional code prediction.
    self._brnn_predictors = []
    for _ in xrange(layer_count):
      self._brnn_predictors.append(BrnnPredictor(code_depth))

    # Layers used to generate the input of the LSTM operating on the
    # iteration/depth domain.
    hidden_depth = 2 * code_depth
    self._state_blocks = []
    for _ in xrange(layer_count):
      self._state_blocks.append(blocks.CompositionOperator([
          blocks.Conv2D(
              hidden_depth, [3, 3], [1, 1], 'SAME',
              bias=blocks.Bias(0), act=tf.tanh),
          blocks.Conv2D(
              code_depth, [3, 3], [1, 1], 'SAME',
              bias=blocks.Bias(0), act=tf.tanh)
      ]))

    # Memory of the RNN is equivalent to the size of 2 layers of binary
    # codes.
    hidden_depth = 2 * code_depth
    self._layer_rnn = blocks.CompositionOperator([
        blocks.Conv2DLSTM(
            depth=hidden_depth,
            filter_size=[1, 1],
            hidden_filter_size=[1, 1],
            strides=[1, 1],
            padding='SAME'),
        blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME',
                      bias=blocks.Bias(0), act=tf.tanh),
        blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME',
                      bias=blocks.Bias(0), act=tf.tanh)
    ])
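This constructor builds one BrnnPredictor and one state block per layer, plus a single Conv2DLSTM-based _layer_rnn shared across layers that carries state in the iteration/depth direction. Note also that the loops use xrange, so the listing targets Python 2. The self-contained toy below is only a hypothetical mimic of that wiring (no TensorFlow, no real blocks), included to make the construction pattern explicit:

# Hypothetical toy mirroring the construction pattern of LayerPrediction:
# one independent predictor per layer plus one shared recurrent block.
class ToyLayerPrediction(object):
  def __init__(self, layer_count, code_depth):
    self._layer_count = layer_count
    self._layer_state = None       # no previous layer yet
    self._current_layer = 0
    hidden_depth = 2 * code_depth  # same ratio as in the examples above
    # One per-layer predictor, like self._brnn_predictors.
    self._predictors = ['predictor_%d' % i for i in range(layer_count)]
    # One shared recurrent block, like self._layer_rnn.
    self._layer_rnn = 'layer_rnn(depth=%d)' % hidden_depth


model = ToyLayerPrediction(layer_count=4, code_depth=32)
assert len(model._predictors) == 4
assert model._layer_rnn == 'layer_rnn(depth=64)'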