Python source code examples: tensorflow.python.ops.standard_ops.less()
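standard_ops is the internal module through which TensorFlow's graph-building code reaches the public ops, so standard_ops.less behaves like tf.less: an element-wise x < y comparison that returns a boolean tensor of the broadcast shape. A minimal sketch of the call, assuming the TF 1.x graph/session API used throughout these examples (tensor names chosen here for illustration):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
threshold = tf.constant(2.0)

# Element-wise comparison; broadcasting applies, result dtype is tf.bool.
mask = tf.less(x, threshold)                  # [True, False, False]

# Common follow-up: fraction of elements below the threshold.
fraction = tf.reduce_mean(tf.to_float(mask))

with tf.Session() as sess:
  print(sess.run(mask))      # [ True False False]
  print(sess.run(fraction))  # ~0.333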
Example 1
def summarize_activation(op):
  """Summarize an activation.

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    op: The tensor to summarize (assumed to be a layer activation).
  Returns:
    The summary op created to summarize `op`.
  """
  if op.op.type in ('Relu', 'Softplus', 'Relu6'):
    # Using inputs to avoid floating point equality and/or epsilons.
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.less(
            op.op.inputs[0], standard_ops.cast(0.0, op.op.inputs[0].dtype)))),
        '%s/zeros' % op.op.name)
  if op.op.type == 'Relu6':
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.greater(
            op.op.inputs[0], standard_ops.cast(6.0, op.op.inputs[0].dtype)))),
        '%s/sixes' % op.op.name)
  return _add_histogram_summary(op, '%s/activation' % op.op.name)
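The reduce_mean(to_float(less(...))) pattern in Example 1 measures the fraction of pre-activation values below zero, i.e. how many units the ReLU zeroes out. The same idea extracted into a standalone helper, as a sketch only (zero_fraction and pre_activation are illustrative names, not part of the original module):

import tensorflow as tf

def zero_fraction(pre_activation):
  """Fraction of entries in `pre_activation` below zero (what a ReLU zeroes out)."""
  zero = tf.cast(0.0, pre_activation.dtype)
  is_negative = tf.less(pre_activation, zero)      # element-wise bool tensor
  return tf.reduce_mean(tf.to_float(is_negative))  # scalar in [0, 1]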
Example 2
def testIndexedSlicesWithDenseShape(self):
  with self.test_session():
    data = ops.IndexedSlices(tf.constant([1, 2, 3]),
                             tf.constant([0, 1]),
                             dense_shape=tf.constant([3]))
    zero = tf.constant(0)
    one = tf.constant(1)
    less_op = tf.less(zero, one)
    switch_false, switch_true = control_flow_ops.switch(data, less_op)
    self.assertAllEqual([1, 2, 3], switch_true.values.eval())
    self.assertAllEqual([0, 1], switch_true.indices.eval())
Example 3
def testCondContext(self):
  with self.test_session() as sess:
    x = tf.constant(2)
    y = tf.constant(5)
    control_flow_ops.cond(tf.less(x, y),
                          lambda: tf.mul(x, 17),
                          lambda: tf.add(y, 23))
    for op in sess.graph.get_operations():
      c = op._get_control_flow_context()
      if c:
        compare.ProtoEq(
            c.to_proto(),
            control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto())
Example 4
def testWhileContext(self):
  with self.test_session() as sess:
    i = tf.constant(0)
    c = lambda i: tf.less(i, 10)
    b = lambda i: tf.add(i, 1)
    tf.while_loop(c, b, [i])
    for op in sess.graph.get_operations():
      c = op._get_control_flow_context()
      if c:
        compare.ProtoEq(
            c.to_proto(),
            control_flow_ops.WhileContext.from_proto(c.to_proto()).to_proto())
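In Example 4, tf.less(i, 10) supplies the loop predicate for tf.while_loop; the body runs for as long as the condition tensor evaluates to True. A minimal runnable sketch of that pattern on its own, again assuming graph-mode TF 1.x:

import tensorflow as tf

i = tf.constant(0)
cond = lambda i: tf.less(i, 10)   # keep looping while i < 10
body = lambda i: tf.add(i, 1)
result = tf.while_loop(cond, body, [i])

with tf.Session() as sess:
  print(sess.run(result))  # 10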