Python source code examples: tensorflow.python.ops.standard_ops.reduce_mean()
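The examples on this page come from code that calls `standard_ops.reduce_mean()`, the same op exposed publicly as `tf.reduce_mean`: it computes the mean of a tensor's elements, optionally along given axes. A minimal sketch of its behavior, assuming TensorFlow 1.x graph mode:

from tensorflow.python.ops import standard_ops
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
mean_all = standard_ops.reduce_mean(x)           # scalar tensor: 2.5
mean_col = standard_ops.reduce_mean(x, axis=0)   # [2.0, 3.0], per-column means

with tf.Session() as sess:
    print(sess.run([mean_all, mean_col]))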
Example 1
from tensorflow.python.ops import standard_ops

# `_add_scalar_summary` and `_add_histogram_summary` are private helpers
# defined in the same module (tf.contrib.layers' summaries.py); they wrap
# tf.summary.scalar and tf.summary.histogram, respectively.

def summarize_activation(op):
  """Summarize an activation.

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    op: The tensor to summarize (assumed to be a layer activation).
  Returns:
    The summary op created to summarize `op`.
  """
  if op.op.type in ('Relu', 'Softplus', 'Relu6'):
    # Using inputs to avoid floating point equality and/or epsilons.
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.less(
            op.op.inputs[0], standard_ops.cast(0.0, op.op.inputs[0].dtype)))),
        '%s/zeros' % op.op.name)
  if op.op.type == 'Relu6':
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.greater(
            op.op.inputs[0], standard_ops.cast(6.0, op.op.inputs[0].dtype)))),
        '%s/sixes' % op.op.name)
  return _add_histogram_summary(op, '%s/activation' % op.op.name)
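Here `reduce_mean` over a boolean mask cast to float yields the fraction of elements satisfying the predicate, e.g. the fraction of pre-activation values a ReLU zeroes out. A minimal, self-contained sketch of that core computation, assuming TensorFlow 1.x graph mode (the tensor names here are illustrative, not from the original module):

import tensorflow as tf
from tensorflow.python.ops import standard_ops

x = tf.constant([[-1.0, 0.5], [2.0, -3.0]])
act = tf.nn.relu(x)  # act.op.type == 'Relu'; act.op.inputs[0] is x

# Mean of a 0/1 mask = fraction of inputs below zero, i.e. dead ReLU units.
zeros_frac = standard_ops.reduce_mean(
    standard_ops.to_float(standard_ops.less(
        act.op.inputs[0], standard_ops.cast(0.0, x.dtype))))

with tf.Session() as sess:
    print(sess.run(zeros_frac))  # 0.5: two of the four inputs are negative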