Python source code examples: tensorflow.keras.constraints.get()
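tf.keras.constraints.get() resolves a constraint identifier into a usable constraint object: a string name or config dict is deserialized into the corresponding Constraint instance, an existing Constraint (or any callable) is returned unchanged, and None passes through as None. This is why nearly every layer constructor below routes its *_constraint arguments through constraints.get(). A minimal self-contained sketch of this behavior:

from tensorflow.keras import constraints

c1 = constraints.get('max_norm')                 # string name -> MaxNorm instance
c2 = constraints.get(constraints.MaxNorm(2.0))   # instance -> returned unchanged
c3 = constraints.get(None)                       # None -> None ("no constraint")
print(type(c1).__name__, type(c2).__name__, c3)  # MaxNorm MaxNorm None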
Example 1
def __init__(self,
             ratio,
             return_mask=False,
             sigmoid_gating=False,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.ratio = ratio
    self.return_mask = return_mask
    self.sigmoid_gating = sigmoid_gating
    self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example 2
def __init__(self,
             channels,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.channels = channels
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
Example 3
def __init__(self,
             k,
             channels=None,
             return_mask=False,
             activation=None,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.k = k
    self.channels = channels
    self.return_mask = return_mask
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example 4
def __init__(self,
             channels,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(activity_regularizer=activity_regularizer, **kwargs)
    self.channels = channels
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = False
Example 5
def convert_sequence_vocab(self, sequence, sequence_lengths):
    PFAM_TO_UNIREP_ENCODED = {encoding: UNIREP_VOCAB.get(aa, 23)
                              for aa, encoding in PFAM_VOCAB.items()}

    def to_uniprot_unirep(seq, seqlens):
        new_seq = np.zeros_like(seq)
        for pfam_encoding, unirep_encoding in PFAM_TO_UNIREP_ENCODED.items():
            new_seq[seq == pfam_encoding] = unirep_encoding
        # add start/stop
        new_seq = np.pad(new_seq, [[0, 0], [1, 1]], mode='constant')
        new_seq[:, 0] = UNIREP_VOCAB['<START>']
        new_seq[np.arange(new_seq.shape[0]), seqlens + 1] = UNIREP_VOCAB['<STOP>']
        return new_seq

    new_sequence = tf.py_func(to_uniprot_unirep, [sequence, sequence_lengths], sequence.dtype)
    new_sequence.set_shape([sequence.shape[0], sequence.shape[1] + 2])
    return new_sequence
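Note: tf.py_func is TensorFlow 1.x API and is deprecated in TensorFlow 2.x, where tf.numpy_function is the closest equivalent for wrapping a NumPy-based function such as to_uniprot_unirep.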
Example 6
def __init__(self,
             activation: OptStrOrCallable = None,
             use_bias: bool = True,
             kernel_initializer: OptStrOrCallable = 'glorot_uniform',
             bias_initializer: OptStrOrCallable = 'zeros',
             kernel_regularizer: OptStrOrCallable = None,
             bias_regularizer: OptStrOrCallable = None,
             activity_regularizer: OptStrOrCallable = None,
             kernel_constraint: OptStrOrCallable = None,
             bias_constraint: OptStrOrCallable = None,
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    self.activation = activations.get(activation)  # noqa
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super().__init__(**kwargs)
Example 7
def call(self, inputs):
    def brelu(x):
        # get the shape of x; we are interested in the last axis, which is constant
        shape = K.int_shape(x)
        # last axis
        dim = shape[-1]
        # half of the last axis (+1 if odd)
        dim2 = dim // 2
        if dim % 2 != 0:
            dim2 += 1
        # the multiplier is a tensor of alternating +1 and -1
        multiplier = K.ones((dim2,))
        multiplier = K.stack([multiplier, -multiplier], axis=-1)
        if dim % 2 != 0:
            multiplier = multiplier[:-1]
        # reshape the multiplier so it broadcasts against x
        multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
        return multiplier * tf.nn.relu(multiplier * x)

    return Lambda(brelu)(inputs)
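In effect, brelu is a bipolar ReLU: positions where the multiplier is +1 along the last axis compute relu(x), while positions where it is -1 compute -relu(-x), keeping the negative part of the activation instead of the positive one.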
Example 8
def __init__(self,
             filters,
             kernel_size,
             strides=1,
             padding='valid',
             dilation_rate=1,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             demod=True,
             **kwargs):
    super(Conv2DMod, self).__init__(**kwargs)
    self.filters = filters
    self.rank = 2
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.demod = demod
    self.input_spec = [InputSpec(ndim=4),
                       InputSpec(ndim=2)]
Example 9
def deserialize_kwarg(key, attr):
    if key.endswith('_initializer'):
        return initializers.get(attr)
    if key.endswith('_regularizer'):
        return regularizers.get(attr)
    if key.endswith('_constraint'):
        return constraints.get(attr)
    if key == 'activation':
        return activations.get(attr)
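A hypothetical usage sketch (the config dict is invented for illustration and assumes the standard tf.keras initializers, regularizers, constraints, and activations modules are in scope, as in the snippet above):

config = {'kernel_initializer': 'glorot_uniform',
          'kernel_constraint': 'max_norm',
          'activation': 'relu'}
resolved = {key: deserialize_kwarg(key, attr) for key, attr in config.items()}
# resolved['kernel_constraint'] is a MaxNorm instance;
# resolved['activation'] is the relu function.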
Example 10
def __init__(self,
             trainable_kernel=False,
             activation=None,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.trainable_kernel = trainable_kernel
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example 11
def __init__(self,
             input_dim_1=None,
             activation=None,
             **kwargs):
    super(MinkowskiProduct, self).__init__(**kwargs)
    self.input_dim_1 = input_dim_1
    self.activation = activations.get(activation)
Example 12
def __init__(self,
             k,
             mlp_hidden=None,
             mlp_activation='relu',
             return_mask=False,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.k = k
    self.mlp_hidden = mlp_hidden if mlp_hidden else []
    self.mlp_activation = mlp_activation
    self.return_mask = return_mask
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
Example 13
def __init__(self,
             groups=4,
             axis=-1,
             epsilon=1e-5,
             center=True,
             scale=True,
             beta_initializer="zeros",
             gamma_initializer="ones",
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             **kwargs):
    super(GroupNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.groups = groups
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
Example 14
def __init__(self,
             kernel_size,
             strides=(1, 1),
             padding='valid',
             depth_multiplier=1,
             data_format=None,
             activation=None,
             use_bias=True,
             depthwise_initializer='glorot_uniform',
             bias_initializer='zeros',
             depthwise_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             depthwise_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
    self.depthwise_kernel = None
    self.bias = None
Example 15
def get_auto_range_constraint_initializer(quantizer, constraint, initializer):
    """Gets the value range automatically for a quantizer.

    Arguments:
        quantizer: A quantizer class in quantizers.py.
        constraint: A tf.keras constraint.
        initializer: A tf.keras initializer.

    Returns:
        A tuple (constraint, initializer), where the constraint is clipped by
        the Clip class in this file, based on the value range of the quantizer,
        and the initializer is constrained by the value range of the quantizer.
    """
    if quantizer is not None:
        # use a symmetric clipping function for now
        max_value = max(1, quantizer.max()) if hasattr(quantizer, "max") else 1.0
        min_value = quantizer.min() if hasattr(quantizer, "min") else -1.0
        if constraint:
            constraint = constraints.get(constraint)
        constraint = Clip(-max_value, max_value, constraint, quantizer)
        initializer = initializers.get(initializer)
        if initializer and initializer.__class__.__name__ not in ["Ones", "Zeros"]:
            # we want to get the max value of the quantizer that depends
            # on the distribution and scale
            if not (hasattr(quantizer, "alpha") and
                    isinstance(quantizer.alpha, six.string_types)):
                initializer = QInitializer(
                    initializer, use_scale=True, quantizer=quantizer)
    return constraint, initializer
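A hypothetical illustration of the interface this helper probes; FakeQuantizer is invented here, and running the sketch requires the surrounding qkeras definitions (Clip, QInitializer, constraints, initializers):

class FakeQuantizer:
    # invented stand-in exposing the max()/min() interface checked above
    def max(self):
        return 2.0

    def min(self):
        return -2.0

constraint, initializer = get_auto_range_constraint_initializer(
    FakeQuantizer(), 'max_norm', 'glorot_uniform')
# constraint is a Clip(-2.0, 2.0, ...) wrapping MaxNorm, and initializer is
# wrapped in a QInitializer scaled to the quantizer's range.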
Example 16
def get_config(self):
    return {
        "initializer": self.initializer,
        "use_scale": self.use_scale,
        "quantizer": self.quantizer,
    }

#
# Because it may be hard to get serialization from activation functions,
# we may be replacing their instantiation by QActivation in the future.
#
Example 17
def __init__(self, min_value=0.0, max_value=1.0,
             constraint=None, quantizer=None):
    """Initializes Clip constraint class."""
    self.min_value = min_value
    self.max_value = max_value
    self.constraint = constraints.get(constraint)
    # Don't wrap yourself
    if isinstance(self.constraint, Clip):
        self.constraint = None
    self.quantizer = get_quantizer(quantizer)
Example 18
def __init__(self,
             groups=8,
             axis=-1,
             epsilon=1e-5,
             center=True,
             scale=True,
             beta_initializer='zeros',
             gamma_initializer='ones',
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             **kwargs):
    """Initializes a group normalization layer.

    References:
        - [Group Normalization](https://arxiv.org/abs/1803.08494)
    """
    super(GroupNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.groups = groups
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
Example 19
def __init__(self,
             T=3,
             n_hidden=512,
             activation=None,
             activation_lstm='tanh',
             recurrent_activation='hard_sigmoid',
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             use_bias=True,
             unit_forget_bias=True,
             kernel_regularizer=None,
             recurrent_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             recurrent_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.activation_lstm = activations.get(activation_lstm)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.T = T
    self.n_hidden = n_hidden
Example 20
def __init__(self,
             epsilon=1e-6,
             beta_initializer='zeros',
             gamma_initializer='ones',
             tau_initializer='zeros',
             beta_regularizer=None,
             gamma_regularizer=None,
             tau_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             tau_constraint=None,
             **kwargs):
    super(FRN, self).__init__(**kwargs)
    self.supports_masking = True
    self.epsilon = epsilon
    self.beta_initializer = initializers.get(beta_initializer)
    self.tau_initializer = initializers.get(tau_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.tau_regularizer = regularizers.get(tau_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.tau_constraint = constraints.get(tau_constraint)
    self.tau = None
    self.gamma = None
    self.beta = None
    self.axis = -1
Example 21
def __init__(self,
             alpha_initializer="zeros",
             b_initializer="zeros",
             S=1,
             alpha_regularizer=None,
             b_regularizer=None,
             alpha_constraint=None,
             b_constraint=None,
             shared_axes=None,
             **kwargs):
    super(APL, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.b_initializer = initializers.get(b_initializer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.b_constraint = constraints.get(b_constraint)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)
    self.S = S
    self.alpha_arr = []
    self.b_arr = []
Example 22
def __init__(self,
             channels,
             attn_heads=1,
             concat_heads=True,
             dropout_rate=0.5,
             return_attn_coef=False,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             attn_kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             bias_regularizer=None,
             attn_kernel_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             attn_kernel_constraint=None,
             **kwargs):
    super().__init__(channels,
                     activation=activation,
                     use_bias=use_bias,
                     kernel_initializer=kernel_initializer,
                     bias_initializer=bias_initializer,
                     kernel_regularizer=kernel_regularizer,
                     bias_regularizer=bias_regularizer,
                     activity_regularizer=activity_regularizer,
                     kernel_constraint=kernel_constraint,
                     bias_constraint=bias_constraint,
                     **kwargs)
    self.attn_heads = attn_heads
    self.concat_heads = concat_heads
    self.dropout_rate = dropout_rate
    self.return_attn_coef = return_attn_coef
    self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
    self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
    self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
    if concat_heads:
        # Output will have shape (..., attn_heads * channels)
        self.output_dim = self.channels * self.attn_heads
    else:
        # Output will have shape (..., channels)
        self.output_dim = self.channels