Python source code examples: tensorflow.python.ops.linalg.eye()

Example 1
def _to_dense(self):
    """Generic and often inefficient implementation.  Override often."""
    logging.warn("Using (possibly slow) default implementation of to_dense."
                 "  Converts by self.matmul(identity).")
    if self.batch_shape.is_fully_defined():
      batch_shape = self.batch_shape
    else:
      batch_shape = self.batch_shape_tensor()

    if self.domain_dimension.value is not None:
      n = self.domain_dimension.value
    else:
      n = self.domain_dimension_tensor()

    eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
    return self.matmul(eye) 
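
A minimal sketch of the eye() call pattern used above, written against the public tf.eye API (the values here are illustrative, not taken from the snippet): batch_shape prepends leading dimensions, so every batch member gets its own identity matrix.

import tensorflow as tf

# batch_shape=[2] yields a stack of two 3x3 identity matrices.
eye = tf.eye(num_rows=3, batch_shape=[2], dtype=tf.float64)
print(eye.shape)  # (2, 3, 3)
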
Example 2
def _CholeskyGrad(op, grad):
  """Gradient for Cholesky."""

  # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
  l = op.outputs[0]
  num_rows = array_ops.shape(l)[-1]
  batch_shape = array_ops.shape(l)[:-2]
  l_inverse = linalg_ops.matrix_triangular_solve(l,
                                                 linalg_ops.eye(
                                                     num_rows,
                                                     batch_shape=batch_shape,
                                                     dtype=l.dtype))

  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)

  grad_a = math_ops.matmul(
      math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)

  grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
  return grad_a * 0.5 
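
The closed-form expression in the comment can be reproduced with public TensorFlow ops and checked against tf.GradientTape. A minimal sketch, assuming a real-valued test matrix (so the conj term drops out) and an arbitrary scalar loss; none of these values come from the snippet:

import tensorflow as tf

a = tf.constant([[4.0, 2.0], [2.0, 3.0]])  # symmetric positive definite
with tf.GradientTape() as tape:
  tape.watch(a)
  l = tf.linalg.cholesky(a)
  loss = tf.reduce_sum(tf.square(l))
reference = tape.gradient(loss, a)

# Re-derive the same gradient with the formula from _CholeskyGrad.
grad = 2.0 * l  # d(loss)/d(l) for loss = sum(l**2)
l_inverse = tf.linalg.triangular_solve(l, tf.eye(2))
middle = tf.matmul(l, grad, adjoint_a=True)
middle = tf.linalg.set_diag(middle, 0.5 * tf.linalg.diag_part(middle))
middle = tf.linalg.band_part(middle, -1, 0)  # keep the lower triangle
grad_a = tf.matmul(tf.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a = 0.5 * (grad_a + tf.linalg.matrix_transpose(grad_a))
# grad_a agrees with `reference` up to floating-point tolerance.
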
Example 3
def _to_dense(self):
    """Generic and often inefficient implementation.  Override often."""
    if self.batch_shape.is_fully_defined():
      batch_shape = self.batch_shape
    else:
      batch_shape = self.batch_shape_dynamic()

    if self.domain_dimension.value is not None:
      n = self.domain_dimension.value
    else:
      n = self.domain_dimension_dynamic()

    eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
    return self.apply(eye) 
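
This appears to be an earlier revision of the _to_dense in Example 1: in later versions of the LinearOperator API, apply was renamed to matmul, and batch_shape_dynamic / domain_dimension_dynamic became batch_shape_tensor / domain_dimension_tensor.
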
Example 4
def __call__(self, shape, dtype=None, partition_info=None):
    """Returns a conv kernel acting as the identity map, scaled by self.gain."""
    del partition_info  # unused
    assert len(shape) > 2, shape

    support = tuple(shape[:-2]) + (1, 1)
    indices = [[s // 2 for s in support]]
    updates = array_ops.constant([self.gain], dtype=dtype)
    kernel = array_ops.scatter_nd(indices, updates, support)

    assert shape[-2] == shape[-1], shape
    if shape[-1] != 1:
      kernel *= linalg_ops.eye(shape[-1], dtype=dtype)

    return kernel 
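
Traced by hand for a small case: the initializer scatters a single gain-valued tap at the spatial center and broadcasts it against a channel identity, so convolving with the kernel reproduces the input scaled by the gain. A sketch with assumed values (gain stands in for self.gain):

import tensorflow as tf

gain = 1.0                               # stands in for self.gain
shape = (3, 3, 2, 2)                     # H, W, in_channels, out_channels
support = tuple(shape[:-2]) + (1, 1)     # (3, 3, 1, 1)
indices = [[s // 2 for s in support]]    # center tap: [[1, 1, 0, 0]]
updates = tf.constant([gain])
kernel = tf.scatter_nd(indices, updates, support)
kernel = kernel * tf.eye(shape[-1])      # broadcast up to (3, 3, 2, 2)
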
Example 5
def posdef_inv(tensor, damping):
    """Computes the inverse of tensor + damping * identity."""
    identity = linalg_ops.eye(tensor.shape.as_list()[0], dtype=tensor.dtype)
    damping = math_ops.cast(damping, dtype=tensor.dtype)
    return posdef_inv_functions[POSDEF_INV_METHOD](tensor, identity, damping) 
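
posdef_inv_functions and POSDEF_INV_METHOD are a module-level dispatch table and configuration constant not shown here. As a sketch of what one entry might look like (an assumption about the table's contents, not a quote from the source), a Cholesky-based variant can solve against the identity instead of forming an explicit inverse:

def posdef_inv_cholesky(tensor, identity, damping):
  """Computes inverse(tensor + damping * identity) via a Cholesky solve."""
  chol = linalg_ops.cholesky(tensor + damping * identity)
  return linalg_ops.cholesky_solve(chol, identity)
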
Example 6
def __call__(self, shape, dtype=None, partition_info=None):
    full_shape = shape if partition_info is None else partition_info.full_shape
    if len(full_shape) != 2:
      raise ValueError(
          "Identity matrix initializer can only be used for 2D matrices.")
    if dtype is None:
      dtype = self.dtype
    initializer = linalg_ops.eye(*full_shape, dtype=dtype)
    if partition_info is not None:
      initializer = array_ops.slice(initializer, partition_info.var_offset,
                                    shape)
    return self.gain * initializer 
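
The eye(*full_shape) unpacking works because eye() also accepts num_columns and can build rectangular matrices. A quick illustration with the public API (values are illustrative):

import tensorflow as tf

print(tf.eye(2, num_columns=4))
# [[1. 0. 0. 0.]
#  [0. 1. 0. 0.]]
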
Example 7
def build(self, input_shape):
    channel_axis = self._channel_axis()
    input_shape = tensor_shape.TensorShape(input_shape)
    num_channels = input_shape.dims[channel_axis].value
    if num_channels is None:
      raise ValueError('The channel dimension of the inputs to `GDN` '
                       'must be defined.')
    self._input_rank = input_shape.ndims
    self.input_spec = input_spec.InputSpec(
        ndim=input_shape.ndims, axes={channel_axis: num_channels})

    pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
    beta_bound = array_ops.constant(
        (self._beta_min + self._reparam_offset**2)**.5, dtype=self.dtype)
    gamma_bound = array_ops.constant(self._reparam_offset, dtype=self.dtype)

    def beta_initializer(shape, dtype=None, partition_info=None):
      del partition_info  # unused
      pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
      return math_ops.sqrt(array_ops.ones(shape, dtype=dtype) + pedestal)

    def gamma_initializer(shape, dtype=None, partition_info=None):
      del partition_info  # unused
      assert len(shape) == 2
      assert shape[0] == shape[1]
      eye = linalg_ops.eye(shape[0], dtype=dtype)
      pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
      return math_ops.sqrt(self._gamma_init * eye + pedestal)

    beta = self.add_variable(
        'reparam_beta',
        shape=[num_channels],
        initializer=beta_initializer,
        dtype=self.dtype,
        trainable=True)
    beta = self._lower_bound(beta, beta_bound)
    self.beta = math_ops.square(beta) - pedestal

    gamma = self.add_variable(
        'reparam_gamma',
        shape=[num_channels, num_channels],
        initializer=gamma_initializer,
        dtype=self.dtype,
        trainable=True)
    gamma = self._lower_bound(gamma, gamma_bound)
    self.gamma = math_ops.square(gamma) - pedestal

    self.built = True 
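
The square/sqrt reparameterization keeps beta and gamma positive after the lower bound is applied. To get a feel for the initial gamma, here is the gamma_initializer arithmetic for 3 channels, assuming gamma_init = 0.1 and reparam_offset = 2**-18 (assumed typical GDN defaults, not values shown in the snippet):

import tensorflow as tf

gamma_init, reparam_offset = 0.1, 2 ** -18   # assumed defaults
pedestal = reparam_offset ** 2
print(tf.sqrt(gamma_init * tf.eye(3) + pedestal))
# ~0.3162 on the diagonal, ~3.8e-06 off it; squaring and subtracting the
# pedestal in build() recovers gamma_init * eye(3).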