Python source code examples: tensorflow.python.ops.linalg.svd()

Example 1
def _assert_non_singular(self):
    """Private default implementation of _assert_non_singular."""
    logging.warn(
        "Using (possibly slow) default implementation of assert_non_singular."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      return self.assert_positive_definite()
    else:
      singular_values = linalg_ops.svd(
          self._get_cached_dense_matrix(), compute_uv=False)
      # TODO(langmore) Add .eig and .cond as methods.
      cond = (math_ops.reduce_max(singular_values, axis=-1) /
              math_ops.reduce_min(singular_values, axis=-1))
      return check_ops.assert_less(
          cond,
          self._max_condition_number_to_be_non_singular(),
          message="Singular matrix up to precision epsilon.")
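The same condition-number check can be reproduced with the public TensorFlow 2.x API. A minimal sketch; the threshold max_cond=1e6 is a hypothetical stand-in for `_max_condition_number_to_be_non_singular()`:

import tensorflow as tf

def assert_non_singular_by_cond(matrix, max_cond=1e6):
  # compute_uv=False returns only the singular values, as in the snippet above.
  s = tf.linalg.svd(matrix, compute_uv=False)
  cond = tf.reduce_max(s, axis=-1) / tf.reduce_min(s, axis=-1)
  return tf.debugging.assert_less(
      cond, tf.cast(max_cond, cond.dtype),
      message="Singular matrix up to precision epsilon.")

# Usage: passes for a well-conditioned matrix, raises InvalidArgumentError
# when the condition number exceeds the bound.
assert_non_singular_by_cond(tf.constant([[2.0, 0.0], [0.0, 1.0]]))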
Example 2
def _symmetric_matrix_square_root(mat, eps=1e-10):
  """Compute square root of a symmetric matrix.

  Note that this is different from an elementwise square root. We want to
  compute M' where M' = sqrt(mat) such that M' * M' = mat.

  Also note that this method **only** works for symmetric matrices.

  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.

  Returns:
    Matrix square root of mat.
  """
  # Unlike numpy, tensorflow's return order is (s, u, v)
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0, just use 0 in such case
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  # Note that the v returned by Tensorflow is v = V
  # (when referencing the equation A = U S V^T)
  # This is unlike Numpy which returns v = V^T
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True) 
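A usage sketch of the same idea with the public `tf.linalg` API; `_symmetric_matrix_square_root` is internal TF-GAN code, so the standalone helper below is an illustration rather than a library function:

import tensorflow as tf

def symmetric_matrix_square_root(mat, eps=1e-10):
  # tf.linalg.svd returns (s, u, v), and v is NOT transposed, unlike NumPy.
  s, u, v = tf.linalg.svd(mat)
  # Leave near-zero singular values untouched instead of taking their sqrt.
  si = tf.where(s < eps, s, tf.sqrt(s))
  return tf.matmul(tf.matmul(u, tf.linalg.diag(si)), v, transpose_b=True)

# Usage: squaring the result recovers the original symmetric PSD matrix.
mat = tf.constant([[4.0, 0.0], [0.0, 9.0]])
root = symmetric_matrix_square_root(mat)
print(tf.matmul(root, root))  # approximately [[4, 0], [0, 9]]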
Example 3
def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (num_rows, num_cols)

    # Generate a random matrix
    a = random_ops.random_uniform(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the svd
    _, u, v = linalg_ops.svd(a, full_matrices=False)
    # Pick the appropriate singular value decomposition
    if num_rows > num_cols:
      q = u
    else:
      # Tensorflow departs from numpy conventions
      # such that we need to transpose axes here
      q = array_ops.transpose(v)
    return self.gain * array_ops.reshape(q, shape)
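The `__call__` above belongs to an orthogonal-initializer class. A rough standalone sketch of the same SVD trick (the shape and gain below are illustrative, not taken from the class):

import tensorflow as tf

def orthogonal_matrix(flat_shape, gain=1.0, seed=None):
  a = tf.random.uniform(flat_shape, seed=seed)
  _, u, v = tf.linalg.svd(a, full_matrices=False)
  # u has shape (rows, k) and v has shape (cols, k) with k = min(rows, cols);
  # pick whichever factor matches the requested shape.
  q = u if flat_shape[0] > flat_shape[1] else tf.transpose(v)
  return gain * q

q = orthogonal_matrix((6, 3))
# Columns are orthonormal, so q^T q is approximately the 3x3 identity.
print(tf.matmul(q, q, transpose_a=True))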


Example 4
def posdef_eig_svd(mat):
    """Computes the singular values and left singular vectors of a matrix."""
    evals, evecs, _ = linalg_ops.svd(mat)

    return evals, evecs 
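For a symmetric positive-definite matrix, the singular values coincide with the eigenvalues and the left singular vectors are eigenvectors, which is what `posdef_eig_svd` relies on. A small illustrative check with the public API (the random PSD matrix below is only an example):

import tensorflow as tf

# Build a positive-definite matrix: A A^T + I.
a = tf.random.normal((3, 3))
mat = tf.matmul(a, a, transpose_b=True) + tf.eye(3)

evals, evecs, _ = tf.linalg.svd(mat)
# Reconstruct from the "eigendecomposition": evecs @ diag(evals) @ evecs^T.
recon = tf.matmul(evecs * evals, evecs, transpose_b=True)
print(tf.reduce_max(tf.abs(recon - mat)))  # close to 0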
Example 5
def posdef_eig_self_adjoint(mat):
    """Computes eigendecomposition using self_adjoint_eig."""
    evals, evecs = linalg_ops.self_adjoint_eig(mat)
    evals = math_ops.abs(evals)  # Should be equivalent to svd approach.

    return evals, evecs 
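In current TensorFlow, `self_adjoint_eig` is exposed as `tf.linalg.eigh`. A hedged comparison of the two routes on the same positive-definite matrix; `eigh` returns eigenvalues in ascending order while `svd` returns singular values in descending order, so the values are sorted before comparison:

import tensorflow as tf

a = tf.random.normal((4, 4))
mat = tf.matmul(a, a, transpose_b=True) + tf.eye(4)

svd_vals = tf.linalg.svd(mat, compute_uv=False)  # descending order
eig_vals, _ = tf.linalg.eigh(mat)                # ascending order
# For a PSD matrix the two sets of values agree up to numerical error.
print(tf.reduce_max(tf.abs(tf.sort(svd_vals) - eig_vals)))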