Python source code examples: hypothesis.strategies.floats()
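Before the project examples below, a minimal self-contained sketch of how hypothesis.strategies.floats() is typically used (illustrative only, not drawn from any of the projects):

from hypothesis import given, strategies as st

@given(st.floats(allow_nan=False, allow_infinity=False))
def test_repr_round_trip(x):
    # repr() of a finite float is exact, so parsing it back is lossless.
    assert float(repr(x)) == x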
Example 1
def ibm_compatible_floats(draw, min_value=None, max_value=None):
    if min_value is None:
        min_value = MIN_IBM_FLOAT

    if max_value is None:
        max_value = MAX_IBM_FLOAT

    truncated_min_f = max(min_value, MIN_IBM_FLOAT)
    truncated_max_f = min(max_value, MAX_IBM_FLOAT)

    strategies = []
    if truncated_min_f <= LARGEST_NEGATIVE_NORMAL_IBM_FLOAT <= truncated_max_f:
        strategies.append(floats(truncated_min_f, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT))
    if truncated_min_f <= SMALLEST_POSITIVE_NORMAL_IBM_FLOAT <= truncated_max_f:
        strategies.append(floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT, truncated_max_f))
    if truncated_min_f <= 0 <= truncated_max_f:
        strategies.append(just(0.0))

    if len(strategies) == 0:
        strategies.append(floats(truncated_min_f, truncated_max_f))

    ibm = draw(one_of(*strategies))
    return ibm
Example 2
def _strategy_2d_array(dtype, minval=0, maxval=None, **kwargs):
    min_side = kwargs.pop('min_side', 1)
    max_side = kwargs.pop('max_side', None)

    # np.int / np.float / np.str were deprecated aliases for the builtins
    # (removed in modern numpy); compare against the builtin types directly.
    if dtype is int:
        elems = st.integers(minval, maxval, **kwargs)
    elif dtype is float:
        elems = st.floats(minval, maxval, **kwargs)
    elif dtype is str:
        elems = st.text(min_size=minval, max_size=maxval, **kwargs)
    else:
        raise ValueError('no elements strategy for dtype', dtype)

    return arrays(dtype, array_shapes(2, 2, min_side, max_side), elements=elems)
Example 3
def from_schema(schema):
    """Returns a strategy for objects that match the given schema."""
    check_schema(schema)
    # TODO: actually handle constraints on number/string/array schemas
    return dict(
        null=st.none(),
        bool=st.booleans(),
        number=st.floats(allow_nan=False),
        string=st.text(),
        array=st.lists(st.nothing()),
    )[schema["type"]]
# `@st.composite` is one way to write this - another would be to define a
# bare function, and `return st.one_of(st.none(), st.booleans(), ...)` so
# each strategy can be defined individually. Use whichever seems more
# natural to you - the important thing in tests is usually readability!
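For illustration, a minimal sketch of the bare-function alternative described in the comment above (the name json_values is hypothetical, not part of the original code):

def json_values():
    # Each member strategy is defined individually, then combined with one_of.
    return st.one_of(
        st.none(),
        st.booleans(),
        st.floats(allow_nan=False),
        st.text(),
    )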
Example 4
def test_spherical_list(nev):
    """The same_len_lists can draw values that, when squared, overflow floats.

    For regular floats, this doesn't flag, but numpy warns on it. It's
    considered a caller problem if the value does overflow.

    TODO
    ----
    Consider either properly warning or consistently failing when a partial
    computation overflows. A solution could be to coerce even regular floats to
    numpy, and look for the warning. For now, do the python thing and just
    carry on.
    """
    n, e, v = nev
    inc, azi = spherical(n, e, v)
    assert np.all(0 <= azi)
    assert np.all(azi < 360)
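The TODO above sketches a remedy in prose; as a hedged illustration (not part of the project), coercing to numpy and escalating the overflow warning could look like this:

import warnings
import numpy as np

def squares_overflow(values):
    # Promote numpy's overflow RuntimeWarning to an error so it can be detected.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        try:
            np.asarray(values, dtype=np.float64) ** 2
        except RuntimeWarning:
            return True
    return False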
Example 5
def test_padding(ndim: int, data: st.DataObject):
    """Ensure that convolving a padding-only image with a commensurate kernel yields the single entry: 0"""
    padding = data.draw(
        st.integers(1, 3) | st.tuples(*[st.integers(1, 3)] * ndim), label="padding"
    )
    x = Tensor(
        data.draw(
            hnp.arrays(shape=(1, 1) + (0,) * ndim, dtype=float, elements=st.floats()),
            label="x",
        )
    )
    pad_tuple = padding if isinstance(padding, tuple) else (padding,) * ndim
    kernel = data.draw(
        hnp.arrays(
            shape=(1, 1) + tuple(2 * p for p in pad_tuple),
            dtype=float,
            elements=st.floats(allow_nan=False, allow_infinity=False),
        )
    )
    out = conv_nd(x, kernel, padding=padding, stride=1)
    assert out.shape == (1,) * x.ndim
    assert out.item() == 0.0

    out.sum().backward()
    assert x.grad.shape == x.shape
Example 6
def test_negative_log_likelihood(data: st.DataObject, labels_as_tensor: bool):
    s = data.draw(
        hnp.arrays(
            shape=hnp.array_shapes(max_side=10, min_dims=2, max_dims=2),
            dtype=float,
            elements=st.floats(-100, 100),
        )
    )
    y_true = data.draw(
        hnp.arrays(
            shape=(s.shape[0],),
            dtype=hnp.integer_dtypes(),
            elements=st.integers(min_value=0, max_value=s.shape[1] - 1),
        ).map(Tensor if labels_as_tensor else lambda x: x)
    )
    scores = Tensor(s)
    nll = negative_log_likelihood(mg.log(mg.nnet.softmax(scores)), y_true)
    nll.backward()

    cross_entropy_scores = Tensor(s)
    ce = softmax_crossentropy(cross_entropy_scores, y_true)
    ce.backward()

    assert_allclose(nll.data, ce.data, atol=1e-5, rtol=1e-5)
    assert_allclose(scores.grad, cross_entropy_scores.grad, atol=1e-5, rtol=1e-5)
Example 7
def arrays(self, i: int) -> st.SearchStrategy:
    """
    Hypothesis search strategy for drawing an array y to be passed to f(x, ..., y_i, ...).

    By default, y is drawn to have a shape that is broadcast-compatible with x.

    Parameters
    ----------
    i : int
        The argument index-location of y in the signature of f.

    Returns
    -------
    hypothesis.searchstrategy.SearchStrategy"""
    return hnp.arrays(
        shape=self.index_to_arr_shapes.get(i),
        dtype=float,
        elements=st.floats(*self.index_to_bnds.get(i, self.default_bnds)),
    )
Example 8
def generate_dictionary_with_fixed_tokens(draw):
    """
    Builds a random nested dictionary structure which is then used as JSON to
    mask two fixed "token" keys.

    The structure is based on the TEST_JSON sample fixture defined above.
    """
    base = draw(
        st.fixed_dictionaries({'token': st.text(printable, min_size=10)})
    )
    optional = draw(
        st.nothing() | st.dictionaries(
            st.text(ascii_letters, min_size=1),
            st.floats() | st.integers() | st.text(printable) | st.booleans()
            | st.nothing(),
            min_size=10,
            max_size=50
        )
    )
    return {**base, **optional}
Example 9
def close_enough(x, y, equal_nan=False, rtol=1e-5, atol=1e-8):
    # Might want to adjust rtol and atol for lower-precision floats
    x, y = np.asarray(x), np.asarray(y)

    if x.shape != y.shape:
        return False

    if x.dtype != y.dtype:
        return False

    if x.dtype.kind == "f":
        assert y.dtype.kind == "f"
        # Note: equal_nan is only considered in the float-vs-float case!
        return np.allclose(x, y, equal_nan=equal_nan, rtol=rtol, atol=atol)

    return np.all(x == y)
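A hedged usage sketch for the lower-precision case mentioned in the comment (the arrays are illustrative, not from the original tests):

import numpy as np

a = np.array([0.1, 0.2], dtype=np.float32)
b = (a + 1e-6).astype(np.float32)
# float32 carries only ~7 significant digits, so loosen the tolerances:
assert close_enough(a, b, rtol=1e-4, atol=1e-6)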
Example 10
def choice(values):
    """
    One value from a limited set.

    Args:
        values:
            Iterable with values that will be produced by the strategy. Python
            enums are iterable and thus can be used as arguments for this
            strategy.

    Examples:
        >>> from hypothesis import strategies as st, given
        >>> from math import sin, cos
        >>> @given(choice([sin, cos]), st.floats(-1000, 1000))
        ... def check_range(fn, x):
        ...     assert -1 <= fn(x) <= 1
    """
    values = list(values)
    return st.integers(min_value=0, max_value=len(values) - 1).map(values.__getitem__)
Example 11
def strategy(self):
    if self._multiple_of is not None:
        maximum = self._maximum
        if maximum is not None:
            maximum = math.floor(maximum / self._multiple_of)
        minimum = self._minimum
        if minimum is not None:
            minimum = math.ceil(minimum / self._multiple_of)
        strategy = hy_st.integers(min_value=minimum, max_value=maximum)
        strategy = strategy.map(lambda x: x * self._multiple_of)
    else:
        strategy = hy_st.floats(min_value=self._minimum,
                                max_value=self._maximum)
    if self._exclusive_maximum:
        strategy = strategy.filter(lambda x: x < self._maximum)
    if self._exclusive_minimum:
        strategy = strategy.filter(lambda x: x > self._minimum)

    return strategy
Example 12
def enums_of_primitives(draw):
    """Generate enum classes with primitive values."""
    names = draw(st.sets(st.text(min_size=1), min_size=1))
    n = len(names)
    vals = draw(
        st.one_of(
            st.sets(
                st.one_of(
                    st.integers(),
                    st.floats(allow_nan=False),
                    st.text(min_size=1),
                ),
                min_size=n,
                max_size=n,
            )
        )
    )
    return Enum("HypEnum", list(zip(names, vals)))
Example 13
def ibm_compatible_negative_floats(draw):
    return draw(floats(MIN_IBM_FLOAT, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT))
Example 14
def ibm_compatible_positive_floats(draw):
    return draw(floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT, MAX_IBM_FLOAT))
Example 15
def ibm_compatible_non_negative_floats(draw):
    return draw(one_of(
        just(0.0),
        floats(SMALLEST_POSITIVE_NORMAL_IBM_FLOAT, MAX_IBM_FLOAT)))
Example 16
def ibm_compatible_non_positive_floats(draw):
    return draw(one_of(
        just(0.0),
        floats(MIN_IBM_FLOAT, LARGEST_NEGATIVE_NORMAL_IBM_FLOAT)))
Example 17
def heavy_nested_data(draw):
    return [draw(lists(integers(), min_size=1, max_size=3)),
            draw(floats()),
            draw(lists(lists(floats(), min_size=1, max_size=3), min_size=1, max_size=3))]
Example 18
def mean(data, as_type=Fraction):
    """Return the mean of the input list, as the given type."""
    # This function is a correct implementation of the arithmetic mean,
    # so that you can test it according to the metamorphic properties of
    # that mathematical equation for integers, floats, and fractions.
    assert as_type in (int, float, Fraction), as_type
    if as_type == int:
        return sum(int(n) for n in data) // len(data)  # integer division case
    return sum(as_type(n) for n in data) / len(data)  # float or Fraction case


# You can use parametrize and given together, but two tips for best results:
# 1. Put @parametrize *outside* @given - it doesn't work properly from the inside
# 2. Use named arguments to @given - avoids confusing or colliding positional arguments
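A hedged sketch applying both tips to the mean function above (the test name and bounds are illustrative only):

import pytest
from fractions import Fraction
from hypothesis import given, strategies as st

@pytest.mark.parametrize("as_type", [int, float, Fraction])  # outside @given
@given(data=st.lists(st.floats(0, 100), min_size=1))  # named argument to @given
def test_mean_is_bounded(as_type, data):
    # Integer division can round down, hence the slack of 1 on the lower bound.
    result = mean(data, as_type=as_type)
    assert min(data) - 1 <= result <= max(data)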
Example 19
def equatorial_orbits(
    draw,
    sma=floats(R_E_KM, 42000),
    ecc=floats(0, 1, exclude_max=True),
    argp=floats(0, 360),
    ta=floats(0, 360),
    epoch=datetimes(),
):
    return KeplerianPredictor(
        draw(sma), draw(ecc), 0, 0, draw(argp), draw(ta), draw(epoch),
    )
Example 20
def greater_than_zero():
    """
    A strategy that yields floats greater than zero.
    """
    return st.floats(
        min_value=0.0,
        allow_infinity=False,
    ).filter(lambda x: x > 0.0)
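As a design note: the .filter(...) above discards 0.0 after generation; on Hypothesis versions that support the exclude_min flag, an equivalent strategy can be written without filtering (a hedged alternative):

def greater_than_zero_alt():
    # Excludes the lower bound directly instead of filtering it out afterwards.
    return st.floats(min_value=0.0, exclude_min=True, allow_infinity=False)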
Example 21
def _fuzz_number(
    parameter: Dict[str, Any],
    **kwargs: Any,
) -> SearchStrategy:
    # TODO: Handle all the optional qualifiers for numbers.
    # https://swagger.io/docs/specification/data-models/data-types/#numbers
    bounds = _find_bounds(parameter)
    return st.floats(**bounds)
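The TODO above refers to the OpenAPI number qualifiers; a hedged sketch of handling exclusiveMinimum/exclusiveMaximum (assuming parameter is the raw specification dict; this helper is illustrative, not part of the original fuzzer):

def _fuzz_bounded_number(parameter: Dict[str, Any]) -> SearchStrategy:
    kwargs = {}
    if "minimum" in parameter:
        kwargs["min_value"] = parameter["minimum"]
    if "maximum" in parameter:
        kwargs["max_value"] = parameter["maximum"]
    strategy = st.floats(allow_nan=False, allow_infinity=False, **kwargs)
    if parameter.get("exclusiveMinimum"):
        strategy = strategy.filter(lambda x: x > parameter["minimum"])
    if parameter.get("exclusiveMaximum"):
        strategy = strategy.filter(lambda x: x < parameter["maximum"])
    return strategy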
Example 22
def test_transform_always_yields_crops_of_the_correct_size(self, data):
    crop_height = data.draw(st.integers(1, 10))
    crop_width = data.draw(st.integers(1, 10))
    duration = data.draw(st.integers(1, 10))
    fixed_crops = data.draw(st.booleans())
    if fixed_crops:
        more_fixed_crops = data.draw(st.booleans())
    else:
        more_fixed_crops = False
    height = data.draw(st.integers(crop_height, crop_height * 100))
    width = data.draw(st.integers(crop_width, crop_width * 100))
    video_shape = (duration, height, width, 3)

    scale_strategy = st.floats(
        min_value=min(crop_width, crop_height) / min(height, width), max_value=1
    )
    scales = data.draw(st.lists(scale_strategy, min_size=1, max_size=5))
    max_distortion = data.draw(st.integers(0, len(scales)))
    video = NDArrayToPILVideo()(np.ones(video_shape, dtype=np.uint8))
    transform = MultiScaleCropVideo(
        size=ImageShape(height=crop_height, width=crop_width),
        scales=scales,
        max_distortion=max_distortion,
        fixed_crops=fixed_crops,
        more_fixed_crops=more_fixed_crops,
    )
    transformed_video = list(transform(video))

    print("video_shape", video_shape)
    print("scales", scales)
    print("max_distortion", max_distortion)
    print("fixed_crops", fixed_crops)
    print("more_fixed_crops", more_fixed_crops)

    assert len(transformed_video) == duration
    for frame in transformed_video:
        print("crop_size", np.array(frame).shape)
        np.testing.assert_allclose(np.array(frame), np.ones_like(frame))
        assert frame.height == crop_height
        assert frame.width == crop_width
Example 23
def to_min_max(arr: np.ndarray) -> st.SearchStrategy:
    bnd_shape = hnp.broadcastable_shapes(
        shape=arr.shape, max_dims=arr.ndim, max_side=min(arr.shape) if arr.ndim else 1
    )
    bnd_strat = hnp.arrays(
        shape=bnd_shape, elements=st.floats(-1e6, 1e6), dtype=np.float64
    )
    return st.fixed_dictionaries(dict(a_min=bnd_strat, a_max=bnd_strat))
Example 24
def test_uniform_value_validation(data):
    upper_bound = data.draw(_reasonable_floats)
    lower_bound = data.draw(st.floats(min_value=upper_bound))

    with pytest.raises(ValueError):
        uniform(10, lower_bound=lower_bound, upper_bound=upper_bound)
Example 25
def test_multiclass_hinge(data):
    """Test the built-in implementation of multiclass hinge
    against the pure mygrad version"""
    s = data.draw(
        hnp.arrays(
            shape=hnp.array_shapes(max_side=10, min_dims=2, max_dims=2),
            dtype=float,
            elements=st.floats(-100, 100),
        )
    )
    loss = data.draw(
        hnp.arrays(
            shape=(s.shape[0],),
            dtype=hnp.integer_dtypes(),
            elements=st.integers(min_value=0, max_value=s.shape[1] - 1),
        )
    )
    hinge_scores = Tensor(s)
    hinge_loss = multiclass_hinge(hinge_scores, loss, constant=False)
    hinge_loss.backward()

    mygrad_scores = Tensor(s)
    correct_labels = (range(len(loss)), loss)
    correct_class_scores = mygrad_scores[correct_labels]  # Nx1

    Lij = mygrad_scores - correct_class_scores[:, np.newaxis] + 1.0  # NxC margins
    Lij[Lij <= 0] = 0
    Lij[correct_labels] = 0

    mygrad_loss = Lij.sum() / mygrad_scores.shape[0]
    mygrad_loss.backward()
    assert_allclose(hinge_loss.data, mygrad_loss.data)
    assert_allclose(mygrad_scores.grad, hinge_scores.grad)
Example 26
def test_focal_loss(num_datum, num_classes, alpha, gamma, data, grad, target_type):
    scores = data.draw(
        hnp.arrays(shape=(num_datum, num_classes), dtype=float, elements=st.floats(1, 100))
    )
    assume((abs(scores.sum(axis=1)) > 0.001).all())

    scores_mygrad = Tensor(scores)
    scores_nn = Tensor(scores)

    truth = np.zeros((num_datum, num_classes))
    targets = data.draw(st.tuples(*(st.integers(0, num_classes - 1) for i in range(num_datum))))
    truth[range(num_datum), targets] = 1
    targets = target_type(targets)

    fl = focal_loss(softmax(scores_mygrad), targets, alpha=alpha, gamma=gamma).mean()
    fl.backward(grad)

    nn_loss = softmax_focal_loss(scores_nn, targets, alpha=alpha, gamma=gamma).mean()
    nn_loss.backward(grad)

    assert isinstance(nn_loss, Tensor) and nn_loss.ndim == 0
    assert_allclose(nn_loss.data, fl.data, atol=1e-4, rtol=1e-4)
    assert_allclose(scores_nn.grad, scores_mygrad.grad, atol=1e-4, rtol=1e-4)

    nn_loss.null_gradients()
    assert scores_nn.grad is None
Example 27
def test_softmax_crossentropy(data: st.DataObject, labels_as_tensor: bool):
    s = data.draw(
        hnp.arrays(
            shape=hnp.array_shapes(max_side=10, min_dims=2, max_dims=2),
            dtype=float,
            elements=st.floats(-100, 100),
        )
    )
    y_true = data.draw(
        hnp.arrays(
            shape=(s.shape[0],),
            dtype=hnp.integer_dtypes(),
            elements=st.integers(min_value=0, max_value=s.shape[1] - 1),
        ).map(Tensor if labels_as_tensor else lambda x: x)
    )
    scores = Tensor(s)
    softmax_cross = softmax_crossentropy(scores, y_true, constant=False)
    softmax_cross.backward()

    mygrad_scores = Tensor(s)
    probs = softmax(mygrad_scores)

    correct_labels = (range(len(y_true)), y_true.data if labels_as_tensor else y_true)
    truth = np.zeros(mygrad_scores.shape)
    truth[correct_labels] = 1

    mygrad_cross = (-1 / s.shape[0]) * (log(probs) * truth).sum()
    mygrad_cross.backward()
    assert_allclose(softmax_cross.data, mygrad_cross.data, atol=1e-5, rtol=1e-5)
    assert_allclose(scores.grad, mygrad_scores.grad, atol=1e-5, rtol=1e-5)
Example 28
def test_weighted_negative_log_likelihood(data: st.DataObject, labels_as_tensor: bool):
    s = data.draw(
        hnp.arrays(
            shape=hnp.array_shapes(min_side=1, max_side=10, min_dims=2, max_dims=2),
            dtype=float,
            elements=st.floats(-100, 100),
        )
    )
    y_true = data.draw(
        hnp.arrays(
            shape=(s.shape[0],),
            dtype=hnp.integer_dtypes(),
            elements=st.integers(min_value=0, max_value=s.shape[1] - 1),
        ).map(Tensor if labels_as_tensor else lambda x: x)
    )
    weights = data.draw(
        hnp.arrays(
            shape=(s.shape[1],),
            dtype=float,
            elements=st.floats(1e-8, 100),
        )
    )
    scores = Tensor(s)
    weights = Tensor(weights)

    for score, y in zip(scores, y_true):
        score = mg.log(mg.nnet.softmax(score.reshape(1, -1)))
        y = y.reshape(-1)
        nll = negative_log_likelihood(score, y)
        weighted_nll = negative_log_likelihood(score, y, weights=weights)
        assert np.isclose(weighted_nll.data, weights[y.data].data * nll.data)
Example 29
def test_finite_difference_no_broadcast(data, x):
    atol, rtol = (1e-2, 1e-2)
    y = data.draw(
        hnp.arrays(shape=x.shape, dtype=float, elements=st.floats(-100, 100)), label="y"
    )
    z = data.draw(
        hnp.arrays(shape=x.shape, dtype=float, elements=st.floats(-100, 100)), label="z"
    )
    grad = data.draw(
        hnp.arrays(shape=x.shape, dtype=float, elements=st.floats(-100, 100)),
        label="grad",
    )

    # check variable-selection
    assert finite_difference(unary_func, x, back_grad=grad, vary_ind=[])[0] is None

    # no broadcast
    (dx,) = finite_difference(unary_func, x, back_grad=grad)
    assert_allclose(dx, grad * 2 * x, atol=atol, rtol=rtol)

    dx, dy = numerical_gradient(binary_func, x, y, back_grad=grad)
    assert_allclose(dx, grad * y ** 2, atol=atol, rtol=rtol)
    assert_allclose(dy, grad * 2 * x * y, atol=atol, rtol=rtol)

    dx, dy, dz = numerical_gradient(ternary_func, x, y, z, back_grad=grad)
    assert_allclose(dx, grad * z * y ** 2, atol=atol, rtol=rtol)
    assert_allclose(dy, grad * z * 2 * x * y, atol=atol, rtol=rtol)
    assert_allclose(dz, grad * x * y ** 2, atol=atol, rtol=rtol)
Example 30
def test_transpose(x, data):
    axes = data.draw(
        valid_axes(x.ndim, min_dim=x.ndim, max_dim=x.ndim).map(
            lambda out: (out,) if isinstance(out, int) else out
        ),
        label="axes",
    )
    x_arr = Tensor(np.copy(x))

    o = transpose(x_arr, axes, constant=False)
    grad = data.draw(
        hnp.arrays(shape=o.shape, dtype=float, elements=st.floats(1, 10), unique=True),
        label="grad",
    )
    o.backward(grad)

    def f(x):
        return np.transpose(x, axes)

    assert_allclose(o.data, f(x))

    (dx,) = numerical_gradient_full(f, x, back_grad=grad)
    assert_allclose(x_arr.grad, dx)

    out = transpose(x, constant=True)
    assert out.constant and not x_arr.constant