Deprecated redefinition of np.allclose in _allclose
Dhruvanshu-Joshi committed Jul 2, 2024
1 parent 3cc2393 commit 87c6ee6
Showing 9 changed files with 80 additions and 51 deletions.
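
In short: `_allclose` in pytensor/tensor/math.py was a thin private wrapper that fetched dtype-aware default tolerances from `_get_atol_rtol` and forwarded them to `np.allclose`. This commit deletes the wrapper and inlines that two-step pattern at every call site. A minimal sketch of the replacement pattern, using only names visible in the diff below (`close_with_default_tols` is a hypothetical name for illustration):

import numpy as np
from pytensor.tensor.math import _get_atol_rtol

def close_with_default_tols(a, b):
    # What each former _allclose call site now does inline:
    # look up dtype-aware tolerances, then defer to np.allclose.
    a, b = np.asarray(a), np.asarray(b)
    atol_, rtol_ = _get_atol_rtol(a, b)
    return np.allclose(a, b, atol=atol_, rtol=rtol_)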
pytensor/tensor/math.py (12 changes: 0 additions & 12 deletions)
@@ -130,18 +130,6 @@ def _get_atol_rtol(a, b):
     return atol, rtol
 
 
-def _allclose(a, b, rtol=None, atol=None):
-    a = np.asarray(a)
-    b = np.asarray(b)
-    atol_, rtol_ = _get_atol_rtol(a, b)
-    if rtol is not None:
-        rtol_ = rtol
-    if atol is not None:
-        atol_ = atol
-
-    return np.allclose(a, b, atol=atol_, rtol=rtol_)
-
-
 class Argmax(COp):
     """
     Calculate the argmax over a given axis or over all axes.
pytensor/tensor/type.py (7 changes: 6 additions & 1 deletion)
@@ -662,7 +662,12 @@ def values_eq_approx(
         if str(a.dtype) not in continuous_dtypes:
             return np.all(a == b)
         else:
-            cmp = pytensor.tensor.math._allclose(a, b, rtol=rtol, atol=atol)
+            atol_, rtol_ = pytensor.tensor.math._get_atol_rtol(a, b)
+            if rtol is not None:
+                rtol_ = rtol
+            if atol is not None:
+                atol_ = atol
+            cmp = np.allclose(np.asarray(a), np.asarray(b), rtol=rtol_, atol=atol_)
             if cmp:
                 # Numpy claims they are close, this is good enough for us.
                 return True

(Codecov flagged the added lines at pytensor/tensor/type.py L667 and L669, the rtol/atol override branches, as not covered by tests.)
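
For context: `_get_atol_rtol`, which stays in pytensor/tensor/math.py, selects default tolerances from the dtypes of its two arguments. A rough sketch of that selection follows; the exact dtype lists and constants here are assumptions for illustration, not the library's verbatim code:

def _get_atol_rtol_sketch(a, b):
    # Assumed behaviour: looser tolerances for lower-precision floats,
    # numpy-like defaults otherwise. See pytensor/tensor/math.py for
    # the real dtype lists and values.
    narrow = ("float32", "complex64")
    if str(a.dtype) in narrow or str(b.dtype) in narrow:
        return 1e-5, 1e-5  # (atol, rtol) for single precision
    return 1e-8, 1e-5      # np.allclose defaults for double precision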
tests/graph/test_compute_test_value.py (37 changes: 30 additions & 7 deletions)
@@ -10,7 +10,7 @@
 from pytensor.graph.op import Op
 from pytensor.graph.type import Type
 from pytensor.link.c.op import COp
-from pytensor.tensor.math import _allclose, dot
+from pytensor.tensor.math import _get_atol_rtol, dot
 from pytensor.tensor.type import fmatrix, iscalar, matrix, vector
 
 
@@ -85,7 +85,15 @@ def test_variable_only(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([x, y], z)
-        assert _allclose(f(x.tag.test_value, y.tag.test_value), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(
+            f(x.tag.test_value, y.tag.test_value), z.tag.test_value
+        )
+        assert np.allclose(
+            f(x.tag.test_value, y.tag.test_value),
+            z.tag.test_value,
+            atol=atol_,
+            rtol=rtol_,
+        )
 
         # this test should fail
         y.tag.test_value = np.random.random((6, 5)).astype(config.floatX)
@@ -122,7 +130,16 @@ def test_string_var(self):
         out = dot(dot(x, y), z)
         assert hasattr(out.tag, "test_value")
         tf = pytensor.function([x, y], out)
-        assert _allclose(tf(x.tag.test_value, y.tag.test_value), out.tag.test_value)
+
+        atol_, rtol_ = _get_atol_rtol(
+            tf(x.tag.test_value, y.tag.test_value), out.tag.test_value
+        )
+        assert np.allclose(
+            tf(x.tag.test_value, y.tag.test_value),
+            out.tag.test_value,
+            atol=atol_,
+            rtol=rtol_,
+        )
 
         def f(x, y, z):
             return dot(dot(x, y), z)
@@ -141,7 +158,10 @@ def test_shared(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([x], z)
-        assert _allclose(f(x.tag.test_value), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(x.tag.test_value), z.tag.test_value)
+        assert np.allclose(
+            f(x.tag.test_value), z.tag.test_value, atol=atol_, rtol=rtol_
+        )
 
         # this test should fail
         y.set_value(np.random.random((5, 6)).astype(config.floatX))
@@ -156,7 +176,8 @@ def test_ndarray(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([], z)
-        assert _allclose(f(), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
+        assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)
 
         # this test should fail
         x = np.random.random((2, 4)).astype(config.floatX)
@@ -170,7 +191,8 @@ def test_empty_elemwise(self):
         z = (x + 2) * 3
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([], z)
-        assert _allclose(f(), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
+        assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)
 
     def test_constant(self):
         x = pt.constant(np.random.random((2, 3)), dtype=config.floatX)
@@ -180,7 +202,8 @@ def test_constant(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([], z)
-        assert _allclose(f(), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
+        assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)
 
         # this test should fail
         x = pt.constant(np.random.random((2, 4)), dtype=config.floatX)
tests/scan/test_rewriting.py (2 changes: 1 addition & 1 deletion)
@@ -1270,7 +1270,7 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1):
 
         (pytensor_dump, pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1)
 
-        np.testing.assert_allclose(pytensor_x, v_x[-1:][0])
+        np.testing.assert_allclose(pytensor_x, v_x[-1:].squeeze(0))
         np.testing.assert_allclose(pytensor_y, v_y[-1:])
 
     def test_save_mem_reduced_number_of_steps(self):
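
The `v_x[-1:][0]` to `v_x[-1:].squeeze(0)` swap is behaviour-preserving here: slicing with `[-1:]` keeps a length-1 leading axis, and both `[0]` and `.squeeze(0)` drop it. A quick self-contained check (`v_x` below is a stand-in array, not the test's actual data):

import numpy as np

v_x = np.arange(12.0).reshape(3, 4)  # stand-in for the scan output
assert v_x[-1:].shape == (1, 4)      # [-1:] keeps a singleton leading axis
assert np.array_equal(v_x[-1:][0], v_x[-1:].squeeze(0))  # both give shape (4,)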
tests/sparse/test_basic.py (1 change: 0 additions & 1 deletion)
@@ -2091,7 +2091,6 @@ def test_op(self, op_type):
         f = pytensor.function(variable, self.op(variable[0], axis=axis))
         tested = f(*data)
         expected = data[0].todense().sum(axis).ravel()
-
         np.testing.assert_allclose(expected, [tested], atol=1e-08, rtol=1e-05)
 
     def test_infer_shape(self):
tests/tensor/conv/test_abstract_conv.py (4 changes: 0 additions & 4 deletions)
@@ -38,9 +38,6 @@
 from tests.tensor.conv import c_conv3d_corr3d_ref, c_conv_corr_ref
 
 
-pytensor.config.mode = "FAST_COMPILE"
-
-
 def conv2d_corr(
     inputs,
     filters,
@@ -2414,7 +2411,6 @@ def test_fwd(self):
             for j in range(0, kshp[2]):
                 single_kern = kern[:, i, j, ...].reshape(single_kshp)
                 ref_val = ref_func(img, single_kern)
-
                 np.testing.assert_allclose(
                     ref_val[:, :, i, j],
                     unshared_output[:, :, i, j],
tests/tensor/test_basic.py (6 changes: 0 additions & 6 deletions)
@@ -8,7 +8,6 @@
 import pytensor
 import pytensor.scalar as ps
 import pytensor.tensor.basic as ptb
-import pytensor.tensor.math as ptm
 from pytensor import compile, config, function, shared
 from pytensor.compile import SharedVariable
 from pytensor.compile.io import In, Out
@@ -1258,11 +1257,6 @@ def test_cast_from_complex_to_real_raises_error(self, real_dtype, complex_dtype)
     # gradient numerically
 
 
-def test_basic_allclose():
-    # This was raised by a user in https://github.com/Theano/Theano/issues/2975
-    assert ptm._allclose(-0.311023883434, -0.311022856884)
-
-
def test_get_vector_length():
    # Test `Constant`s
    empty_tuple = as_tensor_variable(())
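
Dropping `test_basic_allclose` does not lose the check it encoded: for the regression pair above, plain `np.allclose` with its float64 defaults already passes, since |a - b| is about 1.03e-06, well under atol + rtol * |b|, roughly 1e-08 + 1e-05 * 0.311, or about 3.12e-06:

import numpy as np

a, b = -0.311023883434, -0.311022856884
assert np.allclose(a, b)  # default rtol=1e-05, atol=1e-08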
tests/tensor/test_math.py (5 changes: 3 additions & 2 deletions)
@@ -45,8 +45,8 @@
     Prod,
     ProdWithoutZeros,
     Sum,
-    _allclose,
     _dot,
+    _get_atol_rtol,
     abs,
     add,
     allclose,
@@ -3608,7 +3608,8 @@ def setup_method(self):
     def _validate_output(self, a, b):
         pytensor_sol = self.op(a, b).eval()
         numpy_sol = np.matmul(a, b)
-        assert _allclose(numpy_sol, pytensor_sol)
+        atol_, rtol_ = _get_atol_rtol(numpy_sol, pytensor_sol)
+        assert np.allclose(numpy_sol, pytensor_sol, atol=atol_, rtol=rtol_)
 
     @pytest.mark.parametrize(
         "x1, x2",
tests/tensor/test_nlinalg.py (57 changes: 40 additions & 17 deletions)
@@ -9,7 +9,7 @@
 from pytensor import function
 from pytensor.configdefaults import config
 from pytensor.tensor.basic import as_tensor_variable
-from pytensor.tensor.math import _allclose
+from pytensor.tensor.math import _get_atol_rtol
 from pytensor.tensor.nlinalg import (
     SVD,
     Eig,
@@ -60,7 +60,8 @@ def test_pseudoinverse_correctness():
     assert ri.dtype == r.dtype
     # Note that pseudoinverse can be quite imprecise so I prefer to compare
     # the result with what np.linalg returns
-    assert _allclose(ri, np.linalg.pinv(r))
+    atol_, rtol_ = _get_atol_rtol(ri, np.linalg.pinv(r))
+    assert np.allclose(ri, np.linalg.pinv(r), atol=atol_, rtol=rtol_)
 
 
 def test_pseudoinverse_grad():
@@ -92,8 +93,11 @@ def test_inverse_correctness(self):
         rir = np.dot(ri, r)
         rri = np.dot(r, ri)
 
-        assert _allclose(np.identity(4), rir), rir
-        assert _allclose(np.identity(4), rri), rri
+        atol_, rtol_ = _get_atol_rtol(np.identity(4), rir)
+        assert np.allclose(np.identity(4), rir, atol=atol_, rtol=rtol_), rir
+
+        atol_, rtol_ = _get_atol_rtol(np.identity(4), rri)
+        assert np.allclose(np.identity(4), rri, atol=atol_, rtol=rtol_), rri
 
     def test_infer_shape(self):
         r = self.rng.standard_normal((4, 4)).astype(config.floatX)
@@ -119,7 +123,8 @@ def test_matrix_dot():
     for r in rs[1:]:
         numpy_sol = np.dot(numpy_sol, r)
 
-    assert _allclose(numpy_sol, pytensor_sol)
+    atol_, rtol_ = _get_atol_rtol(numpy_sol, pytensor_sol)
+    assert np.allclose(numpy_sol, pytensor_sol, atol=atol_, rtol=rtol_)
 
 
 def test_qr_modes():
@@ -131,23 +136,34 @@ def test_qr_modes():
     f = function([A], qr(A))
     t_qr = f(a)
     n_qr = np.linalg.qr(a)
-    assert _allclose(n_qr, t_qr)
+    atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr))
+    assert np.allclose(np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_)
 
     for mode in ["reduced", "r", "raw"]:
         f = function([A], qr(A, mode))
         t_qr = f(a)
         n_qr = np.linalg.qr(a, mode)
         if isinstance(n_qr, list | tuple):
-            assert _allclose(n_qr[0], t_qr[0])
-            assert _allclose(n_qr[1], t_qr[1])
+            atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr[0]), np.asarray(t_qr[0]))
+            assert np.allclose(
+                np.asarray(n_qr[0]), np.asarray(t_qr[0]), atol=atol_, rtol=rtol_
+            )
+            atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr[1]), np.asarray(t_qr[1]))
+            assert np.allclose(
+                np.asarray(n_qr[1]), np.asarray(t_qr[1]), atol=atol_, rtol=rtol_
+            )
         else:
-            assert _allclose(n_qr, t_qr)
+            atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr))
+            assert np.allclose(
+                np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_
+            )
 
     try:
         n_qr = np.linalg.qr(a, "complete")
         f = function([A], qr(A, "complete"))
         t_qr = f(a)
-        assert _allclose(n_qr, t_qr)
+        atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr))
+        assert np.allclose(np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_)
     except TypeError as e:
         assert "name 'complete' is not defined" in str(e)
 
@@ -199,7 +215,8 @@ def test_svd(self, core_shape, full_matrix, compute_uv, batched, test_imag):
         np_outputs = np_outputs if isinstance(np_outputs, tuple) else [np_outputs]
 
         for np_val, pt_val in zip(np_outputs, pt_outputs):
-            assert _allclose(np_val, pt_val)
+            atol_, rtol_ = _get_atol_rtol(np_val, pt_val)
+            assert np.allclose(np_val, pt_val, atol=atol_, rtol=rtol_)
 
     def test_svd_infer_shape(self):
         self.validate_shape((4, 4), full_matrices=True, compute_uv=True)
Expand Down Expand Up @@ -306,7 +323,8 @@ def test_tensorsolve():

n_x = np.linalg.tensorsolve(a, b)
t_x = fn(a, b)
assert _allclose(n_x, t_x)
atol_, rtol_ = _get_atol_rtol(n_x, np.asarray(t_x))
assert np.allclose(n_x, t_x, atol=atol_, rtol=rtol_)

# check the type upcast now
C = tensor4("C", dtype="float32")
Expand All @@ -319,7 +337,8 @@ def test_tensorsolve():
d = rng.random((2 * 3, 4)).astype("float64")
n_y = np.linalg.tensorsolve(c, d)
t_y = fn(c, d)
assert _allclose(n_y, t_y)
atol_, rtol_ = _get_atol_rtol(n_y, np.asarray(t_y))
assert np.allclose(n_y, t_y, atol=atol_, rtol=rtol_)
assert n_y.dtype == Y.dtype

# check the type upcast now
Expand All @@ -333,7 +352,8 @@ def test_tensorsolve():
f = rng.random((2 * 3, 4)).astype("float64")
n_z = np.linalg.tensorsolve(e, f)
t_z = fn(e, f)
assert _allclose(n_z, t_z)
atol_, rtol_ = _get_atol_rtol(n_z, np.asarray(t_z))
assert np.allclose(n_z, t_z, atol=atol_, rtol=rtol_)
assert n_z.dtype == Z.dtype


Expand Down Expand Up @@ -653,7 +673,8 @@ def test_eval(self):
n_ainv = np.linalg.tensorinv(self.a)
tf_a = function([A], [Ai])
t_ainv = tf_a(self.a)
assert _allclose(n_ainv, t_ainv)
atol_, rtol_ = _get_atol_rtol(n_ainv, np.asarray(t_ainv))
assert np.allclose(n_ainv, t_ainv, atol=atol_, rtol=rtol_)

B = self.B
Bi = tensorinv(B)
Expand All @@ -664,8 +685,10 @@ def test_eval(self):
tf_b1 = function([B], [Bi1])
t_binv = tf_b(self.b)
t_binv1 = tf_b1(self.b1)
assert _allclose(t_binv, n_binv)
assert _allclose(t_binv1, n_binv1)
atol_, rtol_ = _get_atol_rtol(np.asarray(t_binv), n_binv)
assert np.allclose(t_binv, n_binv, atol=atol_, rtol=rtol_)
atol_, rtol_ = _get_atol_rtol(np.asarray(t_binv1), n_binv1)
assert np.allclose(t_binv1, n_binv1, atol=atol_, rtol=rtol_)


class TestKron(utt.InferShapeTester):
Expand Down
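
A note on the `np.asarray(...)` calls that now appear throughout these tests: `_allclose` coerced both inputs with `np.asarray` internally (see the removed code in pytensor/tensor/math.py above), while `pytensor.function([A], [Ai])` returns a list of outputs and `np.linalg.qr` can return a tuple. Since `_get_atol_rtol` selects tolerances from its arguments' dtypes, the coercion now has to happen at the call site. A small illustration with hypothetical values:

import numpy as np

t_ainv = [np.eye(2)]   # a compiled function with list outputs returns a list
n_ainv = np.eye(2)
# Lists have no .dtype, so _get_atol_rtol needs an ndarray; coercing first
# restores what _allclose used to do internally.
assert np.asarray(t_ainv).dtype == n_ainv.dtype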
