From 135ff93f93bef997cefc0e61dd95b35f7eb68656 Mon Sep 17 00:00:00 2001
From: Dhruvanshu-Joshi
Date: Tue, 2 Jul 2024 17:39:19 +0530
Subject: [PATCH] Deprecated redefinition of np.allclose in _allclose

---
 pytensor/tensor/math.py                 | 12 ------
 pytensor/tensor/type.py                 |  7 ++-
 tests/graph/test_compute_test_value.py  | 37 +++++++++++++---
 tests/scan/test_rewriting.py            |  2 +-
 tests/sparse/test_basic.py              |  1 -
 tests/tensor/conv/test_abstract_conv.py |  4 --
 tests/tensor/test_basic.py              |  6 ---
 tests/tensor/test_math.py               |  5 ++-
 tests/tensor/test_nlinalg.py            | 57 +++++++++++++++++--------
 9 files changed, 80 insertions(+), 51 deletions(-)

diff --git a/pytensor/tensor/math.py b/pytensor/tensor/math.py
index d515d51c3a..fcd2e90e7c 100644
--- a/pytensor/tensor/math.py
+++ b/pytensor/tensor/math.py
@@ -130,18 +130,6 @@ def _get_atol_rtol(a, b):
     return atol, rtol
 
 
-def _allclose(a, b, rtol=None, atol=None):
-    a = np.asarray(a)
-    b = np.asarray(b)
-    atol_, rtol_ = _get_atol_rtol(a, b)
-    if rtol is not None:
-        rtol_ = rtol
-    if atol is not None:
-        atol_ = atol
-
-    return np.allclose(a, b, atol=atol_, rtol=rtol_)
-
-
 class Argmax(COp):
     """
     Calculate the argmax over a given axis or over all axes.
diff --git a/pytensor/tensor/type.py b/pytensor/tensor/type.py
index 952c22982a..0605d07245 100644
--- a/pytensor/tensor/type.py
+++ b/pytensor/tensor/type.py
@@ -662,7 +662,12 @@ def values_eq_approx(
         if str(a.dtype) not in continuous_dtypes:
             return np.all(a == b)
         else:
-            cmp = pytensor.tensor.math._allclose(a, b, rtol=rtol, atol=atol)
+            atol_, rtol_ = pytensor.tensor.math._get_atol_rtol(a, b)
+            if rtol is not None:
+                rtol_ = rtol
+            if atol is not None:
+                atol_ = atol
+            cmp = np.allclose(np.asarray(a), np.asarray(b), rtol=rtol_, atol=atol_)
             if cmp:
                 # Numpy claims they are close, this is good enough for us.
                 return True
diff --git a/tests/graph/test_compute_test_value.py b/tests/graph/test_compute_test_value.py
index ea59ff68f8..1fdc0db514 100644
--- a/tests/graph/test_compute_test_value.py
+++ b/tests/graph/test_compute_test_value.py
@@ -10,7 +10,7 @@
 from pytensor.graph.op import Op
 from pytensor.graph.type import Type
 from pytensor.link.c.op import COp
-from pytensor.tensor.math import _allclose, dot
+from pytensor.tensor.math import _get_atol_rtol, dot
 from pytensor.tensor.type import fmatrix, iscalar, matrix, vector
 
 
@@ -85,7 +85,15 @@ def test_variable_only(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([x, y], z)
-        assert _allclose(f(x.tag.test_value, y.tag.test_value), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(
+            f(x.tag.test_value, y.tag.test_value), z.tag.test_value
+        )
+        assert np.allclose(
+            f(x.tag.test_value, y.tag.test_value),
+            z.tag.test_value,
+            atol=atol_,
+            rtol=rtol_,
+        )
 
         # this test should fail
         y.tag.test_value = np.random.random((6, 5)).astype(config.floatX)
@@ -122,7 +130,16 @@ def test_string_var(self):
         out = dot(dot(x, y), z)
         assert hasattr(out.tag, "test_value")
         tf = pytensor.function([x, y], out)
-        assert _allclose(tf(x.tag.test_value, y.tag.test_value), out.tag.test_value)
+
+        atol_, rtol_ = _get_atol_rtol(
+            tf(x.tag.test_value, y.tag.test_value), out.tag.test_value
+        )
+        assert np.allclose(
+            tf(x.tag.test_value, y.tag.test_value),
+            out.tag.test_value,
+            atol=atol_,
+            rtol=rtol_,
+        )
 
         def f(x, y, z):
             return dot(dot(x, y), z)
@@ -141,7 +158,10 @@ def test_shared(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([x], z)
-        assert _allclose(f(x.tag.test_value), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(x.tag.test_value), z.tag.test_value)
+        assert np.allclose(
+            f(x.tag.test_value), z.tag.test_value, atol=atol_, rtol=rtol_
+        )
 
         # this test should fail
         y.set_value(np.random.random((5, 6)).astype(config.floatX))
@@ -156,7 +176,8 @@ def test_ndarray(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([], z)
-        assert _allclose(f(), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
+        assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)
 
         # this test should fail
         x = np.random.random((2, 4)).astype(config.floatX)
@@ -170,7 +191,8 @@ def test_empty_elemwise(self):
         z = (x + 2) * 3
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([], z)
-        assert _allclose(f(), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
+        assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)
 
     def test_constant(self):
         x = pt.constant(np.random.random((2, 3)), dtype=config.floatX)
@@ -180,7 +202,8 @@ def test_constant(self):
         z = dot(x, y)
         assert hasattr(z.tag, "test_value")
         f = pytensor.function([], z)
-        assert _allclose(f(), z.tag.test_value)
+        atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
+        assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)
 
         # this test should fail
         x = pt.constant(np.random.random((2, 4)), dtype=config.floatX)
diff --git a/tests/scan/test_rewriting.py b/tests/scan/test_rewriting.py
index 470eeceae7..b06acdaaef 100644
--- a/tests/scan/test_rewriting.py
+++ b/tests/scan/test_rewriting.py
@@ -1270,7 +1270,7 @@ def f_rnn_cmpl(u1_t, u2_t, x_tm1, y_tm1, y_tm3, W_in1):
 
         (pytensor_dump, pytensor_x, pytensor_y) = f4(v_u1, v_u2, v_x0, v_y0, vW_in1)
 
-        np.testing.assert_allclose(pytensor_x, v_x[-1:][0])
+        np.testing.assert_allclose(pytensor_x, v_x[-1:].squeeze(0))
         np.testing.assert_allclose(pytensor_y, v_y[-1:])
 
     def test_save_mem_reduced_number_of_steps(self):
diff --git a/tests/sparse/test_basic.py b/tests/sparse/test_basic.py
index 4ed5e9e505..a67ed6f80e 100644
--- a/tests/sparse/test_basic.py
+++ b/tests/sparse/test_basic.py
@@ -2091,7 +2091,6 @@ def test_op(self, op_type):
                 f = pytensor.function(variable, self.op(variable[0], axis=axis))
                 tested = f(*data)
                 expected = data[0].todense().sum(axis).ravel()
-
                 np.testing.assert_allclose(expected, [tested], atol=1e-08, rtol=1e-05)
 
     def test_infer_shape(self):
diff --git a/tests/tensor/conv/test_abstract_conv.py b/tests/tensor/conv/test_abstract_conv.py
index 0baf51fbb4..43b58121d6 100644
--- a/tests/tensor/conv/test_abstract_conv.py
+++ b/tests/tensor/conv/test_abstract_conv.py
@@ -38,9 +38,6 @@
 from tests.tensor.conv import c_conv3d_corr3d_ref, c_conv_corr_ref
 
 
-pytensor.config.mode = "FAST_COMPILE"
-
-
 def conv2d_corr(
     inputs,
     filters,
@@ -2414,7 +2411,6 @@ def test_fwd(self):
             for j in range(0, kshp[2]):
                 single_kern = kern[:, i, j, ...].reshape(single_kshp)
                 ref_val = ref_func(img, single_kern)
-
                 np.testing.assert_allclose(
                     ref_val[:, :, i, j],
                     unshared_output[:, :, i, j],
diff --git a/tests/tensor/test_basic.py b/tests/tensor/test_basic.py
index c1993b3111..f71f8c6f34 100644
--- a/tests/tensor/test_basic.py
+++ b/tests/tensor/test_basic.py
@@ -8,7 +8,6 @@
 import pytensor
 import pytensor.scalar as ps
 import pytensor.tensor.basic as ptb
-import pytensor.tensor.math as ptm
 from pytensor import compile, config, function, shared
 from pytensor.compile import SharedVariable
 from pytensor.compile.io import In, Out
@@ -1258,11 +1257,6 @@ def test_cast_from_complex_to_real_raises_error(self, real_dtype, complex_dtype)
     # gradient numerically
 
 
-def test_basic_allclose():
-    # This was raised by a user in https://github.com/Theano/Theano/issues/2975
-    assert ptm._allclose(-0.311023883434, -0.311022856884)
-
-
 def test_get_vector_length():
     # Test `Constant`s
     empty_tuple = as_tensor_variable(())
diff --git a/tests/tensor/test_math.py b/tests/tensor/test_math.py
index 066864429e..7d8c6a8eaa 100644
--- a/tests/tensor/test_math.py
+++ b/tests/tensor/test_math.py
@@ -45,8 +45,8 @@
     Prod,
     ProdWithoutZeros,
     Sum,
-    _allclose,
     _dot,
+    _get_atol_rtol,
    abs,
     add,
     allclose,
@@ -3608,7 +3608,8 @@ def setup_method(self):
     def _validate_output(self, a, b):
         pytensor_sol = self.op(a, b).eval()
         numpy_sol = np.matmul(a, b)
-        assert _allclose(numpy_sol, pytensor_sol)
+        atol_, rtol_ = _get_atol_rtol(numpy_sol, pytensor_sol)
+        assert np.allclose(numpy_sol, pytensor_sol, atol=atol_, rtol=rtol_)
 
     @pytest.mark.parametrize(
         "x1, x2",
diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 70d383dd83..eeeffcc108 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -9,7 +9,7 @@
 from pytensor import function
 from pytensor.configdefaults import config
 from pytensor.tensor.basic import as_tensor_variable
-from pytensor.tensor.math import _allclose
+from pytensor.tensor.math import _get_atol_rtol
 from pytensor.tensor.nlinalg import (
     SVD,
     Eig,
@@ -60,7 +60,8 @@ def test_pseudoinverse_correctness():
     assert ri.dtype == r.dtype
     # Note that pseudoinverse can be quite imprecise so I prefer to compare
     # the result with what np.linalg returns
-    assert _allclose(ri, np.linalg.pinv(r))
+    atol_, rtol_ = _get_atol_rtol(ri, np.linalg.pinv(r))
+    assert np.allclose(ri, np.linalg.pinv(r), atol=atol_, rtol=rtol_)
 
 
 def test_pseudoinverse_grad():
@@ -92,8 +93,11 @@ def test_inverse_correctness(self):
 
         rir = np.dot(ri, r)
         rri = np.dot(r, ri)
-        assert _allclose(np.identity(4), rir), rir
-        assert _allclose(np.identity(4), rri), rri
+        atol_, rtol_ = _get_atol_rtol(np.identity(4), rir)
+        assert np.allclose(np.identity(4), rir, atol=atol_, rtol=rtol_), rir
+
+        atol_, rtol_ = _get_atol_rtol(np.identity(4), rri)
+        assert np.allclose(np.identity(4), rri, atol=atol_, rtol=rtol_), rri
 
     def test_infer_shape(self):
         r = self.rng.standard_normal((4, 4)).astype(config.floatX)
@@ -119,7 +123,8 @@ def test_matrix_dot():
     for r in rs[1:]:
         numpy_sol = np.dot(numpy_sol, r)
 
-    assert _allclose(numpy_sol, pytensor_sol)
+    atol_, rtol_ = _get_atol_rtol(numpy_sol, pytensor_sol)
+    assert np.allclose(numpy_sol, pytensor_sol, atol=atol_, rtol=rtol_)
 
 
 def test_qr_modes():
@@ -131,23 +136,34 @@ def test_qr_modes():
     f = function([A], qr(A))
     t_qr = f(a)
     n_qr = np.linalg.qr(a)
-    assert _allclose(n_qr, t_qr)
+    atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr))
+    assert np.allclose(np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_)
 
     for mode in ["reduced", "r", "raw"]:
         f = function([A], qr(A, mode))
         t_qr = f(a)
         n_qr = np.linalg.qr(a, mode)
         if isinstance(n_qr, list | tuple):
-            assert _allclose(n_qr[0], t_qr[0])
-            assert _allclose(n_qr[1], t_qr[1])
+            atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr[0]), np.asarray(t_qr[0]))
+            assert np.allclose(
+                np.asarray(n_qr[0]), np.asarray(t_qr[0]), atol=atol_, rtol=rtol_
+            )
+            atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr[1]), np.asarray(t_qr[1]))
+            assert np.allclose(
+                np.asarray(n_qr[1]), np.asarray(t_qr[1]), atol=atol_, rtol=rtol_
+            )
         else:
-            assert _allclose(n_qr, t_qr)
+            atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr))
+            assert np.allclose(
+                np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_
+            )
 
     try:
         n_qr = np.linalg.qr(a, "complete")
         f = function([A], qr(A, "complete"))
         t_qr = f(a)
-        assert _allclose(n_qr, t_qr)
+        atol_, rtol_ = _get_atol_rtol(np.asarray(n_qr), np.asarray(t_qr))
+        assert np.allclose(np.asarray(n_qr), np.asarray(t_qr), atol=atol_, rtol=rtol_)
     except TypeError as e:
         assert "name 'complete' is not defined" in str(e)
 
@@ -199,7 +215,8 @@ def test_svd(self, core_shape, full_matrix, compute_uv, batched, test_imag):
         np_outputs = np_outputs if isinstance(np_outputs, tuple) else [np_outputs]
 
         for np_val, pt_val in zip(np_outputs, pt_outputs):
-            assert _allclose(np_val, pt_val)
+            atol_, rtol_ = _get_atol_rtol(np_val, pt_val)
+            assert np.allclose(np_val, pt_val, atol=atol_, rtol=rtol_)
 
     def test_svd_infer_shape(self):
         self.validate_shape((4, 4), full_matrices=True, compute_uv=True)
@@ -306,7 +323,8 @@ def test_tensorsolve():
 
     n_x = np.linalg.tensorsolve(a, b)
     t_x = fn(a, b)
-    assert _allclose(n_x, t_x)
+    atol_, rtol_ = _get_atol_rtol(n_x, np.asarray(t_x))
+    assert np.allclose(n_x, t_x, atol=atol_, rtol=rtol_)
 
     # check the type upcast now
     C = tensor4("C", dtype="float32")
@@ -319,7 +337,8 @@ def test_tensorsolve():
     d = rng.random((2 * 3, 4)).astype("float64")
     n_y = np.linalg.tensorsolve(c, d)
     t_y = fn(c, d)
-    assert _allclose(n_y, t_y)
+    atol_, rtol_ = _get_atol_rtol(n_y, np.asarray(t_y))
+    assert np.allclose(n_y, t_y, atol=atol_, rtol=rtol_)
     assert n_y.dtype == Y.dtype
 
     # check the type upcast now
@@ -333,7 +352,8 @@ def test_tensorsolve():
     f = rng.random((2 * 3, 4)).astype("float64")
     n_z = np.linalg.tensorsolve(e, f)
     t_z = fn(e, f)
-    assert _allclose(n_z, t_z)
+    atol_, rtol_ = _get_atol_rtol(n_z, np.asarray(t_z))
+    assert np.allclose(n_z, t_z, atol=atol_, rtol=rtol_)
     assert n_z.dtype == Z.dtype
 
 
@@ -653,7 +673,8 @@ def test_eval(self):
         n_ainv = np.linalg.tensorinv(self.a)
         tf_a = function([A], [Ai])
         t_ainv = tf_a(self.a)
-        assert _allclose(n_ainv, t_ainv)
+        atol_, rtol_ = _get_atol_rtol(n_ainv, np.asarray(t_ainv))
+        assert np.allclose(n_ainv, t_ainv, atol=atol_, rtol=rtol_)
 
         B = self.B
         Bi = tensorinv(B)
@@ -664,8 +685,10 @@ def test_eval(self):
         tf_b1 = function([B], [Bi1])
         t_binv = tf_b(self.b)
         t_binv1 = tf_b1(self.b1)
-        assert _allclose(t_binv, n_binv)
-        assert _allclose(t_binv1, n_binv1)
+        atol_, rtol_ = _get_atol_rtol(np.asarray(t_binv), n_binv)
+        assert np.allclose(t_binv, n_binv, atol=atol_, rtol=rtol_)
+        atol_, rtol_ = _get_atol_rtol(np.asarray(t_binv1), n_binv1)
+        assert np.allclose(t_binv1, n_binv1, atol=atol_, rtol=rtol_)
 
 
 class TestKron(utt.InferShapeTester):
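
Reviewer note: every call site above inlines the same pattern that the deleted
`_allclose` helper used to provide: derive dtype-aware default tolerances from
`_get_atol_rtol`, let explicit `rtol`/`atol` arguments override them, then defer
to `np.allclose`. A minimal standalone sketch of that pattern follows, assuming
`_get_atol_rtol` keeps its `(atol, rtol)` return order; the wrapper name is
hypothetical and only for illustration, not part of this patch:

    import numpy as np

    from pytensor.tensor.math import _get_atol_rtol


    def allclose_with_default_tols(a, b, rtol=None, atol=None):
        # Mirror the deleted _allclose helper: dtype-aware defaults,
        # overridden by explicit rtol/atol, then plain np.allclose.
        a = np.asarray(a)
        b = np.asarray(b)
        atol_, rtol_ = _get_atol_rtol(a, b)
        if rtol is not None:
            rtol_ = rtol
        if atol is not None:
            atol_ = atol
        return np.allclose(a, b, rtol=rtol_, atol=atol_)


    # The comparison exercised by the removed test_basic_allclose
    # (originally reported in Theano issue #2975):
    assert allclose_with_default_tols(-0.311023883434, -0.311022856884)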