From 08cbfa4cbd8a346dda87aa0fa6a01ee7419bd845 Mon Sep 17 00:00:00 2001 From: Tal Ben-Nun Date: Sat, 9 Nov 2024 00:56:56 -0800 Subject: [PATCH] Unskip unit tests and provide reasons for skipped tests --- .github/workflows/general-ci.yml | 2 +- dace/codegen/compiled_sdfg.py | 2 +- dace/sdfg/sdfg.py | 2 +- dace/subsets.py | 4 ++ pytest.ini | 1 + samples/fpga/jacobi_fpga_systolic.py | 6 +-- tests/codegen/allocation_lifetime_test.py | 2 +- tests/fpga/jacobi_fpga_test.py | 4 +- .../map_unroll_processing_elements_test.py | 4 +- tests/fpga/matmul_test.py | 4 +- tests/fpga/streaming_memory_test.py | 36 ++++++++--------- tests/fpga/vec_sum_test.py | 6 +-- tests/inlining_test.py | 4 +- tests/instrumentation_test.py | 3 +- ...ledtest.py => nested_control_flow_test.py} | 5 +-- .../npbench/deep_learning/conv2d_bias_test.py | 2 +- tests/npbench/deep_learning/lenet_test.py | 2 +- tests/npbench/misc/azimint_naive_test.py | 2 +- tests/npbench/misc/contour_integral_test.py | 7 ++-- tests/npbench/misc/mandelbrot1_test.py | 6 +-- tests/npbench/misc/mandelbrot2_test.py | 6 +-- tests/npbench/misc/nbody_test.py | 2 +- tests/npbench/misc/spmv_test.py | 2 +- tests/npbench/misc/stockham_fft_test.py | 4 +- tests/npbench/polybench/correlation_test.py | 1 - tests/npbench/polybench/deriche_test.py | 1 - tests/npbench/polybench/doitgen_test.py | 1 - tests/npbench/polybench/durbin_test.py | 1 - tests/npbench/polybench/gemver_test.py | 1 - tests/npbench/polybench/symm_test.py | 1 - tests/npbench/weather_stencils/vadv_test.py | 3 +- tests/numpy/linalg_test.py | 6 +-- tests/numpy/list_globals_test.py | 2 +- tests/numpy/transpose_test.py | 2 +- tests/numpy/ufunc_test.py | 39 +++++-------------- .../assignment_statements_test.py | 4 +- tests/python_frontend/augassign_wcr_test.py | 1 - .../callback_autodetect_test.py | 8 ++-- .../conditional_assignment_test.py | 3 +- tests/python_frontend/lambda_test.py | 10 ++--- tests/python_frontend/string_test.py | 4 +- .../structures/structure_python_test.py | 2 +- tests/reduce_strided_disabledtest.py | 38 +++++++++--------- tests/rtl/hardware_test.py | 10 ++--- tests/sdfg/scalar_return.py | 4 +- tests/symbol_dependent_transients_test.py | 4 +- 46 files changed, 116 insertions(+), 148 deletions(-) rename tests/{nested_control_flow_disabledtest.py => nested_control_flow_test.py} (88%) diff --git a/.github/workflows/general-ci.yml b/.github/workflows/general-ci.yml index 2044639e5f..cde07f0406 100644 --- a/.github/workflows/general-ci.yml +++ b/.github/workflows/general-ci.yml @@ -55,7 +55,7 @@ jobs: else export DACE_optimizer_automatic_simplification=${{ matrix.simplify }} fi - pytest -n auto --cov-report=xml --cov=dace --tb=short -m "not gpu and not verilator and not tensorflow and not mkl and not sve and not papi and not mlir and not lapack and not fpga and not mpi and not rtl_hardware and not scalapack and not datainstrument" + pytest -n auto --cov-report=xml --cov=dace --tb=short -m "not gpu and not verilator and not tensorflow and not mkl and not sve and not papi and not mlir and not lapack and not fpga and not mpi and not rtl_hardware and not scalapack and not datainstrument and not long" ./codecov - name: Test OpenBLAS LAPACK diff --git a/dace/codegen/compiled_sdfg.py b/dace/codegen/compiled_sdfg.py index 332db028ae..bae8531e62 100644 --- a/dace/codegen/compiled_sdfg.py +++ b/dace/codegen/compiled_sdfg.py @@ -580,7 +580,7 @@ def _construct_args(self, kwargs) -> Tuple[Tuple[Any], Tuple[Any]]: arg_ctypes = tuple(at.dtype.as_ctypes() for at in argtypes) constants = self.sdfg.constants - 
callparams = tuple((actype(arg.get()) if isinstance(arg, symbolic.symbol) else arg, actype, atype, aname) + callparams = tuple((arg, actype, atype, aname) for arg, actype, atype, aname in zip(arglist, arg_ctypes, argtypes, argnames) if not (symbolic.issymbolic(arg) and (hasattr(arg, 'name') and arg.name in constants))) diff --git a/dace/sdfg/sdfg.py b/dace/sdfg/sdfg.py index 716bb9accc..1b449403d5 100644 --- a/dace/sdfg/sdfg.py +++ b/dace/sdfg/sdfg.py @@ -1208,7 +1208,7 @@ def cast(dtype: dt.Data, value: Any): if isinstance(dtype, dt.Array): return value elif isinstance(dtype, dt.Scalar): - return dtype.dtype(value) + return dtype.dtype.type(value) raise TypeError('Unsupported data type %s' % dtype) result.update({k: cast(*v) for k, v in self.constants_prop.items()}) diff --git a/dace/subsets.py b/dace/subsets.py index 0fa6fb536c..0fdc36c22e 100644 --- a/dace/subsets.py +++ b/dace/subsets.py @@ -404,6 +404,8 @@ def data_dims(self): for ts in self.tile_sizes)) def offset(self, other, negative, indices=None, offset_end=True): + if other is None: + return if not isinstance(other, Subset): if isinstance(other, (list, tuple)): other = Indices(other) @@ -420,6 +422,8 @@ def offset(self, other, negative, indices=None, offset_end=True): self.ranges[i] = (rb + mult * off[i], re, rs) def offset_new(self, other, negative, indices=None, offset_end=True): + if other is None: + return Range(self.ranges) if not isinstance(other, Subset): if isinstance(other, (list, tuple)): other = Indices(other) diff --git a/pytest.ini b/pytest.ini index 087be3d897..e36c6cecbd 100644 --- a/pytest.ini +++ b/pytest.ini @@ -14,6 +14,7 @@ markers = scalapack: Test requires ScaLAPACK (Intel MKL and OpenMPI). (select with '-m scalapack') datainstrument: Test uses data instrumentation (select with '-m datainstrument') hptt: Test requires the HPTT library (select with '-m "hptt') + long: Test runs for a long time and is skipped in CI (select with '-m "long"') python_files = *_test.py *_cudatest.py diff --git a/samples/fpga/jacobi_fpga_systolic.py b/samples/fpga/jacobi_fpga_systolic.py index d022f6712b..503f766056 100644 --- a/samples/fpga/jacobi_fpga_systolic.py +++ b/samples/fpga/jacobi_fpga_systolic.py @@ -276,10 +276,10 @@ def run_jacobi(w: int, h: int, t: int, p: int, specialize_all: bool = False): print("Specializing H and T...") jacobi = make_sdfg(specialize_all, h, w, t, p) - jacobi.specialize(dict(W=W, P=P)) + jacobi.specialize(dict(W=w, P=p)) if specialize_all: - jacobi.specialize(dict(H=H, T=T)) + jacobi.specialize(dict(H=h, T=t)) if t % p != 0: raise ValueError("Iteration must be divisable by number of processing elements") @@ -301,7 +301,7 @@ def run_jacobi(w: int, h: int, t: int, p: int, specialize_all: bool = False): if specialize_all: jacobi(A=A) else: - jacobi(A=A, H=H, T=T) + jacobi(A=A, H=h, T=t) # Regression kernel = np.array([[0, 0.2, 0], [0.2, 0.2, 0.2], [0, 0.2, 0]], dtype=np.float32) diff --git a/tests/codegen/allocation_lifetime_test.py b/tests/codegen/allocation_lifetime_test.py index 9a68cd2140..380367057a 100644 --- a/tests/codegen/allocation_lifetime_test.py +++ b/tests/codegen/allocation_lifetime_test.py @@ -480,7 +480,7 @@ def test_branched_allocation(mode): sdfg.compile() -@pytest.mark.skip +@pytest.mark.skip('Dynamic array resize is not yet supported') def test_scope_multisize(): """ An array that needs to be allocated multiple times with different sizes. 
""" sdfg = dace.SDFG('test') diff --git a/tests/fpga/jacobi_fpga_test.py b/tests/fpga/jacobi_fpga_test.py index 0821e95ed6..6347af6a9a 100644 --- a/tests/fpga/jacobi_fpga_test.py +++ b/tests/fpga/jacobi_fpga_test.py @@ -1,13 +1,11 @@ # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. from dace.fpga_testing import xilinx_test, import_sample from pathlib import Path -import pytest # This kernel does not work with the Intel FPGA codegen, because it uses the # constant systolic array index in the connector on the nested SDFG. -@pytest.mark.skip -@xilinx_test() +@xilinx_test(assert_ii_1=False) def test_jacobi_fpga(): jacobi = import_sample(Path("fpga") / "jacobi_fpga_systolic.py") return jacobi.run_jacobi(64, 512, 16, 4) diff --git a/tests/fpga/map_unroll_processing_elements_test.py b/tests/fpga/map_unroll_processing_elements_test.py index de73997f1b..afd6d80a24 100644 --- a/tests/fpga/map_unroll_processing_elements_test.py +++ b/tests/fpga/map_unroll_processing_elements_test.py @@ -9,7 +9,6 @@ from dace.config import set_temporary -@pytest.mark.skip @xilinx_test(assert_ii_1=False) def test_map_unroll_processing_elements(): # Grab the systolic GEMM implementation the samples directory @@ -56,7 +55,7 @@ def test_map_unroll_processing_elements(): return sdfg -@pytest.mark.skip +@pytest.mark.skip('Test no longer achieves II=1') @xilinx_test(assert_ii_1=True) def test_map_unroll_processing_elements_decoupled(): # Grab the systolic GEMM implementation the samples directory @@ -105,3 +104,4 @@ def test_map_unroll_processing_elements_decoupled(): if __name__ == "__main__": test_map_unroll_processing_elements(None) + test_map_unroll_processing_elements_decoupled(None) diff --git a/tests/fpga/matmul_test.py b/tests/fpga/matmul_test.py index da7d3d2dfc..be393939df 100644 --- a/tests/fpga/matmul_test.py +++ b/tests/fpga/matmul_test.py @@ -162,7 +162,6 @@ def test_gemm_vectorized(): return sdfg -@pytest.mark.skip @xilinx_test(assert_ii_1=True) def test_gemm_vectorized_decoupled(): # Test with vectorization @@ -201,7 +200,6 @@ def test_gemm_size_not_multiples_of(): return sdfg -@pytest.mark.skip @xilinx_test() def test_gemm_size_not_multiples_of_decoupled(): # Test with matrix sizes that are not a multiple of #PEs and Tile sizes @@ -249,5 +247,7 @@ def matmul_np(A: dace.float64[128, 64], B: dace.float64[64, 32], C: dace.float64 test_naive_matmul_fpga(None) test_systolic_matmul_fpga(None) test_gemm_vectorized(None) + test_gemm_vectorized_decoupled(None) test_gemm_size_not_multiples_of(None) + test_gemm_size_not_multiples_of_decoupled(None) test_matmul_np(None) diff --git a/tests/fpga/streaming_memory_test.py b/tests/fpga/streaming_memory_test.py index 0a75ce53f8..11a56c42f4 100644 --- a/tests/fpga/streaming_memory_test.py +++ b/tests/fpga/streaming_memory_test.py @@ -380,7 +380,7 @@ def test_streaming_and_composition(): return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_1(): # Make SDFG sdfg: dace.SDFG = vecadd_1_streaming.to_sdfg() @@ -408,7 +408,7 @@ def test_mem_buffer_vec_add_1(): return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_1_symbolic(): # Make SDFG sdfg: dace.SDFG = vecadd_1_streaming_symbol.to_sdfg() @@ -495,55 +495,55 @@ def mem_buffer_vec_add_types(dace_type0, dace_type1, dace_type2, np_type0, np_ty return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long # def test_mem_buffer_vec_add_float16(): # return mem_buffer_vec_add_types(dace.float16, dace.float16, 
dace.float16, np.float16, np.float16, np.float16) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_float32(): return mem_buffer_vec_add_types(dace.float32, dace.float32, dace.float32, np.float32, np.float32, np.float32) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_float64(): return mem_buffer_vec_add_types(dace.float64, dace.float64, dace.float64, np.float64, np.float64, np.float64) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_int8(): return mem_buffer_vec_add_types(dace.int8, dace.int8, dace.int8, np.int8, np.int8, np.int8) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_int16(): return mem_buffer_vec_add_types(dace.int16, dace.int16, dace.int16, np.int16, np.int16, np.int16) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_int32(): return mem_buffer_vec_add_types(dace.int32, dace.int32, dace.int32, np.int32, np.int32, np.int32) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_int64(): return mem_buffer_vec_add_types(dace.int64, dace.int64, dace.int64, np.int64, np.int64, np.int64) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_complex64(): return mem_buffer_vec_add_types(dace.complex64, dace.complex64, dace.complex64, np.complex64, np.complex64, np.complex64) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_complex128(): return mem_buffer_vec_add_types(dace.complex128, dace.complex128, dace.complex128, np.complex128, np.complex128, np.complex128) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long # def test_mem_buffer_vec_add_mixed_float(): # return mem_buffer_vec_add_types(dace.float16, dace.float32, dace.float64, np.float16, np.float32, np.float64) -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_vec_add_mixed_int(): return mem_buffer_vec_add_types(dace.int16, dace.int32, dace.int64, np.int16, np.int32, np.int64) @@ -575,7 +575,7 @@ def test_mem_buffer_mat_add(): return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_mat_add_symbol(): # Make SDFG sdfg: dace.SDFG = matadd_streaming_symbol.to_sdfg() @@ -602,7 +602,7 @@ def test_mem_buffer_mat_add_symbol(): return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_tensor_add(): # Make SDFG sdfg: dace.SDFG = tensoradd_streaming.to_sdfg() @@ -688,7 +688,7 @@ def test_mem_buffer_multistream_with_deps(): return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_mat_mul(): # Make SDFG sdfg: dace.SDFG = matmul_streaming.to_sdfg() @@ -799,7 +799,7 @@ def test_mem_buffer_not_applicable(): return [] -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_atax(): A = np.random.rand(M, N).astype(np.float32) @@ -843,7 +843,7 @@ def test_mem_buffer_atax(): return sdfg -@pytest.mark.skip(reason="Save time") +@pytest.mark.long def test_mem_buffer_bicg(): A = np.random.rand(N, M).astype(np.float32) diff --git a/tests/fpga/vec_sum_test.py b/tests/fpga/vec_sum_test.py index f6817985c9..791ba80e5d 100644 --- a/tests/fpga/vec_sum_test.py +++ b/tests/fpga/vec_sum_test.py @@ -83,16 +83,16 @@ def test_vec_sum_vectorize_first_decoupled_interfaces(): return run_vec_sum(True) -@pytest.mark.skip @xilinx_test(assert_ii_1=True) def test_vec_sum_fpga_transform_first_decoupled_interfaces(): # For this test, 
decoupled read/write interfaces are needed to achieve II=1 with set_temporary("compiler", "xilinx", "decouple_array_interfaces", value=True): - return run_vec_sum(True) + with set_temporary('testing', 'serialization', value=False): + return run_vec_sum(True) if __name__ == "__main__": test_vec_sum_vectorize_first(None) test_vec_sum_fpga_transform_first(None) - + test_vec_sum_fpga_transform_first_decoupled_interfaces(None) diff --git a/tests/inlining_test.py b/tests/inlining_test.py index c6d8fa8d9f..251c85e7bc 100644 --- a/tests/inlining_test.py +++ b/tests/inlining_test.py @@ -42,7 +42,7 @@ def test(): myprogram.compile(dace.float32[W, H], dace.float32[H, W], dace.int32) -@pytest.mark.skip +@pytest.mark.skip('CI failure that cannot be reproduced outside CI') def test_regression_reshape_unsqueeze(): nsdfg = dace.SDFG("nested_reshape_node") nstate = nsdfg.add_state() @@ -456,7 +456,7 @@ def test(A: dace.float64[96, 32], B: dace.float64[42, 32]): if __name__ == "__main__": test() - # Skipped to to bug that cannot be reproduced + # Skipped due to bug that cannot be reproduced outside CI # test_regression_reshape_unsqueeze() test_empty_memlets() test_multistate_inline() diff --git a/tests/instrumentation_test.py b/tests/instrumentation_test.py index 69eff85a8e..2aa26edf36 100644 --- a/tests/instrumentation_test.py +++ b/tests/instrumentation_test.py @@ -62,8 +62,7 @@ def test_timer(): onetest(dace.InstrumentationType.Timer) -#@pytest.mark.papi -@pytest.mark.skip +@pytest.mark.papi def test_papi(): # Run a lighter load for the sake of performance onetest(dace.InstrumentationType.PAPI_Counters, 4) diff --git a/tests/nested_control_flow_disabledtest.py b/tests/nested_control_flow_test.py similarity index 88% rename from tests/nested_control_flow_disabledtest.py rename to tests/nested_control_flow_test.py index 994b756d51..215a6afb90 100644 --- a/tests/nested_control_flow_disabledtest.py +++ b/tests/nested_control_flow_test.py @@ -22,8 +22,7 @@ def nested_cflow_test(A: dace.int32[1]): out_a = in_a + 2 -@pytest.mark.skip -def test(): +def test_nested_control_flow_with_explicit_tasklets(): A = np.zeros(1).astype(np.int32) nested_cflow_test(A) @@ -33,4 +32,4 @@ def test(): if __name__ == "__main__": - test() + test_nested_control_flow_with_explicit_tasklets() diff --git a/tests/npbench/deep_learning/conv2d_bias_test.py b/tests/npbench/deep_learning/conv2d_bias_test.py index 7d9f1a60b0..648903ffb9 100644 --- a/tests/npbench/deep_learning/conv2d_bias_test.py +++ b/tests/npbench/deep_learning/conv2d_bias_test.py @@ -111,7 +111,7 @@ def test_cpu(): run_conv2d_bias(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip +@pytest.mark.skip('Illegal memory access error') # @pytest.mark.gpu def test_gpu(): run_conv2d_bias(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/deep_learning/lenet_test.py b/tests/npbench/deep_learning/lenet_test.py index a1ec02528a..37cba9af9b 100644 --- a/tests/npbench/deep_learning/lenet_test.py +++ b/tests/npbench/deep_learning/lenet_test.py @@ -201,7 +201,7 @@ def test_cpu(monkeypatch): run_lenet(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Code error") +@pytest.mark.skip(reason="std::runtime_error") @pytest.mark.gpu def test_gpu(): run_lenet(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/misc/azimint_naive_test.py b/tests/npbench/misc/azimint_naive_test.py index 522b545f4c..c1938fce97 100644 --- a/tests/npbench/misc/azimint_naive_test.py +++ b/tests/npbench/misc/azimint_naive_test.py @@ -98,7 +98,7 @@ def test_gpu(): run_azimint_naive(dace.dtypes.DeviceType.GPU) 
-@pytest.mark.skip(reason="Validation error") +@pytest.mark.skip(reason="Incorrect output") @fpga_test(assert_ii_1=False) def test_fpga(): run_azimint_naive(dace.dtypes.DeviceType.FPGA) diff --git a/tests/npbench/misc/contour_integral_test.py b/tests/npbench/misc/contour_integral_test.py index 4477e392a0..02e1629b76 100644 --- a/tests/npbench/misc/contour_integral_test.py +++ b/tests/npbench/misc/contour_integral_test.py @@ -113,9 +113,8 @@ def test_cpu(): run_contour_integral(dace.dtypes.DeviceType.CPU) -# NOTE: Doesn't work yet with GPU-auto-optimize -# @pytest.mark.gpu -@pytest.mark.skip +@pytest.mark.gpu +@pytest.mark.skip('Incorrect outputs due to auto-optimize') def test_gpu(): run_contour_integral(dace.dtypes.DeviceType.GPU) @@ -139,4 +138,4 @@ def test_fpga(): elif target == "gpu": run_contour_integral(dace.dtypes.DeviceType.GPU) elif target == "fpga": - run_contour_integral(dace.dtypes.DeviceType.FPGA) \ No newline at end of file + run_contour_integral(dace.dtypes.DeviceType.FPGA) diff --git a/tests/npbench/misc/mandelbrot1_test.py b/tests/npbench/misc/mandelbrot1_test.py index 13ee414c34..521d41c560 100644 --- a/tests/npbench/misc/mandelbrot1_test.py +++ b/tests/npbench/misc/mandelbrot1_test.py @@ -103,18 +103,18 @@ def run_mandelbrot1(device_type: dace.dtypes.DeviceType): return sdfg -@pytest.mark.skip(reason="Parsing error") +@pytest.mark.skip(reason="Parsing error (see issue #1139)") def test_cpu(): run_mandelbrot1(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Parsing error") +@pytest.mark.skip(reason="Parsing error (see issue #1139)") @pytest.mark.gpu def test_gpu(): run_mandelbrot1(dace.dtypes.DeviceType.GPU) -@pytest.mark.skip(reason="Parsing error") +@pytest.mark.skip(reason="Parsing error (see issue #1139)") @fpga_test(assert_ii_1=False) def test_fpga(): return run_mandelbrot1(dace.dtypes.DeviceType.FPGA) diff --git a/tests/npbench/misc/mandelbrot2_test.py b/tests/npbench/misc/mandelbrot2_test.py index 8d775155b7..aaca2c6db9 100644 --- a/tests/npbench/misc/mandelbrot2_test.py +++ b/tests/npbench/misc/mandelbrot2_test.py @@ -151,18 +151,18 @@ def run_mandelbrot2(device_type: dace.dtypes.DeviceType): return sdfg -@pytest.mark.skip(reason="Parsing error") +@pytest.mark.skip(reason="Parsing error (see issue #1139)") def test_cpu(): run_mandelbrot2(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Parsing error") +@pytest.mark.skip(reason="Parsing error (see issue #1139)") @pytest.mark.gpu def test_gpu(): run_mandelbrot2(dace.dtypes.DeviceType.GPU) -@pytest.mark.skip(reason="Parsing error") +@pytest.mark.skip(reason="Parsing error (see issue #1139)") @fpga_test(assert_ii_1=False) def test_fpga(): return run_mandelbrot2(dace.dtypes.DeviceType.FPGA) diff --git a/tests/npbench/misc/nbody_test.py b/tests/npbench/misc/nbody_test.py index e0b320b119..436677ad63 100644 --- a/tests/npbench/misc/nbody_test.py +++ b/tests/npbench/misc/nbody_test.py @@ -304,7 +304,7 @@ def test_cpu(): run_nbody(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Compiler error") +@pytest.mark.skip(reason="Incorrect output") @pytest.mark.gpu def test_gpu(): run_nbody(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/misc/spmv_test.py b/tests/npbench/misc/spmv_test.py index 549aea65dd..0771ff5198 100644 --- a/tests/npbench/misc/spmv_test.py +++ b/tests/npbench/misc/spmv_test.py @@ -122,4 +122,4 @@ def test_fpga(): elif target == "gpu": run_spmv(dace.dtypes.DeviceType.GPU) elif target == "fpga": - run_spmv(dace.dtypes.DeviceType.FPGA) \ No newline at end of file + 
run_spmv(dace.dtypes.DeviceType.FPGA) diff --git a/tests/npbench/misc/stockham_fft_test.py b/tests/npbench/misc/stockham_fft_test.py index 5cf1cf8e54..8fc5e88203 100644 --- a/tests/npbench/misc/stockham_fft_test.py +++ b/tests/npbench/misc/stockham_fft_test.py @@ -155,12 +155,12 @@ def run_stockham_fft(device_type: dace.dtypes.DeviceType): return sdfg -@pytest.mark.skip(reason="Error in expansion") +@pytest.mark.skip(reason="Assertion error in read_and_write_sets") def test_cpu(): run_stockham_fft(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Runtime error") +@pytest.mark.skip(reason="Assertion error in read_and_write_sets") @pytest.mark.gpu def test_gpu(): run_stockham_fft(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/polybench/correlation_test.py b/tests/npbench/polybench/correlation_test.py index d1536d51c8..d743ba528d 100644 --- a/tests/npbench/polybench/correlation_test.py +++ b/tests/npbench/polybench/correlation_test.py @@ -91,7 +91,6 @@ def test_cpu(): run_correlation(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Compiler error") @pytest.mark.gpu def test_gpu(): run_correlation(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/polybench/deriche_test.py b/tests/npbench/polybench/deriche_test.py index eace2237af..b2fe7d47e2 100644 --- a/tests/npbench/polybench/deriche_test.py +++ b/tests/npbench/polybench/deriche_test.py @@ -160,7 +160,6 @@ def test_cpu(): run_deriche(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="GPU AutoOpt support") @pytest.mark.gpu def test_gpu(): run_deriche(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/polybench/doitgen_test.py b/tests/npbench/polybench/doitgen_test.py index e114f97e23..06cf3355f5 100644 --- a/tests/npbench/polybench/doitgen_test.py +++ b/tests/npbench/polybench/doitgen_test.py @@ -89,7 +89,6 @@ def test_cpu(): run_doitgen(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="GPU Auto-Opt error") @pytest.mark.gpu def test_gpu(): run_doitgen(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/polybench/durbin_test.py b/tests/npbench/polybench/durbin_test.py index ffb1ed006c..ffeff150d9 100644 --- a/tests/npbench/polybench/durbin_test.py +++ b/tests/npbench/polybench/durbin_test.py @@ -98,7 +98,6 @@ def run_durbin(device_type: dace.dtypes.DeviceType): return sdfg -@pytest.mark.skip(reason="Validation error") def test_cpu(): run_durbin(dace.dtypes.DeviceType.CPU) diff --git a/tests/npbench/polybench/gemver_test.py b/tests/npbench/polybench/gemver_test.py index 0c8b23222e..58e078fe11 100644 --- a/tests/npbench/polybench/gemver_test.py +++ b/tests/npbench/polybench/gemver_test.py @@ -90,7 +90,6 @@ def test_cpu(): run_gemver(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Variable not found during codegen") @pytest.mark.gpu def test_gpu(): run_gemver(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/polybench/symm_test.py b/tests/npbench/polybench/symm_test.py index 73b5dca41e..d0bae1edfc 100644 --- a/tests/npbench/polybench/symm_test.py +++ b/tests/npbench/polybench/symm_test.py @@ -94,7 +94,6 @@ def test_cpu(): run_symm(dace.dtypes.DeviceType.CPU) -@pytest.mark.skip(reason="Compilation error") @pytest.mark.gpu def test_gpu(): run_symm(dace.dtypes.DeviceType.GPU) diff --git a/tests/npbench/weather_stencils/vadv_test.py b/tests/npbench/weather_stencils/vadv_test.py index d1ff08fae3..cf01a0cd31 100644 --- a/tests/npbench/weather_stencils/vadv_test.py +++ b/tests/npbench/weather_stencils/vadv_test.py @@ -217,13 +217,12 @@ def test_cpu(monkeypatch): run_vadv(dace.dtypes.DeviceType.CPU) 
-@pytest.mark.skip(reason="Compiler error") @pytest.mark.gpu def test_gpu(): run_vadv(dace.dtypes.DeviceType.GPU) -@pytest.mark.skip(reason="Variable not defined") +@pytest.mark.skip(reason="Xilinx internal compiler error") @fpga_test(assert_ii_1=False) def test_fpga(): return run_vadv(dace.dtypes.DeviceType.FPGA) diff --git a/tests/numpy/linalg_test.py b/tests/numpy/linalg_test.py index 9211d84130..d7d00a3807 100644 --- a/tests/numpy/linalg_test.py +++ b/tests/numpy/linalg_test.py @@ -92,7 +92,7 @@ def tensordot_0(A: dace.float32[3, 3, 3, 3, 3, 3], B: dace.float32[3, 3, 3, 3, 3 # TODO: Enable after fixing cuTENSOR in CI #@pytest.mark.gpu -@pytest.mark.skip +@pytest.mark.skip('CUTENSOR is not supported in CI') def test_tensordot_02(): @dace.program(device=dace.dtypes.DeviceType.GPU) @@ -131,7 +131,7 @@ def tensordot_1(A: dace.float32[3, 3, 3, 3, 3, 3], B: dace.float32[3, 3, 3, 3, 3 # TODO: Enable after fixing cuTENSOR in CI #@pytest.mark.gpu -@pytest.mark.skip +@pytest.mark.skip('CUTENSOR is not supported in CI') def test_tensordot_12(): @dace.program(device=dace.dtypes.DeviceType.GPU) @@ -192,7 +192,7 @@ def tensordot_2b(A: dace.float32[3, 3, 3, 3, 3, 3], B: dace.float32[3, 3, 3, 3, # TODO: Enable after fixing cuTENSOR in CI #@pytest.mark.gpu -@pytest.mark.skip +@pytest.mark.skip('CUTENSOR is not supported in CI') def test_tensordot_22(): @dace.program(device=dace.dtypes.DeviceType.GPU) diff --git a/tests/numpy/list_globals_test.py b/tests/numpy/list_globals_test.py index 6281c919a9..8ca9e1eae6 100644 --- a/tests/numpy/list_globals_test.py +++ b/tests/numpy/list_globals_test.py @@ -43,7 +43,7 @@ def local_list(A: dace.int32[3, 2, 4]): assert np.allclose(result, np.transpose(inp.copy(), axes=local_axes)) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_local_list_with_slice(): local_axes = [1, 2, 0, 100] diff --git a/tests/numpy/transpose_test.py b/tests/numpy/transpose_test.py index c56a8a0cda..160728aca6 100644 --- a/tests/numpy/transpose_test.py +++ b/tests/numpy/transpose_test.py @@ -50,7 +50,7 @@ def test_transpose(): # TODO: Enable after fixing HPTT in CI # @pytest.mark.hptt -@pytest.mark.skip +@pytest.mark.skip('HPTT is not supported in CI') def test_hptt(): with dace.config.set_temporary('library', 'ttranspose', 'default_implementation', value='HPTT'): test_transpose_axes0() diff --git a/tests/numpy/ufunc_test.py b/tests/numpy/ufunc_test.py index b769ab1082..39eab139c0 100644 --- a/tests/numpy/ufunc_test.py +++ b/tests/numpy/ufunc_test.py @@ -56,7 +56,6 @@ def test_ufunc_true_divide_uu(A: dace.uint32[10], B: dace.uint32[10]): return np.true_divide(A, B) -@pytest.mark.skip @compare_numpy_output(non_zero=True, check_dtype=True) def test_ufunc_floor_divide_cc(A: dace.complex64[10], B: dace.complex64[10]): return np.floor_divide(A, B) @@ -890,12 +889,6 @@ def test_ufunc_not_equal_ff(A: dace.float32[10], B: dace.float32[10]): return np.not_equal(A, B) -@pytest.mark.skip -@compare_numpy_output(check_dtype=True) -def test_ufunc_logical_and_cc(A: dace.complex64[10], B: dace.complex64[10]): - return np.logical_and(A, B) - - @compare_numpy_output(check_dtype=True) def test_ufunc_logical_and_ff(A: dace.float32[10], B: dace.float32[10]): return np.logical_and(A, B) @@ -911,12 +904,6 @@ def test_ufunc_logical_and_su(A: dace.int32[10], B: dace.uint32[10]): return np.logical_and(A, B) -@pytest.mark.skip -@compare_numpy_output(check_dtype=True) -def test_ufunc_logical_or_cc(A: dace.complex64[10], B: dace.complex64[10]): - return np.logical_or(A, B) - - 
@compare_numpy_output(check_dtype=True) def test_ufunc_logical_or_ff(A: dace.float32[10], B: dace.float32[10]): return np.logical_or(A, B) @@ -932,12 +919,6 @@ def test_ufunc_logical_or_su(A: dace.int32[10], B: dace.uint32[10]): return np.logical_or(A, B) -@pytest.mark.skip -@compare_numpy_output(check_dtype=True) -def test_ufunc_logical_xor_cc(A: dace.complex64[10], B: dace.complex64[10]): - return np.logical_xor(A, B) - - @compare_numpy_output(check_dtype=True) def test_ufunc_logical_xor_ff(A: dace.float32[10], B: dace.float32[10]): return np.logical_xor(A, B) @@ -998,6 +979,7 @@ def test_ufunc_fmin_nan_ff(A: dace.float32[10], B: dace.float32[10]): def test_ufunc_isfinite_c(): + @compare_numpy_output(check_dtype=True) def ufunc_isfinite_c(A: dace.complex64[10]): A[0] = np.inf @@ -1016,6 +998,7 @@ def ufunc_isfinite_c(A: dace.complex64[10]): def test_ufunc_isfinite_f(): + @compare_numpy_output(check_dtype=True) def ufunc_isfinite_f(A: dace.float32[10]): A[0] = np.inf @@ -1036,7 +1019,6 @@ def ufunc_isfinite_f(A: dace.float32[10]): # NumPy accepts integer arrays in np.isfinite. # However, if any element of an integer array is inf, it will fail because it # ": cannot convert float infinity to integer" -@pytest.mark.skip @compare_numpy_output(validation_func=lambda a: np.isfinite(a)) def test_ufunc_isfinite_u(A: dace.uint32[10]): A[0] = np.inf @@ -1045,6 +1027,7 @@ def test_ufunc_isfinite_u(A: dace.uint32[10]): def test_ufunc_isinf_c(): + @compare_numpy_output(check_dtype=True) def ufunc_isinf_c(A: dace.complex64[10]): A[0] = np.inf @@ -1063,6 +1046,7 @@ def ufunc_isinf_c(A: dace.complex64[10]): def test_ufunc_isinf_f(): + @compare_numpy_output(check_dtype=True) def ufunc_isinf_f(A: dace.float32[10]): A[0] = np.inf @@ -1083,7 +1067,6 @@ def ufunc_isinf_f(A: dace.float32[10]): # NumPy accepts integer arrays in np.isinf. # However, if any element of an integer array is inf, it will fail because it # ": cannot convert float infinity to integer" -@pytest.mark.skip @compare_numpy_output(validation_func=lambda a: np.isinf(a)) def test_ufunc_isinf_u(A: dace.uint32[10]): A[0] = np.inf @@ -1092,6 +1075,7 @@ def test_ufunc_isinf_u(A: dace.uint32[10]): def test_ufunc_isnan_c(): + @compare_numpy_output(check_dtype=True) def ufunc_isnan_c(A: dace.complex64[10]): A[0] = np.inf @@ -1110,6 +1094,7 @@ def ufunc_isnan_c(A: dace.complex64[10]): def test_ufunc_isnan_f(): + @compare_numpy_output(check_dtype=True) def ufunc_isnan_f(A: dace.float32[10]): A[0] = np.inf @@ -1130,7 +1115,6 @@ def ufunc_isnan_f(A: dace.float32[10]): # NumPy accepts integer arrays in np.isnan. # However, if any element of an integer array is inf, it will fail because it # ": cannot convert float infinity to integer" -@pytest.mark.skip @compare_numpy_output(validation_func=lambda a: np.isnan(a)) def test_ufunc_isnan_u(A: dace.uint32[10]): A[0] = np.inf @@ -1320,7 +1304,7 @@ def test_ufunc_clip(A: dace.float32[10]): test_ufunc_logaddexp2_ff() test_ufunc_true_divide_ff() test_ufunc_true_divide_uu() - # test_ufunc_floor_divide_cc() + test_ufunc_floor_divide_cc() test_ufunc_floor_divide_ff() test_ufunc_floor_divide_uu() test_ufunc_floor_divide_ss() @@ -1486,15 +1470,12 @@ def test_ufunc_clip(A: dace.float32[10]): test_ufunc_less_equal_ff() test_ufunc_equal_ff() test_ufunc_not_equal_ff() - # test_ufunc_logical_and_cc() # TODO: How to convert to bool? test_ufunc_logical_and_ff() test_ufunc_logical_and_uu() test_ufunc_logical_and_su() - # test_ufunc_logical_or_cc() # TODO: How to convert to bool? 
test_ufunc_logical_or_ff() test_ufunc_logical_or_uu() test_ufunc_logical_or_su() - # test_ufunc_logical_xor_cc() # TODO: How to convert to bool? test_ufunc_logical_xor_ff() test_ufunc_logical_xor_uu() test_ufunc_logical_xor_su() @@ -1508,13 +1489,13 @@ def test_ufunc_clip(A: dace.float32[10]): test_ufunc_fmin_nan_ff() test_ufunc_isfinite_c() test_ufunc_isfinite_f() - # test_ufunc_isfinite_u() + test_ufunc_isfinite_u() test_ufunc_isinf_c() test_ufunc_isinf_f() - # test_ufunc_isinf_u()) + test_ufunc_isinf_u() test_ufunc_isnan_c() test_ufunc_isnan_f() - # test_ufunc_isnan_u() + test_ufunc_isnan_u() test_ufunc_signbit_c() test_ufunc_signbit_f() test_ufunc_signbit_u() diff --git a/tests/python_frontend/assignment_statements_test.py b/tests/python_frontend/assignment_statements_test.py index 0681c79c49..f8538aa848 100644 --- a/tests/python_frontend/assignment_statements_test.py +++ b/tests/python_frontend/assignment_statements_test.py @@ -72,7 +72,7 @@ def starred_target(a: dace.float32[1]): return b, c, d, e -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_starred_target(): a = np.zeros((1, ), dtype=np.float32) a[0] = np.pi @@ -94,7 +94,7 @@ def attribute_reference(a: mystruct[1]): a.b[0] = 6 -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_attribute_reference(): a = np.ndarray((1, ), dtype=np.dtype(mystruct.as_ctypes())) attribute_reference(a=a) diff --git a/tests/python_frontend/augassign_wcr_test.py b/tests/python_frontend/augassign_wcr_test.py index e6964261fe..fbaaf4dbe6 100644 --- a/tests/python_frontend/augassign_wcr_test.py +++ b/tests/python_frontend/augassign_wcr_test.py @@ -156,7 +156,6 @@ def no_wcr(A: dace.int32[5, 5, 5]): assert (np.allclose(A, ref)) -@pytest.mark.skip def test_augassign_wcr4(): with dace.config.set_temporary('frontend', 'avoid_wcr', value=False): diff --git a/tests/python_frontend/callback_autodetect_test.py b/tests/python_frontend/callback_autodetect_test.py index fe567f18ad..6f34dee2b1 100644 --- a/tests/python_frontend/callback_autodetect_test.py +++ b/tests/python_frontend/callback_autodetect_test.py @@ -163,7 +163,7 @@ def tasklet_callback(A: dace.float64[N, N], B: dace.float64[N, N]): b = sq(a) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_callback_tasklet(): A = np.random.rand(24, 24) B = np.random.rand(24, 24) @@ -319,7 +319,7 @@ def same_name_nested(): # Cannot run test without cupy -@pytest.mark.skip +@pytest.mark.gpu def test_gpu_callback(): import cupy as cp @@ -906,7 +906,7 @@ def tester(a: dace.float64[20]): assert np.allclose(aa, expected) -@pytest.mark.skip +@pytest.mark.skip('Test requires GUI') def test_matplotlib_with_compute(): """ Stacked bar plot example from Matplotlib using callbacks and pyobjects. 
@@ -951,7 +951,7 @@ def tester(): test_reorder() test_reorder_nested() test_callback_samename() - # test_gpu_callback() + test_gpu_callback() test_bad_closure() test_object_with_nested_callback() test_two_parameters_same_name() diff --git a/tests/python_frontend/conditional_assignment_test.py b/tests/python_frontend/conditional_assignment_test.py index d138ad88c5..97440563e2 100644 --- a/tests/python_frontend/conditional_assignment_test.py +++ b/tests/python_frontend/conditional_assignment_test.py @@ -76,7 +76,6 @@ def some_func(field, may_be_none): assert np.allclose(field, 1.0) -@pytest.mark.skip def test_maybe_none_scalar_arg(): @dace.program def some_func(field, a_scalar): @@ -134,7 +133,7 @@ def func(): # test_none_or_field_assignment_globalarr() # test_none_or_field_assignment_arr() test_none_arg() - # test_maybe_none_scalar_arg() + test_maybe_none_scalar_arg() test_default_arg() test_kwarg_none() test_conditional_print() diff --git a/tests/python_frontend/lambda_test.py b/tests/python_frontend/lambda_test.py index b2d75a5e16..be3fe58579 100644 --- a/tests/python_frontend/lambda_test.py +++ b/tests/python_frontend/lambda_test.py @@ -23,7 +23,7 @@ def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]): assert np.allclose(A, B + C) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_inline_lambda_scalar(): @dace.program def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]): @@ -38,7 +38,7 @@ def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]): assert np.allclose(A, B + C) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_inline_lambda_array(): @dace.program def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]): @@ -52,7 +52,7 @@ def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]): assert np.allclose(A, B + C) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_lambda_global(): f = lambda a, b: a + b @@ -67,7 +67,7 @@ def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]): assert np.allclose(A, B + C) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_lambda_call_jit(): @dace.program def lamb(A, B, C, f): @@ -81,7 +81,7 @@ def lamb(A, B, C, f): assert np.allclose(A, B + C) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_lambda_nested_call(): @dace.program def lamb2(A, B, C, f): diff --git a/tests/python_frontend/string_test.py b/tests/python_frontend/string_test.py index a22ef1dd18..533057b092 100644 --- a/tests/python_frontend/string_test.py +++ b/tests/python_frontend/string_test.py @@ -75,7 +75,7 @@ def tester(): assert np.allclose(tester(), False) -@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_string_literal(): @dace @@ -85,7 +85,7 @@ def tester(): assert tester()[0] == 'Hello World!' 
-@pytest.mark.skip +@pytest.mark.skip('Syntax is not yet supported') def test_bytes_literal(): @dace diff --git a/tests/python_frontend/structures/structure_python_test.py b/tests/python_frontend/structures/structure_python_test.py index 8190e776b9..bc4ab58d7c 100644 --- a/tests/python_frontend/structures/structure_python_test.py +++ b/tests/python_frontend/structures/structure_python_test.py @@ -182,7 +182,7 @@ def rgf_leftToRight(A: BlockTriDiagonal, B: BlockTriDiagonal, n_: dace.int32, nb assert np.allclose(B.lower, B_lower) -@pytest.mark.skip +@pytest.mark.skip('Compiler error (const conversion)') @pytest.mark.gpu def test_read_structure_gpu(): diff --git a/tests/reduce_strided_disabledtest.py b/tests/reduce_strided_disabledtest.py index 72d9c821ca..6a6f1a24e0 100644 --- a/tests/reduce_strided_disabledtest.py +++ b/tests/reduce_strided_disabledtest.py @@ -5,31 +5,29 @@ import numpy as np import pytest -# Python version of the SDFG below -# @dace.program -# def reduce_with_strides(A: dace.float64[50, 50], B: dace.float64[25]): -# B[:] = dace.reduce(lambda a,b: a+b, A[::2, ::2], axis=0, -# identity=0) -reduce_with_strides = dace.SDFG('reduce_with_strides') -reduce_with_strides.add_array('A', [50, 50], dace.float64) -reduce_with_strides.add_array('B', [25], dace.float64) - -state = reduce_with_strides.add_state() -node_a = state.add_read('A') -node_b = state.add_write('B') -red = state.add_reduce('lambda a,b: a+b', [0], 0) -state.add_nedge(node_a, red, dace.Memlet.simple('A', '0:50:2, 0:50:2')) -state.add_nedge(red, node_b, dace.Memlet.simple('B', '0:25')) - - -@pytest.mark.skip +@pytest.mark.skip('Incorrect outputs') def test_strided_reduce(): A = np.random.rand(50, 50) B = np.random.rand(25) - sdfg = copy.deepcopy(reduce_with_strides) - sdfg(A=A, B=B) + # Python version of the SDFG below + # @dace.program + # def reduce_with_strides(A: dace.float64[50, 50], B: dace.float64[25]): + # B[:] = dace.reduce(lambda a,b: a+b, A[::2, ::2], axis=0, + # identity=0) + + reduce_with_strides = dace.SDFG('reduce_with_strides') + reduce_with_strides.add_array('A', [50, 50], dace.float64) + reduce_with_strides.add_array('B', [25], dace.float64) + + state = reduce_with_strides.add_state() + node_a = state.add_read('A') + node_b = state.add_write('B') + red = state.add_reduce('lambda a,b: a+b', [0], 0) + state.add_nedge(node_a, red, dace.Memlet.simple('A', '0:50:2, 0:50:2')) + state.add_nedge(red, node_b, dace.Memlet.simple('B', '0:25')) + reduce_with_strides(A=A, B=B) assert np.allclose(B, np.sum(A[::2, ::2], axis=0)) diff --git a/tests/rtl/hardware_test.py b/tests/rtl/hardware_test.py index b6c01fab93..b17da2050f 100644 --- a/tests/rtl/hardware_test.py +++ b/tests/rtl/hardware_test.py @@ -372,8 +372,7 @@ def test_hardware_add42_single(): return sdfg -@pytest.mark.skip(reason="This test is covered by the Xilinx tests.") -def test_hardware_axpy_double_pump(veclen=2): +def _hardware_axpy_double_pump(veclen=2): ''' Tests manual application of the multi-pumping optimization applied to the AXPY program from BLAS. @@ -415,7 +414,7 @@ def test_hardware_axpy_double_pump_vec2(): ''' Tests double pumping with a vector length of 2. ''' - return test_hardware_axpy_double_pump(veclen=2) + return _hardware_axpy_double_pump(veclen=2) @rtl_test() @@ -423,7 +422,7 @@ def test_hardware_axpy_double_pump_vec4(): ''' Tests double pumping with a vector length of 4. 
''' - return test_hardware_axpy_double_pump(veclen=4) + return _hardware_axpy_double_pump(veclen=4) @rtl_test() @@ -493,7 +492,7 @@ def np_vadd(x: dace.float32[N], y: dace.float32[N]): return sdfg -# TODO disabled due to problem with array of streams in Vitis 2021.1 +# Disabled due to problem with array of streams in Vitis 2021.1 #rtl_test() #def test_hardware_add42_multi(): # N = dace.symbol('N') @@ -522,7 +521,6 @@ def np_vadd(x: dace.float32[N], y: dace.float32[N]): test_hardware_vadd(None) test_hardware_vadd_temporal_vectorization(None) test_hardware_add42_single(None) - # TODO disabled due to problem with array of streams in Vitis 2021.1 #test_hardware_add42_multi(None) test_hardware_axpy_double_pump_vec2(None) test_hardware_axpy_double_pump_vec4(None) diff --git a/tests/sdfg/scalar_return.py b/tests/sdfg/scalar_return.py index 82bdb48c6b..1949afab16 100644 --- a/tests/sdfg/scalar_return.py +++ b/tests/sdfg/scalar_return.py @@ -62,7 +62,7 @@ def tuple_retval_sdfg() -> dace.SDFG: return sdfg -@pytest.mark.skip("Scalar return is not implement.") +@pytest.mark.skip("Scalar return is not implemented") def test_scalar_return(): sdfg = single_retval_sdfg() @@ -75,7 +75,7 @@ def test_scalar_return(): assert A[3] == res -@pytest.mark.skip("Scalar return is not implement.") +@pytest.mark.skip("Scalar return is not implemented") def test_scalar_return_tuple(): sdfg = tuple_retval_sdfg() diff --git a/tests/symbol_dependent_transients_test.py b/tests/symbol_dependent_transients_test.py index 2c7ca94c6f..f67b0dc416 100644 --- a/tests/symbol_dependent_transients_test.py +++ b/tests/symbol_dependent_transients_test.py @@ -154,7 +154,7 @@ def test_symbol_dependent_pinned_array(): assert (np.allclose(B, B_ref)) -@pytest.mark.skip # @pytest.mark.gpu +@pytest.mark.skip('Invalid address accessed in kernel') # @pytest.mark.gpu def test_symbol_dependent_gpu_view(): # NOTE: This test cannot produce the correct result since the input # data of the reduction are not contiguous and cub:reduce doesn't support @@ -173,7 +173,7 @@ def test_symbol_dependent_gpu_view(): assert (np.allclose(B, B_ref)) -@pytest.mark.skip +@pytest.mark.skip('FPGA compiler error') def test_symbol_dependent_fpga_global_array(): A = np.random.randn(10, 10, 10) B = np.ndarray(10, dtype=np.float64)
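
Note for reviewers (not part of the patch): the central mechanism here is the new "long" pytest marker. It is registered in pytest.ini, excluded from CI through the added "and not long" clause in .github/workflows/general-ci.yml, and applied in place of the blanket @pytest.mark.skip(reason="Save time") decorators in tests/fpga/streaming_memory_test.py. As an illustrative sketch only (the module and test name below are hypothetical and do not appear in the patch), a test would use the marker like this:

    # sketch_long_marker_test.py -- illustrative sketch, not part of this patch
    import pytest


    @pytest.mark.long  # long-running test; CI deselects it via -m "... and not long"
    def test_expensive_case():
        # Placeholder body; the real marked tests above build and execute
        # FPGA SDFGs, which is why they are excluded from the CI run.
        assert sum(range(1000)) == 499500

Selection then follows standard pytest marker semantics: "pytest -m long" runs only the long tests locally, while the CI command keeps its existing marker expression and simply appends "and not long", as shown in the general-ci.yml hunk above.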