diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml
index bb117f4055a..4f4195bfcb6 100644
--- a/.github/workflows/pnl-ci.yml
+++ b/.github/workflows/pnl-ci.yml
@@ -23,7 +23,7 @@ jobs:
         fetch-depth: 10

     - name: Linux wheels cache
-      uses: actions/cache@v2.1.1
+      uses: actions/cache@v2.1.2
       if: startsWith(runner.os, 'Linux')
       with:
         path: ~/.cache/pip/wheels
@@ -31,7 +31,7 @@ jobs:
         restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels

     - name: MacOS wheels cache
-      uses: actions/cache@v2.1.1
+      uses: actions/cache@v2.1.2
       if: startsWith(runner.os, 'macOS')
       with:
         path: ~/Library/Caches/pip/wheels
@@ -39,7 +39,7 @@ jobs:
         restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels

     - name: Windows wheels cache
-      uses: actions/cache@v2.1.1
+      uses: actions/cache@v2.1.2
       if: startsWith(runner.os, 'Windows')
       with:
         path: ~\AppData\Local\pip\Cache\wheels
diff --git a/.travis.yml b/.travis.yml
index 5cb7d8a2680..ebb982c3ea4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -67,9 +67,9 @@ before_install:
     if [ "$TRAVIS_CPU_ARCH" != "amd64" ]; then
       # There are a lot fewer wheels distributed for non-x86 architectures.
       # We end up building a lot of them locally, install dev packages
-      export EXTRA_PKGS="build-essential gfortran llvm-9-dev libfreetype6-dev libjpeg-dev liblapack-dev zlib1g-dev"
+      export EXTRA_PKGS="build-essential gfortran llvm-10-dev libfreetype6-dev libjpeg-dev liblapack-dev zlib1g-dev"
       # Export LLVM_CONFIG for llvmlite
-      export LLVM_CONFIG=llvm-config-9
+      export LLVM_CONFIG=llvm-config-10
       # Disable coverage
       export RUN_COV=""
     fi
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 9ce4f32f0e1..25bd503b236 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,6 +1,6 @@
 jupyter<=1.0.0
 psyneulink-sphinx-theme<=1.2.1.7
-pytest<6.1.1
+pytest<6.1.2
 pytest-benchmark<=3.2.3
 pytest-cov<=2.10.1
 pytest-helpers-namespace<=2019.1.8
diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py
index a8cd212d2b6..e08cbfce9c9 100644
--- a/psyneulink/core/components/mechanisms/mechanism.py
+++ b/psyneulink/core/components/mechanisms/mechanism.py
@@ -3003,7 +3003,7 @@ def _gen_llvm_function_internal(self, ctx, builder, params, state, arg_in,
         for scale in [TimeScale.TIME_STEP, TimeScale.PASS, TimeScale.TRIAL, TimeScale.RUN]:
             num_exec_time_ptr = builder.gep(num_executions_ptr, [ctx.int32_ty(0), ctx.int32_ty(scale.value)])
             new_val = builder.load(num_exec_time_ptr)
-            new_val = builder.add(new_val, ctx.int32_ty(1))
+            new_val = builder.add(new_val, new_val.type(1))
             builder.store(new_val, num_exec_time_ptr)

         builder = self._gen_llvm_output_ports(ctx, builder, value, params, state, arg_in, arg_out)
diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py
index 851b4f73bbe..a438af0d21a 100644
--- a/psyneulink/core/llvm/__init__.py
+++ b/psyneulink/core/llvm/__init__.py
@@ -88,12 +88,12 @@ def _cuda_kernel(self):
             self.__cuda_kernel = _ptx_engine.get_kernel(self.name)
         return self.__cuda_kernel

-    def cuda_call(self, *args, threads=1, block_size=32):
+    def cuda_call(self, *args, threads=1, block_size=128):
         grid = ((threads + block_size - 1) // block_size, 1)
         self._cuda_kernel(*args, np.int32(threads),
                           block=(block_size, 1, 1), grid=grid)

-    def cuda_wrap_call(self, *args, threads=1, block_size=32):
+    def cuda_wrap_call(self, *args, threads=1, block_size=128):
         wrap_args = (jit_engine.pycuda.driver.InOut(a) if
                      isinstance(a, np.ndarray) else a for a in args)
         self.cuda_call(*wrap_args, threads=threads, block_size=block_size)
diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py
index accd9a2fba0..61c60eb37be 100644
--- a/psyneulink/core/llvm/builder_context.py
+++ b/psyneulink/core/llvm/builder_context.py
@@ -431,12 +431,16 @@ def _convert_llvm_ir_to_ctype(t: ir.Type):
     if type_t is ir.VoidType:
         return None
     elif type_t is ir.IntType:
-        if t.width == 32:
-            return ctypes.c_int
+        if t.width == 8:
+            return ctypes.c_int8
+        elif t.width == 16:
+            return ctypes.c_int16
+        elif t.width == 32:
+            return ctypes.c_int32
         elif t.width == 64:
-            return ctypes.c_longlong
+            return ctypes.c_int64
         else:
-            assert False, "Integer type too big!"
+            assert False, "Unknown integer type: {}".format(type_t)
     elif type_t is ir.DoubleType:
         return ctypes.c_double
     elif type_t is ir.FloatType:
diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py
index 48881283fb0..2de97a6c7fa 100644
--- a/psyneulink/core/llvm/codegen.py
+++ b/psyneulink/core/llvm/codegen.py
@@ -916,7 +916,7 @@ def gen_composition_run(ctx, composition, *, tags:frozenset):
         node_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(idx)])
         num_executions_ptr = helpers.get_state_ptr(builder, node, node_state, "num_executions")
         num_exec_time_ptr = builder.gep(num_executions_ptr, [ctx.int32_ty(0), ctx.int32_ty(TimeScale.RUN.value)])
-        builder.store(ctx.int32_ty(0), num_exec_time_ptr)
+        builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr)

     # Call execution
     exec_tags = tags.difference({"run"})
diff --git a/psyneulink/core/llvm/jit_engine.py b/psyneulink/core/llvm/jit_engine.py
index b4a0d12c3d8..7ee8664e8f9 100644
--- a/psyneulink/core/llvm/jit_engine.py
+++ b/psyneulink/core/llvm/jit_engine.py
@@ -24,6 +24,8 @@
     if pycuda.driver.get_version()[0] > 5:
         from pycuda import autoinit as pycuda_default
         import pycuda.compiler
+        assert pycuda_default.context is not None
+        pycuda_default.context.set_cache_config(pycuda.driver.func_cache.PREFER_L1)
         ptx_enabled = True
     else:
         raise UserWarning("CUDA driver too old (need 6+): " + str(pycuda.driver.get_version()))
@@ -316,5 +318,5 @@ def get_kernel(self, name):
             wrapper_mod = _gen_cuda_kernel_wrapper_module(function)
             self.compile_modules([wrapper_mod], set())
             kernel = self._engine._find_kernel(name + "_cuda_kernel")
-
+            kernel.set_cache_config(pycuda.driver.func_cache.PREFER_L1)
         return kernel
diff --git a/requirements.txt b/requirements.txt
index 719cd01662d..ecf6041bf52 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 autograd<=1.3
 dill<=0.32
 elfi<=0.7.6
-graphviz<=0.14.1
+graphviz<0.14.3
 grpcio<=1.31.0
 grpcio-tools<=1.31.0
 llvmlite<=0.34
diff --git a/tests/llvm/test_custom_func.py b/tests/llvm/test_custom_func.py
index d9d1f32b770..7bfbbc0b5e6 100644
--- a/tests/llvm/test_custom_func.py
+++ b/tests/llvm/test_custom_func.py
@@ -64,3 +64,45 @@ def test_fixed_dimensions__pnl_builtin_vxm(mode):
         binf2.cuda_wrap_call(vector, matrix, new_res)

     assert np.array_equal(orig_res, new_res)
+
+
+@pytest.mark.llvm
+@pytest.mark.parametrize('mode', ['CPU',
+                                  pytest.param('PTX', marks=pytest.mark.cuda)])
+@pytest.mark.parametrize('val', [np.int8(0x7e),
+                                 np.int16(0x7eec),
+                                 np.int32(0x7eedbeee),
+                                 np.int64(0x7eedcafedeadbeee)
+                                 ], ids=lambda x: str(x.dtype))
+def test_integer_broadcast(mode, val):
+    custom_name = None
+    with pnlvm.LLVMBuilderContext() as ctx:
+        custom_name = ctx.get_unique_name("broadcast")
+        int_ty = ctx.convert_python_struct_to_llvm_ir(val)
+        int_array_ty = ir.ArrayType(int_ty, 8)
+        func_ty = ir.FunctionType(ir.VoidType(), (int_ty.as_pointer(),
+                                                  int_array_ty.as_pointer()))
+        function = ir.Function(ctx.module, func_ty, name=custom_name)
+
+        i, o = function.args
+        block = function.append_basic_block(name="entry")
+        builder = ir.IRBuilder(block)
+        ival = builder.load(i)
+        ival = builder.add(ival, ival.type(1))
+        with pnlvm.helpers.array_ptr_loop(builder, o, "broadcast") as (b, i):
+            out_ptr = builder.gep(o, [ctx.int32_ty(0), i])
+            builder.store(ival, out_ptr)
+        builder.ret_void()
+
+    binf = pnlvm.LLVMBinaryFunction.get(custom_name)
+    res = np.zeros(8, dtype=val.dtype)
+
+    if mode == 'CPU':
+        ct_res = np.ctypeslib.as_ctypes(res)
+        ct_in = np.ctypeslib.as_ctypes(val)
+
+        binf(ctypes.byref(ct_in), ctypes.byref(ct_res))
+    else:
+        binf.cuda_wrap_call(np.asarray(val), res)
+
+    assert all(res == np.broadcast_to(val + 1, 8))
diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py
index 6887c8ea603..f8fda48d54d 100644
--- a/tests/mechanisms/test_ddm_mechanism.py
+++ b/tests/mechanisms/test_ddm_mechanism.py
@@ -793,3 +793,20 @@ def test_sequence_of_DDM_mechs_in_Composition_Pathway():
         # if you do not specify, assert_allcose will use a relative tolerance of 1e-07,
         # which WILL FAIL unless you gather higher precision values to use as reference
         np.testing.assert_allclose(val, expected, atol=1e-08, err_msg='Failed on expected_output[{0}]'.format(i))
+
+
+@pytest.mark.mechanism
+@pytest.mark.ddm_mechanism
+@pytest.mark.parametrize('mode', ['Python',
+                                  pytest.param('LLVM', marks=pytest.mark.llvm),
+                                  pytest.param('LLVMExec', marks=pytest.mark.llvm),
+                                  pytest.param('LLVMRun', marks=pytest.mark.llvm),
+                                  pytest.param('PTXExec', marks=[pytest.mark.llvm, pytest.mark.cuda]),
+                                  pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda])])
+def test_DDMMechanism_LCA_equivalent(mode):
+    ddm = DDM(default_variable=[0], function=DriftDiffusionIntegrator(rate=1, time_step_size=0.1))
+    comp2 = Composition()
+    comp2.add_node(ddm)
+    result2 = comp2.run(inputs={ddm:[1]}, bin_execute=mode)
+    assert np.allclose(np.asfarray(result2[0]), [0.1])
+    assert np.allclose(np.asfarray(result2[1]), [0.1])
diff --git a/tests/mechanisms/test_lca.py b/tests/mechanisms/test_lca.py
index 64dfaecedc4..85f2dca3d77 100644
--- a/tests/mechanisms/test_lca.py
+++ b/tests/mechanisms/test_lca.py
@@ -285,6 +285,23 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel
     #     result = comp.run(inputs={lca:[1,0]})
     #     assert np.allclose(result, [[0.71463572, 0.28536428]])

+    @pytest.mark.mechanism
+    @pytest.mark.lca_mechanism
+    @pytest.mark.parametrize('mode', ['Python',
+                                      pytest.param('LLVM', marks=pytest.mark.llvm),
+                                      pytest.param('LLVMExec', marks=pytest.mark.llvm),
+                                      pytest.param('LLVMRun', marks=pytest.mark.llvm),
+                                      pytest.param('PTXExec', marks=[pytest.mark.llvm, pytest.mark.cuda]),
+                                      pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda])])
+    def test_LCAMechanism_DDM_equivalent(self, mode):
+        lca = LCAMechanism(size=2, leak=0., threshold=1, auto=0, hetero=0,
+                           initial_value=[0, 0], execute_until_finished=False)
+        comp1 = Composition()
+        comp1.add_node(lca)
+        result1 = comp1.run(inputs={lca:[1, -1]}, bin_execute=mode)
+        assert np.allclose(result1, [[0.52497918747894, 0.47502081252106]],)
+
+
 class TestLCAReset:

     def test_reset_run(self):
diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py
index a4ab38020b1..7377306d5f4 100644
--- a/tests/mechanisms/test_processing_mechanism.py
+++ b/tests/mechanisms/test_processing_mechanism.py
@@ -66,75 +66,38 @@ def test_processing_mechanism_linear_function(self):
         PM2.execute(1.0)
         assert np.allclose(PM2.value, 3.0)

-    def test_processing_mechanism_LinearCombination_function(self):
-
-        PM1 = ProcessingMechanism(function=LinearCombination)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_Reduce_function(self):
-        PM1 = ProcessingMechanism(function=Reduce)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_CombineMeans_function(self):
-        PM1 = ProcessingMechanism(function=CombineMeans)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_Exponential_function(self):
-        PM1 = ProcessingMechanism(function=Exponential)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_Logistic_function(self):
-        PM1 = ProcessingMechanism(function=Logistic)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_SoftMax_function(self):
-        PM1 = ProcessingMechanism(function=SoftMax(per_item=False))
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_SimpleIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=SimpleIntegrator)
-        PM1.execute(1.0)
-
-    def test_processing_mechanism_AdaptiveIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=AdaptiveIntegrator)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_DriftDiffusionIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=DriftDiffusionIntegrator)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_OrnsteinUhlenbeckIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=OrnsteinUhlenbeckIntegrator)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_AccumulatorIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=AccumulatorIntegrator)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_FitzHughNagumoIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=FitzHughNagumoIntegrator)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_DualAdaptiveIntegrator_function(self):
-        PM1 = ProcessingMechanism(function=DualAdaptiveIntegrator)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_BogaczEtAl_function(self):
-        PM1 = ProcessingMechanism(function=DriftDiffusionAnalytical)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
+    @pytest.mark.parametrize("function,expected", [(LinearCombination, [[1.]]),
+                                                   (Reduce, [[1.]]),
+                                                   (CombineMeans, [1.0]),
+                                                   (Exponential, [[2.71828183]]),
+                                                   (Logistic, [[0.73105858]]),
+                                                   (SoftMax, [[1,]]),
+                                                   (SimpleIntegrator, [[1.]]),
+                                                   (AdaptiveIntegrator, [[1.]]),
+                                                   (DriftDiffusionIntegrator, [[[1.]], [[1.]]]),
+                                                   (OrnsteinUhlenbeckIntegrator, [[[-1.]], [[1.]]]),
+                                                   (AccumulatorIntegrator, [[0.]]),
+                                                   (FitzHughNagumoIntegrator, [[[0.05127053]], [[0.00279552]], [[0.05]]]),
+                                                   (DualAdaptiveIntegrator, [[0.1517455]]),
+                                                   (DriftDiffusionAnalytical, [[1.19932930e+00],
+                                                                               [3.35350130e-04],
+                                                                               [1.19932930e+00],
+                                                                               [2.48491374e-01],
+                                                                               [1.48291009e+00],
+                                                                               [1.19932930e+00],
+                                                                               [2.48491374e-01],
+                                                                               [1.48291009e+00]]),
+                                                   (NormalDist, [[-0.51529709]]),
+                                                   (ExponentialDist, [[0.29964231]]),
+                                                   (UniformDist, [[0.25891675]]),
+                                                   (GammaDist, [[0.29964231]]),
+                                                   (WaldDist, [[0.73955962]]),
+                                                   ],
+                             ids=lambda x: getattr(x, "componentName", ""))
+    def test_processing_mechanism_function(self, function, expected):
+        PM = ProcessingMechanism(function=function)
+        res = PM.execute(1.0)
+        assert np.allclose(np.asfarray(res), expected)

     # COMMENTED OUT BECAUSE OF MATLAB ENGINE:
     # def test_processing_mechanism_NavarroAndFuss_function(self):
@@ -142,36 +105,6 @@ def test_processing_mechanism_BogaczEtAl_function(self):
     #     PM1.execute(1.0)
     #     # assert np.allclose(PM1.value, 1.0)

-    def test_processing_mechanism_NormalDist_function(self):
-        PM1 = ProcessingMechanism(function=NormalDist)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_ExponentialDist_function(self):
-        PM1 = ProcessingMechanism(function=ExponentialDist)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_UniformDist_function(self):
-        PM1 = ProcessingMechanism(function=UniformDist)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_GammaDist_function(self):
-        PM1 = ProcessingMechanism(function=GammaDist)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_WaldDist_function(self):
-        PM1 = ProcessingMechanism(function=WaldDist)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
-    def test_processing_mechanism_Stability_function(self):
-        PM1 = ProcessingMechanism(function=Stability)
-        PM1.execute(1.0)
-        # assert np.allclose(PM1.value, 1.0)
-
     def test_processing_mechanism_Distance_function(self):

         PM1 = ProcessingMechanism(function=Distance, default_variable=[[0,0], [0,0]])
@@ -317,59 +250,43 @@ def test_invalid_matrix_specs(self):

 class TestProcessingMechanismStandardOutputPorts:

-    def test_mean(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MEAN])
-        PM1.execute([1,2,4])
-        assert np.allclose(PM1.output_ports[0].value,[2.33333333])
-
-    def test_median(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MEDIAN])
-        PM1.execute([1,2,4])
-        assert np.allclose(PM1.output_ports[0].value,[2])
-
-    def test_std_dev(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[STANDARD_DEVIATION])
-        PM1.execute([1,2,4])
-        assert np.allclose(PM1.output_ports[0].value,[1.24721913])
-
-    def test_variance(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[VARIANCE])
-        PM1.execute([1,2,4])
-        assert np.allclose(PM1.output_ports[0].value,[1.55555556])
-
-    def test_max_val(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MAX_VAL])
-        PM1.execute([1,2,-4])
-        # assert np.allclose(PM1.output_ports[0].value,[0,2,0])
-        assert np.allclose(PM1.output_ports[0].value,[2])
-
-    def test_max_abs_val(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MAX_ABS_VAL])
-        PM1.execute([1,2,-4])
-        # assert np.allclose(PM1.output_ports[0].value,[0,0,-4])
-        assert np.allclose(PM1.output_ports[0].value,[4])
-
-    def test_max_one_hot(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MAX_ONE_HOT])
-        PM1.execute([1,2,-4])
-        assert np.allclose(PM1.output_ports[0].value,[0,2,0])
-
-    def test_max_abs_one_hot(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MAX_ABS_ONE_HOT])
-        PM1.execute([1,2,-4])
-        assert np.allclose(PM1.output_ports[0].value,[0,0,4])
-
-    def test_max_indicator(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MAX_INDICATOR])
-        PM1.execute([1,2,-4])
-        assert np.allclose(PM1.output_ports[0].value,[0,1,0])
-
-    def test_max_abs_indicator(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[MAX_ABS_INDICATOR])
-        PM1.execute([1,2,-4])
-        assert np.allclose(PM1.output_ports[0].value,[0,0,1])
-
-    def test_prob(self):
-        PM1 = ProcessingMechanism(default_variable=[0,0,0], output_ports=[PROB])
-        PM1.execute([1,2,4])
-        assert np.allclose(PM1.output_ports[0].value,[0,0,4])
+    @pytest.mark.benchmark
+    @pytest.mark.parametrize("op, expected", [(MAX_ONE_HOT, [0, 2, 0]),
+                                              (MAX_INDICATOR, [0, 1, 0]),
+                                              (MAX_ABS_INDICATOR, [0, 0, 1]),
+                                              ],
+                             ids=lambda x: x if isinstance(x, str) else "")
+    @pytest.mark.parametrize("mode", ["Python",
+                                      pytest.param("LLVM", marks=[pytest.mark.llvm]),
+                                      pytest.param("PTX", marks=[pytest.mark.llvm, pytest.mark.cuda]),
+                                      ])
+    def test_output_ports(self, mode, op, expected, benchmark):
+        benchmark.group = "Output Port Op: {}".format(op)
+        PM1 = ProcessingMechanism(default_variable=[0, 0, 0], output_ports=[op])
+        var = [1, 2, 4] if op in {MEAN, MEDIAN, STANDARD_DEVIATION, VARIANCE} else [1, 2, -4]
+        if mode == "Python":
+            ex = PM1.execute
+        elif mode == "LLVM":
+            ex = pnlvm.MechExecution(PM1).execute
+        elif mode == "PTX":
+            ex = pnlvm.MechExecution(PM1).cuda_execute
+        res = benchmark(ex, var)
+        res = PM1.output_ports[0].value if mode == "Python" else res
+        assert np.allclose(res, expected)
+
+    # FIXME: These variants don't compile (use UDFs)
+    @pytest.mark.parametrize("op, expected", [(MEAN, [2.33333333]),
+                                              (MEDIAN, [2]),
+                                              (STANDARD_DEVIATION, [1.24721913]),
+                                              (VARIANCE, [1.55555556]),
+                                              (MAX_VAL, [2]),
+                                              (MAX_ABS_VAL, [4]),
+                                              (MAX_ABS_ONE_HOT, [0, 0, 4]),
+                                              (PROB, [0, 2, 0]),
+                                              ],
+                             ids=lambda x: x if isinstance(x, str) else "")
+    def test_output_ports2(self, op, expected):
+        PM1 = ProcessingMechanism(default_variable=[0, 0, 0], output_ports=[op])
+        var = [1, 2, 4] if op in {MEAN, MEDIAN, STANDARD_DEVIATION, VARIANCE} else [1, 2, -4]
+        PM1.execute(var)
+        assert np.allclose(PM1.output_ports[0].value, expected)
diff --git a/tests/models/test_greedy_agent.py b/tests/models/test_greedy_agent.py
index a1103779a72..d1ad1e8046e 100644
--- a/tests/models/test_greedy_agent.py
+++ b/tests/models/test_greedy_agent.py
@@ -128,9 +128,9 @@ def test_simplified_greedy_agent_random(benchmark, mode):
                                       pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda]),
                                       ])
 @pytest.mark.parametrize("samples", [[0,10],
-                                     pytest.param([a / 10.0 for a in range(0, 101)]),
                                      pytest.param([0,3,6,10], marks=pytest.mark.stress),
                                      pytest.param([0,2,4,6,8,10], marks=pytest.mark.stress),
+                                     pytest.param([a / 10.0 for a in range(0, 101)]),
                                      ], ids=lambda x: len(x))
 def test_predator_prey(benchmark, mode, samples):
     if len(samples) > 10 and mode not in {"LLVMRun", "Python-PTX"}:
diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt
index 4748e19a21b..cecd6bc3c6b 100644
--- a/tutorial_requirements.txt
+++ b/tutorial_requirements.txt
@@ -1,3 +1,3 @@
-graphviz<=0.14.1
+graphviz<0.14.3
 jupyter<=1.0.0
 matplotlib<=3.3.2