treewide: Cleanup. Reduce the number of warnings issued during test execution (#2541)

Update patterns that cause Python warnings at runtime

 * Rewrite Angle function algorithm to avoid division and improve performance. Fixes division by zero during initialization.
 * Remove uses of np.float. The existing use cases already handle the builtin float; np.float is just a deprecated alias of it.
 * Update TransferMechanism tests that override parameters to capture and check for the presence of the override warning (a sketch of this pattern follows the file summary below)
 * Update tests that hardcode checks for warnings in a specific order
 * Adjust test configurations to avoid warnings unless the test is intended to exercise situations that produce warnings
 * Use both the Python 3.7 and Python 3.8+ code paths in the AST parser when visiting constants
jvesely authored Nov 22, 2022
2 parents c5c4940 + 8885f9f commit 50c545b
Showing 10 changed files with 94 additions and 66 deletions.
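
Most of the test changes below follow a single pattern: build the object inside pytest.warns and then assert that the expected message is present in whatever was collected, instead of assuming how many warnings fire or in what order. A minimal standalone sketch of that pattern (the constructor and the warning text here are stand-ins, not PsyNeuLink code):

import warnings

import pytest

def make_mechanism():
    # stand-in for a constructor that emits a parameter-override warning among possibly others
    warnings.warn('Specification of the "noise" parameter overrides the function default', UserWarning)
    warnings.warn("some unrelated warning", UserWarning)
    return object()

def test_override_warning_is_reported():
    with pytest.warns(UserWarning) as warning_list:
        mech = make_mechanism()
    # check for presence, not position: other warnings may be emitted first
    assert any(str(w.message).startswith('Specification of the "noise" parameter')
               for w in warning_list)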
@@ -1809,14 +1809,16 @@ def _angle(self, value):
         value = np.squeeze(value)
         dim = len(value) + 1
         angle = np.zeros(dim)
-        angle[0] = np.cos(value[0])
-        prod = np.product([np.sin(value[k]) for k in range(1, dim - 1)])
-        n_prod = prod
+        sin_value = np.sin(value)
+        cos_value = np.cos(value)
+        angle[0] = cos_value[0]
+        prod_a = np.cumprod(np.flip(sin_value))[:-1]
+        angle[dim - 1] = prod_a[-1]
+        prod_a[-1] = 1.
+
+        # going down from the top of cumprod we skip: 2 edge values +1 extra for output size
         for j in range(1, dim - 1):
-            n_prod /= np.sin(value[j])
-            amt = n_prod * np.cos(value[j])
-            angle[j] = amt
-        angle[dim - 1] = prod
+            angle[j] = prod_a[dim -3 -j] * cos_value[j]
         return angle

     def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
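
For reference, a standalone NumPy sketch of why the cumulative-product form matches the division-based loop it replaces (the function names and test vector below are invented for illustration and are not part of the codebase). The old loop divides by np.sin(value[j]), which blows up whenever an entry is zero, e.g. when the variable is all zeros at initialization; the cumprod version never divides:

import numpy as np

def angle_division_based(value):
    # old approach: divide the full product back out, term by term
    value = np.squeeze(value)
    dim = len(value) + 1
    angle = np.zeros(dim)
    angle[0] = np.cos(value[0])
    prod = np.prod([np.sin(value[k]) for k in range(1, dim - 1)])
    n_prod = prod
    for j in range(1, dim - 1):
        n_prod /= np.sin(value[j])            # RuntimeWarning (and inf/nan) when sin(value[j]) == 0
        angle[j] = n_prod * np.cos(value[j])
    angle[dim - 1] = prod
    return angle

def angle_cumprod_based(value):
    # new approach: one cumulative product over the flipped sines, no division
    value = np.squeeze(value)
    dim = len(value) + 1
    angle = np.zeros(dim)
    sin_value = np.sin(value)
    cos_value = np.cos(value)
    angle[0] = cos_value[0]
    prod_a = np.cumprod(np.flip(sin_value))[:-1]
    angle[dim - 1] = prod_a[-1]
    prod_a[-1] = 1.
    for j in range(1, dim - 1):
        angle[j] = prod_a[dim - 3 - j] * cos_value[j]
    return angle

v = np.random.uniform(0.1, 3.0, size=7)       # keep sines away from zero so the old version is well defined
assert np.allclose(angle_division_based(v), angle_cumprod_based(v))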
psyneulink/core/globals/utilities.py (11 changes: 9 additions & 2 deletions)
@@ -440,7 +440,8 @@ def iscompatible(candidate, reference=None, **kargs):
     try:
         with warnings.catch_warnings():
             warnings.simplefilter(action='ignore', category=FutureWarning)
-            if reference is not None and (candidate == reference):
+            # np.array(...).size > 0 checks for an empty list. Everything else creates a single-element (dtype=object) array
+            if reference is not None and np.array(candidate, dtype=object).size > 0 and (candidate == reference):
                 return True
     # if reference is not None:
     # if (isinstance(reference, (bool, int, float))
@@ -458,6 +459,12 @@ def iscompatible(candidate, reference=None, **kargs):
         # ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
         pass

+    # If the two are the same thing, can settle it right here
+    # This is a common pattern for tests that use the same structure
+    # as default variable and variable
+    if reference is not None and candidate is reference:
+        return True
+
     # If args not provided, assign to default values
     # if not specified in args, use these:
     # args[kwCompatibilityType] = list
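
The identity check added above short-circuits a case that otherwise trips NumPy's comparison machinery. A small standalone illustration (plain NumPy, variable names invented here) of why `candidate == reference` is fragile when the values are arrays, and how an identity check settles the common case without any comparison:

import numpy as np

reference = np.zeros(3)
candidate = reference                      # e.g. a test passing its default variable back in as the variable

# Element-wise '==' returns an array; using that array as an 'if' condition raises
# "The truth value of an array with more than one element is ambiguous."
try:
    if candidate == np.zeros(3):
        print("equal")
except ValueError as error:
    print(error)

# The identity check settles this case without building a comparison array at all.
if candidate is reference:
    print("same object, trivially compatible")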
@@ -1031,7 +1038,7 @@ def type_match(value, value_type):
         return value
     if value_type in {int, np.integer, np.int64, np.int32}:
         return int(value)
-    if value_type in {float, np.float, np.float64, np.float32}:
+    if value_type in {float, np.float64, np.float32}:
         return float(value)
     if value_type is np.ndarray:
         return np.array(value)
psyneulink/core/llvm/codegen.py (28 changes: 17 additions & 11 deletions)
@@ -68,11 +68,6 @@ def np_cmp(builder, x, y):
             if v is np:
                 self.register[k] = numpy_handlers

-        name_constants = {
-            True: ctx.bool_ty(1),
-            False: ctx.bool_ty(0),
-        }
-        self.name_constants = name_constants
         super().__init__()

     def _update_debug_metadata(self, builder: ir.IRBuilder, node:ast.AST):
@@ -239,9 +234,6 @@ def _convert(builder, x):

         return val[node.attr]

-    def visit_Num(self, node):
-        return self.ctx.float_ty(node.n)
-
     def visit_Assign(self, node):
         value = self.visit(node.value)

@@ -259,10 +251,24 @@ def visit_Assign(self, node):
         assert self.is_lval(target)
         self.builder.store(value, target)

+    # visit_Constant is supported in Python3.8+
+    def visit_Constant(self, node):
+        # Only True/False are currently supported as named constants
+        # Call deprecated visit_* methods to maintain coverage
+        if node.value is True or node.value is False:
+            return self.visit_NameConstant(node)
+
+        return self.visit_Num(node)
+
+    # deprecated in Python3.8+
     def visit_NameConstant(self, node):
-        val = self.name_constants[node.value]
-        assert val, f"Failed to convert NameConstant {node.value}"
-        return val
+        # Only True and False are supported atm
+        assert node.value is True or node.value is False
+        return self.ctx.bool_ty(node.value)
+
+    # deprecated in Python3.8+
+    def visit_Num(self, node):
+        return self.ctx.float_ty(node.n)

     def visit_Tuple(self, node:ast.AST):
         elements = (self.visit(element) for element in node.elts)
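
Background on the hunk above, as a standalone sketch (the visitor below is a toy, not the class in codegen.py): on Python 3.8+ every literal parses to ast.Constant, and if an ast.NodeVisitor subclass defines only the pre-3.8 methods (visit_Num, visit_NameConstant, ...), the base class forwards Constant nodes to them while emitting a DeprecationWarning. Defining visit_Constant and dispatching to the old methods keeps both code paths exercised without that warning:

import ast
import warnings

class ConstantVisitor(ast.NodeVisitor):
    # Python 3.8+ parses every literal into ast.Constant and calls this method directly,
    # bypassing the deprecation shim in ast.NodeVisitor.
    def visit_Constant(self, node):
        if node.value is True or node.value is False:
            return self.visit_NameConstant(node)
        return self.visit_Num(node)

    # Helpers named after the pre-3.8 visitor methods; visit_Constant dispatches to them
    # so both code paths stay covered (the real visitor returns ctx.bool_ty / ctx.float_ty values instead).
    def visit_NameConstant(self, node):
        return bool(node.value)

    def visit_Num(self, node):
        return float(node.value)

with warnings.catch_warnings():
    # Turn DeprecationWarning into an error to demonstrate that none is raised on this path.
    warnings.simplefilter("error", DeprecationWarning)
    tree = ast.parse("1.5", mode="eval")
    print(ConstantVisitor().visit(tree.body))   # -> 1.5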
@@ -382,11 +382,11 @@ def get_hetero_matrix(raw_hetero, size):
 # similar to get_hetero_matrix() above
 def get_auto_matrix(raw_auto, size):
     if isinstance(raw_auto, numbers.Number):
-        return np.diag(np.full(size, raw_auto, dtype=np.float))
+        return np.diag(np.full(size, raw_auto, dtype=float))
     elif ((isinstance(raw_auto, np.ndarray) and raw_auto.ndim == 1) or
             (isinstance(raw_auto, list) and np.array(raw_auto).ndim == 1)):
         if len(raw_auto) == 1:
-            return np.diag(np.full(size, raw_auto[0], dtype=np.float))
+            return np.diag(np.full(size, raw_auto[0], dtype=float))
         else:
             if len(raw_auto) != size:
                 return None
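
A short aside on the np.float removals in this commit (the snippet below is illustrative, with arbitrary values): np.float was never a distinct type, only an alias of the builtin float; NumPy 1.20 started emitting a DeprecationWarning for it and NumPy 1.24 removed it, so the builtin float (or an explicit np.float64) is the drop-in replacement:

import numpy as np

# dtype=float (or np.float64) replaces the deprecated dtype=np.float with identical behavior
matrix = np.diag(np.full(4, 0.5, dtype=float))
assert matrix.dtype == np.float64          # the builtin float maps to 64-bit floats in NumPy
assert matrix.shape == (4, 4)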
tests/mechanisms/test_episodic_memory.py (10 changes: 5 additions & 5 deletions)
@@ -48,7 +48,7 @@
 @pytest.mark.parametrize('variable, func, params, expected', test_data, ids=names)
 def test_with_dictionary_memory(variable, func, params, expected, benchmark, mech_mode):
     f = func(seed=0, **params)
-    m = EpisodicMemoryMechanism(content_size=len(variable[0]), assoc_size=len(variable[1]), function=f)
+    m = EpisodicMemoryMechanism(size=len(variable[0]), assoc_size=len(variable[1]), function=f)
     EX = pytest.helpers.get_mech_execution(m, mech_mode)

     EX(variable)
@@ -195,21 +195,21 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mech_mode):
 ]

 # Allows names to be with each test_data set
-names = [test_data[i][0] for i in range(len(test_data))]
+names = [td[0] for td in test_data]

 @pytest.mark.parametrize('name, func, func_params, mech_params, test_var,'
                          'input_port_names, output_port_names, expected_output', test_data, ids=names)
 def test_with_contentaddressablememory(name, func, func_params, mech_params, test_var,
                                        input_port_names, output_port_names, expected_output, mech_mode):
+    if mech_mode != 'Python':
+        pytest.skip("Compiled execution not yet implemented for ContentAddressableMemory")
+
     f = func(seed=0, **func_params)
     # EpisodicMemoryMechanism(function=f, **mech_params)
     em = EpisodicMemoryMechanism(function=f, **mech_params)
     assert em.input_ports.names == input_port_names
     assert em.output_ports.names == output_port_names

-    if mech_mode != 'Python':
-        pytest.skip("PTX not yet implemented for ContentAddressableMemory")
-
     EX = pytest.helpers.get_mech_execution(em, mech_mode)

tests/mechanisms/test_transfer_mechanism.py (63 changes: 37 additions & 26 deletions)
@@ -546,13 +546,16 @@ def test_transfer_mech_array_assignments_fct_rate(self, benchmark, mech_mode):
     @pytest.mark.benchmark(group="TransferMechanism Parameter Array Assignments")
     def test_transfer_mech_array_assignments_fct_over_mech_rate(self, benchmark, mech_mode):

-        T = TransferMechanism(
-            name='T',
-            default_variable=[0 for i in range(VECTOR_SIZE)],
-            integrator_mode=True,
-            integrator_function=AdaptiveIntegrator(rate=[i / 20 for i in range(VECTOR_SIZE)]),
-            integration_rate=[i / 10 for i in range(VECTOR_SIZE)]
-        )
+        with pytest.warns(UserWarning) as warnings:
+            T = TransferMechanism(
+                name='T',
+                default_variable=[0 for i in range(VECTOR_SIZE)],
+                integrator_mode=True,
+                integrator_function=AdaptiveIntegrator(rate=[i / 20 for i in range(VECTOR_SIZE)]),
+                integration_rate=[i / 10 for i in range(VECTOR_SIZE)]
+            )
+        assert any(str(w.message).startswith('Specification of the "integration_rate" parameter')
+                   for w in warnings), "Warnings: {}".format([str(w.message) for w in warnings])
         EX = pytest.helpers.get_mech_execution(T, mech_mode)

         var = [1 for i in range(VECTOR_SIZE)]
@@ -633,19 +636,23 @@ def test_transfer_mech_array_assignments_fct_initzr(self, benchmark, mech_mode):
     @pytest.mark.transfer_mechanism
     @pytest.mark.benchmark(group="TransferMechanism Parameter Array Assignments")
     def test_transfer_mech_array_assignments_fct_initlzr_over_mech_init_val(self, benchmark, mech_mode):
-        T = TransferMechanism(
-            name='T',
-            default_variable=[0 for i in range(VECTOR_SIZE)],
-            integrator_mode=True,
-            integrator_function=AdaptiveIntegrator(
-                default_variable=[0 for i in range(VECTOR_SIZE)],
-                initializer=[i / 10 for i in range(VECTOR_SIZE)]
-            ),
-            initial_value=[i / 10 for i in range(VECTOR_SIZE)]
-        )
-        EX = pytest.helpers.get_mech_execution(T, mech_mode)
+        with pytest.warns(UserWarning) as warnings:
+            T = TransferMechanism(
+                name='T',
+                default_variable=[0 for i in range(VECTOR_SIZE)],
+                integrator_mode=True,
+                integrator_function=AdaptiveIntegrator(
+                    default_variable=[0 for i in range(VECTOR_SIZE)],
+                    initializer=[i / 10 for i in range(VECTOR_SIZE)]
+                ),
+                initial_value=[i / 10 for i in range(VECTOR_SIZE)]
+            )
+        assert any(str(w.message).startswith('Specification of the "initial_value" parameter')
+                   for w in warnings), "Warnings: {}".format([str(w.message) for w in warnings])
+
+        EX = pytest.helpers.get_mech_execution(T, mech_mode)
         var = [1 for i in range(VECTOR_SIZE)]

         EX(var)
         val = benchmark(EX, var)
         assert np.allclose(val, [[ 0.75, 0.775, 0.8, 0.825]])
@@ -768,16 +775,20 @@ def test_transfer_mech_array_assignments_fct_noise(self, benchmark, mech_mode):
     # FIXME: Incorrect T.integrator_function.defaults.variable reported
     def test_transfer_mech_array_assignments_fct_over_mech_noise(self, benchmark, mech_mode):

-        T = TransferMechanism(
-            name='T',
-            default_variable=[0 for i in range(VECTOR_SIZE)],
-            integrator_mode=True,
-            integrator_function=AdaptiveIntegrator(noise=[i / 20 for i in range(VECTOR_SIZE)]),
-            noise=[i / 10 for i in range(VECTOR_SIZE)]
-        )
-        EX = pytest.helpers.get_mech_execution(T, mech_mode)
+        with pytest.warns(UserWarning) as warnings:
+            T = TransferMechanism(
+                name='T',
+                default_variable=[0 for i in range(VECTOR_SIZE)],
+                integrator_mode=True,
+                integrator_function=AdaptiveIntegrator(noise=[i / 20 for i in range(VECTOR_SIZE)]),
+                noise=[i / 10 for i in range(VECTOR_SIZE)]
+            )
+        assert any(str(w.message).startswith('Specification of the "noise" parameter')
+                   for w in warnings), "Warnings: {}".format([str(w.message) for w in warnings])
+
+        EX = pytest.helpers.get_mech_execution(T, mech_mode)
         var = [1 for i in range(VECTOR_SIZE)]

         EX(var)
         val = benchmark(EX, var)
         assert np.allclose(val, [[ 0.75, 0.825, 0.9, 0.975]])
tests/models/test_greedy_agent.py (2 changes: 1 addition & 1 deletion)
@@ -161,7 +161,7 @@ def action_fn(variable):

     # note: unitization is done in main loop
     greedy_action_mech = pnl.ProcessingMechanism(function=action_fn, input_ports=["predator", "player", "prey"],
-                                                 default_variable=[[0,0],[0,0],[0,0]], name="ACTION")
+                                                 default_variable=[[0, 1], [0, -1], [1, 0]], name="ACTION")

     direct_move = ComparatorMechanism(name='DIRECT MOVE',sample=player_pos, target=prey_pos)

tests/ports/test_input_ports.py (20 changes: 11 additions & 9 deletions)
@@ -112,19 +112,21 @@ def test_default_input(self, default_input):
         comp = pnl.Composition(nodes=(m, pnl.NodeRole.INTERNAL))
         assert pnl.NodeRole.INTERNAL in comp.get_roles_by_node(m)
         assert pnl.NodeRole.INPUT not in comp.get_roles_by_node(m)
-        assert not m.path_afferents
+
+        assert not m.path_afferents    # No path_afferents since internal_only is set by default_input
+
+
         if default_input is None:
-            with pytest.warns(UserWarning) as warning:  # Warn, since default_input is NOT set
+            with pytest.warns(UserWarning) as warnings:  # Warn, since default_input is NOT set
                 comp.run()
-            assert repr(warning[1].message.args[0]) == '"InputPort (\'INTERNAL_NODE\') of \'TransferMechanism-0\' ' \
-                                                       'doesn\'t have any afferent Projections."'
-            assert m.input_port.value == variable  # For Mechanisms other than controller, default_variable seems
-            assert m.value == variable             # to still be used even though default_input is NOT set
+            assert any(repr(w.message.args[0]) == '"InputPort (\'INTERNAL_NODE\') of \'TransferMechanism-0\' '
+                                                  'doesn\'t have any afferent Projections."'
+                       for w in warnings)
         else:
-            assert not m.path_afferents  # No path_afferents since internal_only is set by default_input
             comp.run() # No warning since default_input is set
-            assert m.input_port.value == variable
-            assert m.value == variable
+
+        assert m.input_port.value == variable    # For Mechanisms other than controller, default_variable seems
+        assert m.value == variable               # to still be used even though default_input is NOT set

     def test_no_efferents(self):
         A = pnl.InputPort()
tests/projections/test_projection_specifications.py (3 changes: 1 addition & 2 deletions)
@@ -480,8 +480,7 @@ def test_no_warning_when_matrix_specified(self):
             )
             c.add_linear_processing_pathway([m0, p0, m1])
             for warn in w:
-                if r'elementwise comparison failed; returning scalar instead' in warn.message.args[0]:
-                    raise
+                assert 'elementwise comparison failed; returning scalar instead' not in warn.message.args[0]

     # KDM: this is a good candidate for pytest.parametrize
     def test_masked_mapping_projection(self):
tests/scheduling/test_scheduler.py (3 changes: 2 additions & 1 deletion)
@@ -1592,7 +1592,8 @@ def test_scheduler_conditions(self, comp_mode, condition, scale, expected_result
                                            time_step_size=1.0),
                        reset_stateful_function_when=pnl.AtTrialStart(),
                        execute_until_finished=False,
-                       output_ports=[pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME],
+                       # Use only the decision variable in this test
+                       output_ports=[pnl.DECISION_VARIABLE],
                        name='DDM')

         response = pnl.ProcessingMechanism(size=2, name="GATE")
