autodiff_composition: Allow using torch 1.11 with python3.10 and Windows #2667

Merged · 4 commits · May 12, 2023
4 changes: 4 additions & 0 deletions broken_trans_deps.txt
@@ -11,3 +11,7 @@ ipython != 8.13.0; python_version < '3.9'
 # onnx == 1.14.0 removed a helper function that is needed by skl2onnx
 # https://github.com/onnx/onnx/issues/5202
 onnx != 1.14.0
+
+# torch wheels for win32 python3.10 are built against numpy>=1.23
+# https://github.com/pytorch/pytorch/issues/100690
+torch !=2.0.1, !=2.0.0, !=1.13.*, !=1.12.*; python_version == '3.10' and platform_system == 'Windows'
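
For reference, this exclusion line can be sanity-checked with the packaging library (pinned below in the requirements.txt change); a minimal sketch, not part of the PR:

    from packaging.requirements import Requirement

    # The new exclusion line from broken_trans_deps.txt, verbatim.
    req = Requirement(
        "torch !=2.0.1, !=2.0.0, !=1.13.*, !=1.12.*; "
        "python_version == '3.10' and platform_system == 'Windows'"
    )

    # The environment marker gates whether the exclusions apply at all.
    print(req.marker.evaluate({'python_version': '3.10',
                               'platform_system': 'Windows'}))  # True

    # torch 1.11 stays installable; the broken wheels are excluded.
    print(req.specifier.contains('1.11.0'))  # True
    print(req.specifier.contains('1.13.1'))  # False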
20 changes: 7 additions & 13 deletions conftest.py
@@ -7,23 +7,17 @@
 import re
 import sys

-from psyneulink import clear_registry, primary_registries
+from psyneulink import clear_registry, primary_registries, torch_available
 from psyneulink.core import llvm as pnlvm
 from psyneulink.core.globals.utilities import set_global_seed


 try:
     import torch
-
-    # If we are on windows and using Python 3.10, despite it importing correctly, PyTorch is currently broken,
-    # see https://pytorch.org/get-started/locally/ showing lack of support.
-    if sys.platform.startswith("win32") and sys.version_info >= (3, 10):
-        pytorch_available = False
-    else:
-        pytorch_available = True
-
 except ImportError:
-    pytorch_available = False
+    pass
+else:
+    # Check that torch is usable if installed
+    assert torch_available, "Torch module is available, but not usable by PNL"

 # def pytest_addoption(parser):
 #     parser.addoption(
@@ -57,7 +51,7 @@ def pytest_runtest_setup(item):
     if 'cuda' in item.keywords and not pnlvm.ptx_enabled:
         pytest.skip('PTX engine not enabled/available')

-    if 'pytorch' in item.keywords and not pytorch_available:
+    if 'pytorch' in item.keywords and not torch_available:
         pytest.skip('pytorch not available')

 doctest.ELLIPSIS_MARKER = "[...]"
@@ -122,7 +116,7 @@ def pytest_runtest_call(item):
         set_global_seed(seed)

     if 'pytorch' in item.keywords:
-        assert pytorch_available
+        assert torch_available
         torch.manual_seed(seed)
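
As a usage note, individual tests can consume the same flag directly; a hedged sketch (the test name and body are illustrative, not from this PR):

    import pytest
    from psyneulink import torch_available

    # Skip when torch is missing or unusable, mirroring the 'pytorch'
    # keyword handling in pytest_runtest_setup above.
    @pytest.mark.skipif(not torch_available, reason='pytorch not available')
    def test_torch_numpy_roundtrip():
        import torch
        assert torch.tensor([1.0]).numpy()[0] == 1.0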
14 changes: 14 additions & 0 deletions psyneulink/library/compositions/__init__.py
@@ -4,3 +4,17 @@
 __all__ = list(regressioncfa.__all__)
 __all__.extend(compositionrunner.__all__)
 __all__.extend(autodiffcomposition.__all__)
+
+try:
+    import torch
+    from torch import nn
+
+    # Some torch releases have a silent dependency on a more recent numpy than the one currently required by PNL.
+    # This breaks torch numpy bindings, see e.g.: https://github.com/pytorch/pytorch/issues/100690
+    torch.tensor([1, 2, 3]).numpy()
+
+    torch_available = True
+except (ImportError, RuntimeError):
+    torch_available = False
+
+__all__.append('torch_available')

(CodeQL code-scanning notice on this hunk: Module 'torch' is imported with both 'import' and 'import from'.)
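
The probe distinguishes "not installed" from "installed but unusable"; a minimal sketch of the two failure paths (the printed messages are illustrative):

    try:
        import torch
        # On a wheel built against a newer numpy, the numpy bridge fails
        # at call time (RuntimeError), not at import time.
        torch.tensor([1, 2, 3]).numpy()
    except ImportError:
        print('torch is not installed')
    except RuntimeError:
        print('torch is installed but its numpy bindings are unusable')
    else:
        print('torch is available and usable')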
13 changes: 5 additions & 8 deletions psyneulink/library/compositions/autodiffcomposition.py
@@ -251,6 +251,7 @@
 import os
 import warnings
 import numpy as np
+from packaging import version
 from pathlib import Path, PosixPath

 try:
@@ -456,19 +457,15 @@ def _get_loss(self, loss_spec):
         elif loss_spec == Loss.SSE:
             return nn.MSELoss(reduction='sum')
         elif loss_spec == Loss.CROSS_ENTROPY:
+            if version.parse(torch.version.__version__) >= version.parse('1.12.0'):
+                return nn.CrossEntropyLoss()
+
             # Cross entropy loss is used for multiclass categorization and needs inputs in shape
             # ((# minibatch_size, C), targets) where C is a 1-d vector of probabilities for each potential category
             # and where target is a 1-d vector of type long specifying the index of the target category. This
             # formatting is different from most other loss functions available to autodiff compositions,
             # and therefore requires a wrapper function to properly package inputs.
-            cross_entropy_loss = nn.CrossEntropyLoss()
-            return lambda x, y: cross_entropy_loss(
-                # x.unsqueeze(0),
-                x,
-                # y.type(torch.LongTensor)
-                # torch.argmax(y.type(torch.LongTensor))
-                y.type(x.type())
-            )
+            return lambda x, y: nn.CrossEntropyLoss()(torch.atleast_2d(x), torch.atleast_2d(y.type(x.type())))
         elif loss_spec == Loss.L1:
             return nn.L1Loss(reduction='sum')
         elif loss_spec == Loss.NLL:
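
To see what the fallback wrapper does, a minimal sketch (it assumes a torch release that accepts probability targets, which both branches rely on; the values are illustrative):

    import torch
    from torch import nn

    # 1-D logits and a 1-D probability target, as autodiff compositions
    # hand them to the loss function.
    x = torch.tensor([0.2, 0.5, 0.3])
    y = torch.tensor([0.0, 1.0, 0.0])

    # Older torch releases reject unbatched (1-D) inputs to
    # CrossEntropyLoss, so the wrapper promotes both to shape (1, C)
    # and matches the target dtype to the input dtype.
    loss_fn = lambda a, b: nn.CrossEntropyLoss()(
        torch.atleast_2d(a), torch.atleast_2d(b.type(a.type())))

    print(loss_fn(x, y))  # scalar tensor, roughly 0.94 for these values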
13 changes: 2 additions & 11 deletions psyneulink/library/compositions/pytorchmodelcreator.py
@@ -1,4 +1,6 @@
 import graph_scheduler
+import torch
+import torch.nn as nn

 from psyneulink.core.components.component import Component, ComponentsMeta
 from psyneulink.core.compositions.composition import NodeRole
@@ -11,23 +13,12 @@
 from psyneulink.core.globals.utilities import get_deepcopy_with_shared
 from .pytorchcomponents import *

-try:
-    import torch
-    from torch import nn
-    torch_available = True
-except ImportError:
-    torch_available = False
-
 __all__ = ['PytorchModelCreator']

 class PytorchModelCreator(torch.nn.Module):
     # sets up parameters of model & the information required for forward computation
     def __init__(self, composition, device, context=None):

-        if not torch_available:
-            raise Exception('Pytorch python module (torch) is not installed. Please install it with '
-                            '`pip install torch` or `pip3 install torch`')
-
         super(PytorchModelCreator, self).__init__()

         # Maps Mechanism -> PytorchMechanismWrapper
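
For context, PytorchModelCreator now assumes torch at import time and follows the standard torch.nn.Module pattern; a generic sketch (TinyXOR is illustrative, not PNL code):

    import torch

    class TinyXOR(torch.nn.Module):
        def __init__(self):
            # The parent constructor must run before any parameters are
            # registered, hence the super().__init__() call up front.
            super().__init__()
            self.hid = torch.nn.Linear(2, 10)
            self.out = torch.nn.Linear(10, 1)

        def forward(self, x):
            return torch.sigmoid(self.out(torch.sigmoid(self.hid(x))))

    print(TinyXOR()(torch.zeros(1, 2)).shape)  # torch.Size([1, 1])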
1 change: 1 addition & 0 deletions requirements.txt
@@ -12,6 +12,7 @@ modeci_mdf<0.5, >=0.3.4; (platform_machine == 'AMD64' or platform_machine == 'x8
 networkx<3.2
 numpy<1.22.5, >=1.19.0
 optuna<3.2.0
+packaging<24.0
 pandas<2.0.2
 pillow<9.6.0
 pint<0.22.0
22 changes: 10 additions & 12 deletions tests/composition/test_autodiffcomposition.py
@@ -1573,9 +1573,12 @@ def test_training_then_processing(self, autodiff_mode):
     #     np.testing.assert_allclose(pt_weights_out_bp, pt_weights_out_ap)

     @pytest.mark.parametrize(
-        'loss', [Loss.MSE, Loss.L1, Loss.POISSON_NLL, Loss.CROSS_ENTROPY]
+        'loss, expected', [(Loss.MSE, [[[0.99330509]], [[0.99933169]], [[0.99933169]], [[0.9998504]]]),
+                           (Loss.L1, []),
+                           (Loss.POISSON_NLL, []),
+                           (Loss.CROSS_ENTROPY, [[[0.99330715]], [[0.99933202]], [[0.99933202]], [[0.99985049]]])]
     )
-    def test_various_loss_specs(self, loss, autodiff_mode):
+    def test_loss_specs(self, loss, expected, autodiff_mode):
         if autodiff_mode is not pnl.ExecutionMode.Python and loss in [Loss.POISSON_NLL, Loss.L1]:
             pytest.skip("Loss spec not yet implemented!")
@@ -1602,22 +1605,17 @@ def test_various_loss_specs(self, loss, autodiff_mode):
         xor.add_projection(sender=xor_in, projection=hid_map, receiver=xor_hid)
         xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out)

-        xor_inputs = np.array(  # the inputs we will provide to the model
-            [[0, 0],
-             [0, 1],
-             [1, 0],
-             [1, 1]])
+        xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

-        xor_targets = np.array(  # the outputs we wish to see from the model
-            [[0],
-             [1],
-             [1],
-             [0]])
+        xor_targets = np.array([[0], [1], [1], [0]])

         xor.learn(inputs = {"inputs": {xor_in:xor_inputs},
                             "targets": {xor_out:xor_targets},
                             "epochs": 10}, execution_mode=autodiff_mode)

+        tol = {'atol': 2e-6, 'rtol': 2e-6} if autodiff_mode != pnl.ExecutionMode.Python and loss == Loss.CROSS_ENTROPY else {}
+        np.testing.assert_allclose(xor.learning_results, expected, **tol)

     def test_pytorch_loss_spec(self, autodiff_mode):

         if autodiff_mode is not pnl.ExecutionMode.Python:
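
For reference, the compiled-mode tolerance above follows numpy's assert_allclose rule, |actual - desired| <= atol + rtol * |desired|; a minimal sketch:

    import numpy as np

    # Passes: |1.0000019 - 1.0| = 1.9e-6 <= 2e-6 + 2e-6 * 1.0 = 4e-6
    np.testing.assert_allclose([1.0000019], [1.0], atol=2e-6, rtol=2e-6)

    # Would fail: 5e-6 exceeds the combined tolerance of 4e-6
    # np.testing.assert_allclose([1.000005], [1.0], atol=2e-6, rtol=2e-6)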