Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Torch issue716 #18

Merged
merged 5 commits into from
Apr 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 59 additions & 8 deletions qiskit_machine_learning/connectors/torch_connector.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
# (C) Copyright IBM 2021, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
Expand All @@ -13,7 +13,7 @@
"""A connector to use Qiskit (Quantum) Neural Networks as PyTorch modules."""
from __future__ import annotations

from typing import Tuple, Any, cast
from typing import Tuple, Any, cast, Literal

import numpy as np

Expand Down Expand Up @@ -52,12 +52,57 @@ class Module: # type: ignore
pass


def _get_einsum_signature(n_dimensions: int, return_type: Literal["input", "weight"]) -> str:
"""
Generate an Einstein summation signature for a given number of dimensions and return type.

Args:
n_dimensions (int): The number of dimensions for the summation.
return_type (Literal["input", "weight"]): The type of the return signature.
- "input": Return signature includes all input indices except the last one.
- "weight": Return signature includes only the last index as the output.

Returns:
str: The Einstein summation signature.

Raises:
RuntimeError: If the number of dimensions exceeds the character limit.
ValueError: If an invalid return type is provided.

Example:
Consider a scenario where n_dimensions is 3 and return_type is "input":
>>> _get_einsum_signature(3, "input")
'ab,abc->ac'
This returns the Einstein summation signature 'ab,abc->ac' for input with three dimensions.
"""
trace = ""
char_limit = 26
for i in range(n_dimensions):
trace += chr(97 + i) # chr(97) == 'a'
if i >= char_limit:
raise RuntimeError(
f"Cannot define an Einstein summation with more tha {char_limit:d} dimensions."
)

if return_type == "input":
signature = f"{trace[:-1]},{trace:s}->{trace[0] + trace[2:]}"
elif return_type == "weight":
signature = f"{trace[:-1]},{trace:s}->{trace[-1]}"
else:
raise ValueError(
f'The only allowed return types are ["input", "weight"], got {return_type:s} instead.'
)

return signature


@_optionals.HAS_TORCH.require_in_instance
class TorchConnector(Module):
"""Connects a Qiskit (Quantum) Neural Network to PyTorch."""

# pylint: disable=abstract-method
class _TorchNNFunction(Function):

# pylint: disable=arguments-differ
@staticmethod
def forward( # type: ignore
Expand Down Expand Up @@ -187,7 +232,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
# able to do back-prop in a batched manner.
# Pytorch does not support sparse einsum, so we rely on Sparse.
# pylint: disable=no-member
input_grad = sparse.einsum("ij,ijk->ik", grad_coo, input_grad)
n_dimension = max(grad_coo.ndim, input_grad.ndim)
signature = _get_einsum_signature(n_dimension, return_type="input")
input_grad = sparse.einsum(signature, grad_coo, input_grad)

# return sparse gradients
input_grad = torch.sparse_coo_tensor(input_grad.coords, input_grad.data)
Expand All @@ -205,7 +252,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
input_grad = torch.as_tensor(input_grad, dtype=torch.float)

# same as above
input_grad = torch.einsum("ij,ijk->ik", grad_output.detach().cpu(), input_grad)
n_dimension = max(grad_output.detach().cpu().ndim, input_grad.ndim)
signature = _get_einsum_signature(n_dimension, return_type="input")
input_grad = torch.einsum(signature, grad_output.detach().cpu(), input_grad)

# place the resulting tensor to the device where they were stored
input_grad = input_grad.to(input_data.device)
Expand All @@ -226,7 +275,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
# w.r.t. each parameter k. The weights' dimension is independent of the
# batch size.
# pylint: disable=no-member
weights_grad = sparse.einsum("ij,ijk->k", grad_coo, weights_grad)
n_dimension = max(grad_coo.ndim, weights_grad.ndim)
signature = _get_einsum_signature(n_dimension, return_type="weight")
weights_grad = sparse.einsum(signature, grad_coo, weights_grad)

# return sparse gradients
weights_grad = torch.sparse_coo_tensor(
Expand All @@ -244,9 +295,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
weights_grad = weights_grad.todense()
weights_grad = torch.as_tensor(weights_grad, dtype=torch.float)
# same as above
weights_grad = torch.einsum(
"ij,ijk->k", grad_output.detach().cpu(), weights_grad
)
n_dimension = max(grad_output.detach().cpu().ndim, weights_grad.ndim)
signature = _get_einsum_signature(n_dimension, return_type="weight")
weights_grad = torch.einsum(signature, grad_output.detach().cpu(), weights_grad)

# place the resulting tensor to the device where they were stored
weights_grad = weights_grad.to(weights.device)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
fixes:
- |
    Fixes the dimension mismatch error in the `torch_connector` that was raised when using
    datasets with a number of dimensions other than three. The updated implementation builds
    the Einstein summation signature dynamically from the number of dimensions `ndim` of the
    input data (up to 26 dimensions).
16 changes: 15 additions & 1 deletion test/connectors/test_torch_connector.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2021, 2023.
# (C) Copyright IBM 2021, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
Expand All @@ -25,6 +25,7 @@
from qiskit_machine_learning import QiskitMachineLearningError
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.connectors.torch_connector import _get_einsum_signature


@ddt
Expand All @@ -44,6 +45,19 @@ def setup_test(self):
torch.tensor([[[1.0], [2.0]], [[3.0], [4.0]]]),
]

def test_get_einsum_signature(self):
    """Check signature generation and its error handling."""
    # Known-good signatures for three dimensions.
    for return_type, expected in (("input", "ab,abc->ac"), ("weight", "ab,abc->c")):
        self.assertEqual(_get_einsum_signature(3, return_type), expected)

    # An unrecognized return type must be rejected.
    with self.assertRaises(ValueError):
        _get_einsum_signature(3, "invalid_type")

    # More dimensions than available index letters must be rejected.
    with self.assertRaises(RuntimeError):
        _get_einsum_signature(30, "input")

def _validate_backward_automatically(self, model: TorchConnector) -> None:
"""Uses PyTorch to validate the backward pass / autograd.

Expand Down