Merge pull request #18 from OkuyanBoga/torch_issue716
Torch issue716
OkuyanBoga authored Apr 3, 2024
2 parents 11cde5f + 04b886d commit aea890d
Showing 3 changed files with 80 additions and 9 deletions.
67 changes: 59 additions & 8 deletions qiskit_machine_learning/connectors/torch_connector.py
@@ -1,6 +1,6 @@
# This code is part of a Qiskit project.
#
-# (C) Copyright IBM 2021, 2023.
+# (C) Copyright IBM 2021, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -13,7 +13,7 @@
"""A connector to use Qiskit (Quantum) Neural Networks as PyTorch modules."""
from __future__ import annotations

-from typing import Tuple, Any, cast
+from typing import Tuple, Any, cast, Literal

import numpy as np

@@ -52,12 +52,57 @@ class Module: # type: ignore
pass


def _get_einsum_signature(n_dimensions: int, return_type: Literal["input", "weight"]) -> str:
    """
    Generate an Einstein summation signature for a given number of dimensions and return type.

    Args:
        n_dimensions (int): The number of dimensions for the summation.
        return_type (Literal["input", "weight"]): The type of the return signature.

            - "input": the output keeps all indices of the second operand except the second
              one, which is summed over (e.g. 'ab,abc->ac').
            - "weight": the output keeps only the last index (e.g. 'ab,abc->c').

    Returns:
        str: The Einstein summation signature.

    Raises:
        RuntimeError: If the number of dimensions exceeds the character limit.
        ValueError: If an invalid return type is provided.

    Example:
        Consider a scenario where n_dimensions is 3 and return_type is "input":

        >>> _get_einsum_signature(3, "input")
        'ab,abc->ac'

        This returns the Einstein summation signature 'ab,abc->ac' for an input
        gradient with three dimensions.
    """
    trace = ""
    char_limit = 26
    for i in range(n_dimensions):
        trace += chr(97 + i)  # chr(97) == 'a'
        if i >= char_limit:
            raise RuntimeError(
                f"Cannot define an Einstein summation with more than {char_limit:d} dimensions."
            )

    if return_type == "input":
        signature = f"{trace[:-1]},{trace:s}->{trace[0] + trace[2:]}"
    elif return_type == "weight":
        signature = f"{trace[:-1]},{trace:s}->{trace[-1]}"
    else:
        raise ValueError(
            f'The only allowed return types are ["input", "weight"], got {return_type:s} instead.'
        )

    return signature
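
A quick usage sketch for the helper above (not part of the diff; it only assumes that `_get_einsum_signature` is importable from `qiskit_machine_learning.connectors.torch_connector`, which the new test below also relies on):

from qiskit_machine_learning.connectors.torch_connector import _get_einsum_signature

# Three dimensions: the batched case the hard-coded "ij,ijk->..." strings used to cover.
assert _get_einsum_signature(3, "input") == "ab,abc->ac"
assert _get_einsum_signature(3, "weight") == "ab,abc->c"

# Four dimensions: one extra axis; the second index is still the only one summed over.
assert _get_einsum_signature(4, "input") == "abc,abcd->acd"
assert _get_einsum_signature(4, "weight") == "abc,abcd->d"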


@_optionals.HAS_TORCH.require_in_instance
class TorchConnector(Module):
"""Connects a Qiskit (Quantum) Neural Network to PyTorch."""

# pylint: disable=abstract-method
class _TorchNNFunction(Function):

# pylint: disable=arguments-differ
@staticmethod
def forward( # type: ignore
@@ -187,7 +232,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
# able to do back-prop in a batched manner.
# Pytorch does not support sparse einsum, so we rely on Sparse.
# pylint: disable=no-member
input_grad = sparse.einsum("ij,ijk->ik", grad_coo, input_grad)
n_dimension = max(grad_coo.ndim, input_grad.ndim)
signature = _get_einsum_signature(n_dimension, return_type="input")
input_grad = sparse.einsum(signature, grad_coo, input_grad)

# return sparse gradients
input_grad = torch.sparse_coo_tensor(input_grad.coords, input_grad.data)
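
A rough sketch of the sparse round trip above, using random pydata `sparse` arrays as stand-ins for the actual QNN gradients (the shapes and the explicit `size=` argument are assumptions of this example, not taken from the connector):

import sparse
import torch

from qiskit_machine_learning.connectors.torch_connector import _get_einsum_signature

grad_coo = sparse.random((5, 3), density=0.5)     # stand-in for the upstream gradient
jacobian = sparse.random((5, 3, 2), density=0.5)  # stand-in for d(output)/d(input)

signature = _get_einsum_signature(max(grad_coo.ndim, jacobian.ndim), return_type="input")
result = sparse.einsum(signature, grad_coo, jacobian)  # still a sparse.COO array

# Convert back to a torch sparse tensor via the COO coordinates and values.
torch_grad = torch.sparse_coo_tensor(result.coords, result.data, size=result.shape)
print(signature, torch_grad.shape)  # ab,abc->ac torch.Size([5, 2])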
@@ -205,7 +252,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
input_grad = torch.as_tensor(input_grad, dtype=torch.float)

# same as above
input_grad = torch.einsum("ij,ijk->ik", grad_output.detach().cpu(), input_grad)
n_dimension = max(grad_output.detach().cpu().ndim, input_grad.ndim)
signature = _get_einsum_signature(n_dimension, return_type="input")
input_grad = torch.einsum(signature, grad_output.detach().cpu(), input_grad)

# place the resulting tensor to the device where they were stored
input_grad = input_grad.to(input_data.device)
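
For the dense branch, a small sketch of the contraction with made-up shapes (a (2, 2)-shaped QNN output over a batch of 5 samples and 4 input features are assumptions of this example):

import torch

from qiskit_machine_learning.connectors.torch_connector import _get_einsum_signature

grad_output = torch.rand(5, 2, 2)        # assumed upstream gradient, ndim == 3
input_jacobian = torch.rand(5, 2, 2, 4)  # assumed d(output)/d(input), ndim == 4

signature = _get_einsum_signature(max(grad_output.ndim, input_jacobian.ndim), return_type="input")
input_grad = torch.einsum(signature, grad_output, input_jacobian)
print(signature)         # abc,abcd->acd
print(input_grad.shape)  # torch.Size([5, 2, 4])

The old hard-coded "ij,ijk->ik" signature would reject these operand dimensions; the generated signature handles them.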
@@ -226,7 +275,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
# w.r.t. each parameter k. The weights' dimension is independent of the
# batch size.
# pylint: disable=no-member
- weights_grad = sparse.einsum("ij,ijk->k", grad_coo, weights_grad)
+ n_dimension = max(grad_coo.ndim, weights_grad.ndim)
+ signature = _get_einsum_signature(n_dimension, return_type="weight")
+ weights_grad = sparse.einsum(signature, grad_coo, weights_grad)

# return sparse gradients
weights_grad = torch.sparse_coo_tensor(
@@ -244,9 +295,9 @@ def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore
weights_grad = weights_grad.todense()
weights_grad = torch.as_tensor(weights_grad, dtype=torch.float)
# same as above
- weights_grad = torch.einsum(
-     "ij,ijk->k", grad_output.detach().cpu(), weights_grad
- )
+ n_dimension = max(grad_output.detach().cpu().ndim, weights_grad.ndim)
+ signature = _get_einsum_signature(n_dimension, return_type="weight")
+ weights_grad = torch.einsum(signature, grad_output.detach().cpu(), weights_grad)

# place the resulting tensor to the device where they were stored
weights_grad = weights_grad.to(weights.device)
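
The analogous sketch for the weight gradient (shapes again made up): the "weight" signature sums out everything except the last axis, so the result has one entry per trainable weight regardless of batch size, matching the comment above.

import torch

from qiskit_machine_learning.connectors.torch_connector import _get_einsum_signature

grad_output = torch.rand(5, 3)     # assumed (batch, outputs)
weights_jac = torch.rand(5, 3, 7)  # assumed (batch, outputs, num_weights)

signature = _get_einsum_signature(max(grad_output.ndim, weights_jac.ndim), return_type="weight")
weights_grad = torch.einsum(signature, grad_output, weights_jac)
print(signature)           # ab,abc->c
print(weights_grad.shape)  # torch.Size([7])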
@@ -0,0 +1,6 @@
---
fixes:
- |
    Fixes the dimension mismatch error in the `torch_connector` raised when using datasets with a number of
    dimensions other than three. The updated implementation defines the Einstein summation signature dynamically,
    based on the number of dimensions (`ndim`) of the input data (up to 26 dimensions).
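
To illustrate the kind of workload this note refers to, here is a hedged end-to-end sketch: a `SamplerQNN` with a two-dimensional `output_shape` wrapped in a `TorchConnector`, so the backward pass has to contract 4-dimensional gradients. The circuit, the interpret function, and all shapes are invented for the example and are not the configuration from the original issue.

import torch
from qiskit.circuit import ParameterVector, QuantumCircuit
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks import SamplerQNN

x = ParameterVector("x", 2)
w = ParameterVector("w", 2)
qc = QuantumCircuit(2)
qc.ry(x[0], 0)
qc.ry(x[1], 1)
qc.ry(w[0], 0)
qc.ry(w[1], 1)
qc.measure_all()

# A (2, 2)-shaped output grid: each measured bitstring is mapped onto a 2x2 cell.
qnn = SamplerQNN(
    circuit=qc,
    input_params=x,
    weight_params=w,
    interpret=lambda i: (i % 2, i // 2),
    output_shape=(2, 2),
    input_gradients=True,
)
model = TorchConnector(qnn)

batch = torch.rand(4, 2, requires_grad=True)
out = model(batch)       # shape (4, 2, 2)
out.sum().backward()     # exercises the dynamically generated einsum signatures
print(batch.grad.shape)  # torch.Size([4, 2])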
16 changes: 15 additions & 1 deletion test/connectors/test_torch_connector.py
@@ -1,6 +1,6 @@
# This code is part of a Qiskit project.
#
-# (C) Copyright IBM 2021, 2023.
+# (C) Copyright IBM 2021, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -25,6 +25,7 @@
from qiskit_machine_learning import QiskitMachineLearningError
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks import SamplerQNN, EstimatorQNN
from qiskit_machine_learning.connectors.torch_connector import _get_einsum_signature


@ddt
@@ -44,6 +45,19 @@ def setup_test(self):
torch.tensor([[[1.0], [2.0]], [[3.0], [4.0]]]),
]

def test_get_einsum_signature(self):
    # Test valid inputs and outputs
    self.assertEqual(_get_einsum_signature(3, "input"), "ab,abc->ac")
    self.assertEqual(_get_einsum_signature(3, "weight"), "ab,abc->c")

    # Test raises for invalid return_type
    with self.assertRaises(ValueError):
        _get_einsum_signature(3, "invalid_type")

    # Test raises for exceeding character limit
    with self.assertRaises(RuntimeError):
        _get_einsum_signature(30, "input")

def _validate_backward_automatically(self, model: TorchConnector) -> None:
"""Uses PyTorch to validate the backward pass / autograd.
