Support sparse matrices in cast methods #68

Merged on Mar 1, 2022 (15 commits)
35 changes: 15 additions & 20 deletions src/qibojit/custom_operators/__init__.py
@@ -114,6 +114,7 @@ def set_platform(self, name): # pragma: no cover
self.Tensor = xp.ndarray
self.random = xp.random
self.newaxis = xp.newaxis
self.sparse = self.platform.sparse
# enable multi-GPU if no macos
import sys
if sys.platform != "darwin":
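
Aside (not part of the patch): a minimal sketch of what the new sparse attribute exposes. The backend object is taken as a function argument here because its construction is not shown in this diff, and the helper name is made up for the example.

from scipy import sparse

def sparse_module_roundtrip(K):
    # After this change K.sparse mirrors the active platform's sparse module
    # (scipy.sparse on the numba platform, cupy's sparse module on GPU).
    m = sparse.random(8, 8, density=0.2, format="csr", dtype="float64")
    return K.sparse.issparse(K.cast(m))  # expected to be True
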
@@ -142,15 +143,16 @@ def set_threads(self, nthreads):
def to_numpy(self, x):
if isinstance(x, self.np.ndarray):
return x
elif self.platform.name in ("cupy", "cuquantum") and isinstance(x, self.platform.cp.ndarray): # pragma: no cover
return x.get()
return self.np.array(x)
return self.platform.to_numpy(x)

def cast(self, x, dtype='DTYPECPX'):
if isinstance(dtype, str):
dtype = self.dtypes(dtype)
return self.platform.cast(x, dtype=dtype)

def issparse(self, x):
return self.platform.issparse(x)

def check_shape(self, shape):
if self.platform.name in ("cupy", "cuquantum") and isinstance(shape, self.Tensor): # pragma: no cover
shape = shape.get()
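
For context, a hedged sketch of the backend-level behaviour these three methods are meant to provide; K again stands for the backend instance (construction omitted) and the helper name is invented for the example.

import numpy as np
from scipy import sparse

def check_sparse_roundtrip(K):
    m = sparse.random(64, 64, density=0.1, format="coo", dtype="float64")
    assert K.issparse(m)            # delegated to the platform
    casted = K.cast(m)              # stays sparse; dtype is adjusted if needed
    dense = K.to_numpy(casted)      # sparse input comes back as a dense numpy array
    np.testing.assert_allclose(dense, m.toarray())
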
@@ -176,18 +178,19 @@ def expm(self, x):
return self.backend.asarray(super().expm(x))
return super().expm(x)

def eigh(self, x):
if self.platform.name == "cupy" and self.platform.is_hip: # pragma: no cover
# FIXME: Fallback to numpy because eigh is not implemented in rocblas
result = self.np.linalg.eigh(self.to_numpy(x))
return self.cast(result[0]), self.cast(result[1])
return super().eigh(x)
def eigh(self, x, k=6):
return self.platform.eigh(x, k)

def eigvalsh(self, x):
if self.platform.name == "cupy" and self.platform.is_hip: # pragma: no cover
def eigvalsh(self, x, k=6):
if self.issparse(x):
log.warning("Calculating sparse matrix eigenvectors because "
"sparse modules do not provide ``eigvals`` method.")
return self.eigh(x, k=k)[0]
elif self.platform.name == "cupy" and self.platform.is_hip: # pragma: no cover
# FIXME: Fallback to numpy because eigvalsh is not implemented in rocblas
return self.cast(self.np.linalg.eigvalsh(self.to_numpy(x)))
return super().eigvalsh(x)
else:
return self.np.linalg.eigvalsh(x)

def unique(self, x, return_counts=False):
if self.platform.name in ("cupy", "cuquantum"): # pragma: no cover
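
The sparse branch of eigvalsh reuses eigh and keeps only the eigenvalues. A small dense sketch of the identity it relies on: for a symmetric matrix the two routines agree on the spectrum.

import numpy as np

rng = np.random.default_rng(0)
a = rng.random((16, 16))
a = a + a.T                              # symmetric, as eigvalsh assumes
vals_direct = np.linalg.eigvalsh(a)
vals_via_eigh, _ = np.linalg.eigh(a)     # same eigenvalues, plus eigenvectors
np.testing.assert_allclose(vals_direct, vals_via_eigh)
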
@@ -339,14 +342,6 @@ def transpose_state(self, pieces, state, nqubits, order):
state = self._constructed_platforms.get("numba").transpose_state(pieces, state, nqubits, order)
return self.reshape(state, original_shape)

def assert_allclose(self, value, target, rtol=1e-7, atol=0.0):
if self.platform.name in ("cupy", "cuquantum"): # pragma: no cover
if isinstance(value, self.backend.ndarray):
value = value.get()
if isinstance(target, self.backend.ndarray):
target = target.get()
self.np.testing.assert_allclose(value, target, rtol=rtol, atol=atol)

def apply_gate(self, state, gate, nqubits, targets, qubits=None):
return self.platform.one_qubit_base(state, nqubits, *targets, "apply_gate", gate, qubits)

79 changes: 72 additions & 7 deletions src/qibojit/custom_operators/platforms.py
@@ -7,13 +7,22 @@ def __init__(self): # pragma: no cover
self.name = "abstract"
self.gates = None
self.ops = None
self.sparse = None
self.test_regressions = {}
self.supports_multigpu = False

@abstractmethod
def cast(self, x, dtype=None, order=None): # pragma: no cover
raise NotImplementedError

@abstractmethod
def to_numpy(self, x): # pragma: no cover
raise NotImplementedError

@abstractmethod
def issparse(self, x): # pragma: no cover
raise NotImplementedError

@abstractmethod
def one_qubit_base(self, state, nqubits, target, kernel, gate, qubits=None): # pragma: no cover
raise NotImplementedError
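
To illustrate the contract added above, a toy sketch (not qibojit code) of what a concrete platform now has to provide for the three new abstract methods:

import numpy as np
from scipy import sparse

class ToyPlatform:
    """Hypothetical stand-in for an AbstractPlatform subclass."""

    def cast(self, x, dtype=None, order="K"):
        if sparse.issparse(x):
            return x if dtype is None else x.astype(dtype, copy=False)
        return np.asarray(x, dtype=dtype, order=order)

    def to_numpy(self, x):
        return x.toarray() if sparse.issparse(x) else np.asarray(x)

    def issparse(self, x):
        return sparse.issparse(x)
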
@@ -65,13 +74,16 @@ def __init__(self):
4: self.gates.apply_four_qubit_gate_kernel,
5: self.gates.apply_five_qubit_gate_kernel
}
from scipy import sparse
self.sparse = sparse

def cast(self, x, dtype=None, order='K'):
if dtype is None:
dtype = x.dtype
if isinstance(x, self.np.ndarray):
if dtype is None:
return x
else:
return x.astype(dtype, copy=False, order=order)
return x.astype(dtype, copy=False, order=order)
elif self.sparse.issparse(x):
return x.astype(dtype, copy=False)
else:
try:
x = self.np.array(x, dtype=dtype, order=order)
@@ -81,6 +93,22 @@ def cast(self, x, dtype=None, order='K'):
x = self.np.array(x.get(), dtype=dtype, copy=False, order=order)
return x

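Note on the sparse branch of cast above: scipy sparse matrices support astype, but they have no memory-order notion, so order is only forwarded for dense arrays. A quick check:

from scipy import sparse

m = sparse.random(16, 16, density=0.1, format="csr", dtype="float64")
m_c = m.astype("complex128", copy=False)   # dtype changes, format stays csr
assert m_c.dtype == "complex128" and m_c.format == "csr"
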
def to_numpy(self, x):
if self.sparse.issparse(x):
return x.toarray()
return self.np.array(x, copy=False)

def issparse(self, x):
return self.sparse.issparse(x)

def eigh(self, x, k=6):
if self.issparse(x):
if k < x.shape[0]:
from scipy.sparse.linalg import eigsh
return eigsh(x, k=k, which='SA')
x = self.to_numpy(x)
return self.np.linalg.eigh(x)

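The k < x.shape[0] guard exists because the ARPACK-based eigsh computes strictly fewer eigenpairs than the matrix dimension; for a full spectrum the matrix is densified instead. A sketch of both paths:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigsh

m = sparse.random(16, 16, density=0.3, format="csr", random_state=1)
m = m + m.T

# k < n: iterative solver, k smallest ('SA') eigenpairs on the sparse matrix
vals_k, vecs_k = eigsh(m, k=6, which="SA")

# full spectrum: densify and use the dense solver, as the method does for k >= n
vals_all, vecs_all = np.linalg.eigh(m.toarray())
np.testing.assert_allclose(np.sort(vals_k), vals_all[:6], atol=1e-8)
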
def one_qubit_base(self, state, nqubits, target, kernel, gate, qubits=None):
ncontrols = len(qubits) - 1 if qubits is not None else 0
m = nqubits - target - 1
@@ -172,6 +200,9 @@ def __init__(self):
self.is_hip = cupy_backends.cuda.api.runtime.is_hip
self.KERNELS = ("apply_gate", "apply_x", "apply_y", "apply_z", "apply_z_pow",
"apply_two_qubit_gate", "apply_fsim", "apply_swap")
from scipy import sparse
self.npsparse = sparse
self.sparse = cp.sparse
if self.is_hip: # pragma: no cover
self.test_regressions = {
"test_measurementresult_apply_bitflips": [
@@ -246,13 +277,47 @@ def calculate_blocks(self, nstates, block_size=DEFAULT_BLOCK_SIZE):
return nblocks, block_size

def cast(self, x, dtype=None, order='C'):
if dtype is None:
dtype = x.dtype
if isinstance(x, self.cp.ndarray):
if dtype is None:
return x
return x.astype(dtype, copy=False, order=order)
elif self.sparse.issparse(x):
if dtype != x.dtype:
return x.astype(dtype)
else:
return x.astype(dtype, copy=False, order=order)
return x
elif self.npsparse.issparse(x):
cls = getattr(self.sparse, x.__class__.__name__)
return cls(x, dtype=dtype)
return self.cp.asarray(x, dtype=dtype, order=order)

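The npsparse branch converts a host (scipy) sparse matrix to its device counterpart by looking up the class of the same name in cupy's sparse module. A sketch of that lookup, assuming cupy is installed; cupyx.scipy.sparse is what cp.sparse resolves to:

import scipy.sparse as host_sparse
import cupyx.scipy.sparse as dev_sparse  # assumed available (GPU install)

m = host_sparse.random(32, 32, density=0.1, format="csr", dtype="float64")
cls = getattr(dev_sparse, m.__class__.__name__)   # csr_matrix -> device csr_matrix
m_dev = cls(m)                                     # copies the data to the GPU
assert dev_sparse.issparse(m_dev)
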
def to_numpy(self, x):
if isinstance(x, self.cp.ndarray):
return x.get()
elif self.sparse.issparse(x):
return x.toarray().get()
elif self.npsparse.issparse(x):
return x.toarray()
return self.np.array(x, copy=False)

def issparse(self, x):
return self.sparse.issparse(x) or self.npsparse.issparse(x)

def eigh(self, x, k=6):
if self.issparse(x):
if k < x.shape[0]:
# Fallback to scipy's eigsh because cupy's sparse eigensolver does not support which='SA'
from scipy.sparse.linalg import eigsh # pylint: disable=import-error
result = eigsh(x.get(), k=k, which='SA')
return self.cast(result[0]), self.cast(result[1])
x = x.toarray()
if self.is_hip:
# Fallback to numpy because eigh is not implemented in rocblas
result = self.np.linalg.eigh(self.to_numpy(x))
return self.cast(result[0]), self.cast(result[1])
else:
return self.cp.linalg.eigh(x)

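Because cupy's sparse eigensolver does not cover the which='SA' case, the sparse branch above round-trips through the host. A hedged sketch of that path, assuming a working cupy installation:

import cupy as cp
import cupyx.scipy.sparse as dev_sparse
import scipy.sparse as host_sparse
from scipy.sparse.linalg import eigsh

m = host_sparse.random(32, 32, density=0.2, format="csr", random_state=0)
m = m + m.T
m_dev = dev_sparse.csr_matrix(m)                  # device-side sparse matrix

vals, vecs = eigsh(m_dev.get(), k=6, which="SA")  # solve on the host copy
vals, vecs = cp.asarray(vals), cp.asarray(vecs)   # move results back to the device
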
def get_kernel_type(self, state):
if state.dtype == self.cp.complex128:
return "double"
73 changes: 60 additions & 13 deletions src/qibojit/tests/test_platforms.py
@@ -34,6 +34,22 @@ def test_cast(platform, array_type):
K.assert_allclose(final, target)


@pytest.mark.parametrize("array_type", [None, "float32", "float64"])
@pytest.mark.parametrize("format", ["coo", "csr", "csc", "dia"])
def test_sparse_cast(platform, array_type, format):
from scipy import sparse
sptarget = sparse.rand(512, 512, dtype=array_type, format=format)
assert K.issparse(sptarget)
final = K.to_numpy(K.cast(sptarget))
target = sptarget.toarray()
K.assert_allclose(final, target)
if K.platform.name != "numba": # pragma: no cover
sptarget = getattr(K.sparse, sptarget.__class__.__name__)(sptarget)
assert K.issparse(sptarget)
final = K.to_numpy(K.cast(sptarget))
K.assert_allclose(final, target)


def test_to_numpy(platform):
x = [0, 1, 2]
target = K.to_numpy(K.cast(x))
@@ -53,19 +69,50 @@ def test_basic_matrices(platform):
K.assert_allclose(K.expm(m), expm(m))


def test_backend_eigh(platform):
m = np.random.random((16, 16))
eigvals2, eigvecs2 = np.linalg.eigh(m)
eigvals1, eigvecs1 = K.eigh(K.cast(m))
K.assert_allclose(eigvals1, eigvals2)
K.assert_allclose(K.abs(eigvecs1), np.abs(eigvecs2))


def test_backend_eigvalsh(platform):
m = np.random.random((16, 16))
target = np.linalg.eigvalsh(m)
result = K.eigvalsh(K.cast(m))
K.assert_allclose(target, result)
@pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"])
def test_backend_eigh(platform, sparse_type):
if sparse_type is None:
m = np.random.random((16, 16))
eigvals1, eigvecs1 = K.eigh(K.cast(m))
eigvals2, eigvecs2 = np.linalg.eigh(m)
else:
from scipy.sparse import rand
m = rand(16, 16, format=sparse_type)
m = m + m.T
eigvals1, eigvecs1 = K.eigh(K.cast(m), k=16)
eigvals2, eigvecs2 = K.eigh(K.cast(m.toarray()))
K.assert_allclose(eigvals1, eigvals2, atol=1e-10)
K.assert_allclose(K.abs(eigvecs1), np.abs(eigvecs2), atol=1e-10)


@pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"])
def test_backend_eigvalsh(platform, sparse_type):
if sparse_type is None:
m = np.random.random((16, 16))
target = np.linalg.eigvalsh(m)
result = K.eigvalsh(K.cast(m))
else:
from scipy.sparse import rand
m = rand(16, 16, format=sparse_type)
m = m + m.T
result = K.eigvalsh(K.cast(m), k=16)
target, _ = K.eigh(K.cast(m.toarray()))
K.assert_allclose(target, result, atol=1e-10)

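A note on the m + m.T step used in the sparse cases above: eigh and eigsh assume a symmetric matrix, and a random sparse matrix is generally not symmetric on its own, so the tests symmetrize it first:

import numpy as np
from scipy.sparse import rand

m = rand(16, 16, format="csr", density=0.3, random_state=2)
m = m + m.T                                    # symmetrize before eigh/eigsh
assert np.allclose(m.toarray(), m.T.toarray())
vals = np.linalg.eigvalsh(m.toarray())         # well-defined real, ascending spectrum
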

@pytest.mark.parametrize("sparse_type", ["coo", "csr", "csc", "dia"])
@pytest.mark.parametrize("k", [6, 8])
def test_backend_eigh_sparse(platform, sparse_type, k):
from scipy.sparse.linalg import eigsh
from scipy import sparse
from qibo import hamiltonians
ham = hamiltonians.TFIM(6, h=1.0)
m = getattr(sparse, f"{sparse_type}_matrix")(K.to_numpy(ham.matrix))
eigvals1, eigvecs1 = K.eigh(K.cast(m), k)
eigvals2, eigvecs2 = eigsh(m, k, which='SA')
eigvals1 = K.to_numpy(eigvals1)
eigvals2 = K.to_numpy(eigvals2)
K.assert_allclose(sorted(eigvals1), sorted(eigvals2))

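The final assertion sorts both spectra because the ordering of the values returned by the iterative solver is not relied upon; only the set of the k lowest eigenvalues is compared. A generic sketch of the same comparison (the TFIM Hamiltonian from the test is not rebuilt here):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigsh

m = sparse.random(64, 64, density=0.1, format="csr", random_state=3)
m = m + m.T
vals_sparse, _ = eigsh(m, k=6, which="SA")
vals_dense = np.linalg.eigvalsh(m.toarray())[:6]   # ascending by construction
np.testing.assert_allclose(np.sort(vals_sparse), vals_dense, atol=1e-8)
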

def test_unique_and_gather(platform):