Merge thinc-apple-ops into Thinc
This change adds `AppleOps` to Thinc to ensure that the AMX unit is
always used on Apple Silicon Macs. Before this change, a user would get
much worse performance if they forgot to install `thinc-apple-ops`.
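
For example, after this change the following should select `AppleOps` on any
Mac without `thinc-apple-ops` installed (a sketch only, assuming the existing
`get_ops` helper and its `"cpu"` alias for the best available CPU backend):

```python
import numpy

from thinc.api import get_ops

ops = get_ops("cpu")  # assumed to resolve to AppleOps on macOS, NumpyOps elsewhere
a = numpy.random.randn(32, 64).astype("f")
b = numpy.random.randn(64, 16).astype("f")
c = ops.gemm(a, b)  # routed through Accelerate's cblas_sgemm on macOS
assert c.shape == (32, 16)
```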

The `apple_ops` and `_accelerate` modules are built conditionally. When
detecting the best CPU implementation, we rely on a `try...except`
import to determine whether Apple ops are available.
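
A minimal sketch of that detection pattern (the `_BestCPUOps` alias is
illustrative, not part of the diff): the compiled `apple_ops` extension only
exists on macOS builds, so the import fails everywhere else and we fall back
to `NumpyOps`.

```python
# Sketch only: conditional import used to pick the best CPU ops class.
try:
    from thinc.backends.apple_ops import AppleOps as _BestCPUOps
except ImportError:  # extension is not built on non-macOS platforms
    from thinc.backends.numpy_ops import NumpyOps as _BestCPUOps
```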

Even though x86_64 Macs do not have an AMX unit, Accelerate is
competitive with BLIS, so it does not hurt to enable Apple ops on all
Macs.
danieldk committed Apr 15, 2024
1 parent ec68d7d commit 6d1098d
Showing 13 changed files with 281 additions and 25 deletions.
11 changes: 0 additions & 11 deletions .github/workflows/tests.yml
@@ -152,14 +152,3 @@ jobs:

      - name: Run tests with extras
        run: python -m pytest --pyargs thinc --cov=thinc --cov-report=term -p thinc.tests.enable_tensorflow -p thinc.tests.enable_mxnet

      - name: Run tests for thinc-apple-ops
        run: |
          pip uninstall -y tensorflow
          pip install thinc-apple-ops
          python -m pytest --pyargs thinc_apple_ops
        if: matrix.os == 'macos-latest' && matrix.python_version == '3.10'

      - name: Run tests with thinc-apple-ops
        run: python -m pytest --pyargs thinc
        if: matrix.os == 'macos-latest' && matrix.python_version == '3.10'
16 changes: 14 additions & 2 deletions setup.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
import platform
import sys
from setuptools.command.build_ext import build_ext
from sysconfig import get_path
@@ -13,14 +14,16 @@
# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#compiler-options
Options.docstrings = True

ACCELERATE = "thinc.backends._accelerate"
APPLE_OPS = ["thinc.backends.apple_ops", ACCELERATE]

PACKAGES = find_packages()
MOD_NAMES = [
    "thinc.backends.cblas",
    "thinc.backends.numpy_ops",
    "thinc.layers.sparselinear",
    "thinc.layers.premap_ids",
]
] + (APPLE_OPS if platform.system() == "Darwin" else [])
COMPILE_OPTIONS = {
    "msvc": ["/Ox", "/EHsc"],
    "other": ["-O3", "-Wno-strict-prototypes", "-Wno-unused-function", "-std=c++11"],
@@ -78,7 +81,16 @@ def setup_package():
    ext_modules = []
    for name in MOD_NAMES:
        mod_path = name.replace(".", "/") + ".pyx"
        ext = Extension(name, [mod_path], language="c++", include_dirs=include_dirs)
        if name == ACCELERATE:
            ext = Extension(
                name,
                [mod_path],
                language="c++",
                include_dirs=include_dirs,
                libraries=["blas"],
            )
        else:
            ext = Extension(name, [mod_path], language="c++", include_dirs=include_dirs)
        ext_modules.append(ext)
    print("Cythonizing sources")
    ext_modules = cythonize(
9 changes: 7 additions & 2 deletions thinc/api.py
@@ -11,7 +11,7 @@
    use_pytorch_for_gpu_memory,
    use_tensorflow_for_gpu_memory,
)
from .compat import enable_mxnet, enable_tensorflow, has_cupy
from .compat import enable_mxnet, enable_tensorflow, has_apple_ops, has_cupy
from .config import Config, ConfigValidationError, registry
from .initializers import (
    configure_normal_init,
@@ -162,6 +162,11 @@
    xp2torch,
)

try:
    from .backends import AppleOps
except ImportError:
    pass

# fmt: off
__all__ = [
    # .config
@@ -226,5 +231,5 @@
"pytorch_to_torchscript_wrapper",

"reduce_first", "reduce_last", "reduce_max", "reduce_mean", "reduce_sum",
]
] + (["AppleOps"] if has_apple_ops else [])
# fmt: on
13 changes: 7 additions & 6 deletions thinc/backends/__init__.py
@@ -4,7 +4,7 @@
from typing import Any, Callable, Dict, Optional, Type, cast

from .. import registry
from ..compat import cupy, has_cupy
from ..compat import cupy, has_apple_ops, has_cupy
from ..util import (
    assert_pytorch_installed,
    assert_tensorflow_installed,
@@ -19,6 +19,11 @@
from .numpy_ops import NumpyOps
from .ops import Ops

try:
    from .apple_ops import AppleOps
except ImportError:
    pass

context_ops: ContextVar[Optional[Ops]] = ContextVar("context_ops", default=None)
context_pools: ContextVar[dict] = ContextVar("context_pools", default={})

@@ -83,10 +88,6 @@ def use_tensorflow_for_gpu_memory() -> None:  # pragma: no cover


def _import_extra_cpu_backends():
    try:
        from thinc_apple_ops import AppleOps
    except ImportError:
        pass
    try:
        from thinc_bigendian_ops import BigEndianOps
    except ImportError:
@@ -175,4 +176,4 @@ def _get_thread_state() -> threading.local:
"MPSOps",
"NumpyOps",
"has_cupy",
]
] + (["AppleOps"] if has_apple_ops else [])
40 changes: 40 additions & 0 deletions thinc/backends/_accelerate.pxd
@@ -0,0 +1,40 @@
cdef extern from "Accelerate/Accelerate.h":
    enum CBLAS_ORDER: CblasRowMajor, CblasColMajor
    enum CBLAS_TRANSPOSE: CblasNoTrans, CblasTrans, CblasConjTrans
    enum CBLAS_UPLO: CblasUpper, CblasLower
    enum CBLAS_DIAG: CblasNonUnit, CblasUnit
    enum CBLAS_SIDE: CblasLeft, CblasRight

    # BLAS level 1 routines

    void cblas_sswap(int M, float *x, int incX, float *y, int incY) nogil
    void cblas_sscal(int N, float alpha, float *x, int incX) nogil
    void cblas_scopy(int N, float *x, int incX, float *y, int incY) nogil
    void cblas_saxpy(int N, float alpha, float *x, int incX, float *y, int incY) nogil
    float cblas_sdot(int N, float *x, int incX, float *y, int incY) nogil
    float cblas_snrm2(int N, float *x, int incX) nogil
    float cblas_sasum(int N, float *x, int incX) nogil
    int cblas_isamax(int N, float *x, int incX) nogil

    # BLAS level 2 routines
    void cblas_sgemv(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, int M, int N,
                     float alpha, float *A, int lda, float *x, int incX,
                     float beta, float *y, int incY) nogil

    void cblas_sger(CBLAS_ORDER Order, int M, int N, float alpha, float *x,
                    int incX, float *y, int incY, float *A, int lda) nogil

    # BLAS level 3 routines
    void cblas_sgemm(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA,
                     CBLAS_TRANSPOSE TransB, int M, int N, int K,
                     float alpha, float *A, int lda, float *B, int ldb,
                     float beta, float *C, int ldc) nogil


cdef void sgemm(bint TransA, bint TransB, int M, int N, int K,
                float alpha, const float* A, int lda, const float *B,
                int ldb, float beta, float* C, int ldc) nogil


cdef void saxpy(int N, float alpha, const float* X, int incX,
                float *Y, int incY) nogil
75 changes: 75 additions & 0 deletions thinc/backends/_accelerate.pyx
@@ -0,0 +1,75 @@
cimport numpy as np
from libc.stdint cimport uintptr_t

import numpy


cpdef np.ndarray gemm(float[:, ::1] A, float[:, ::1] B,
                      bint trans1=False, bint trans2=False,
                      np.ndarray out=None):
    cdef int nM = A.shape[0] if not trans1 else A.shape[1]
    cdef int nK = A.shape[1] if not trans1 else A.shape[0]
    cdef int nK_b = B.shape[0] if not trans2 else B.shape[1]
    cdef int nN = B.shape[1] if not trans2 else B.shape[0]

    cdef float[:, ::1] C = out

    if out is None:
        out = numpy.empty((nM, nN), dtype="f")
        C = out
    else:
        if C.shape[0] != nM or C.shape[1] != nN:
            msg = "Shape mismatch for output matrix, was: (%d, %d), expected (%d, %d)"
            raise ValueError(msg % (C.shape[0], C.shape[1], nM, nN))

    if nK != nK_b:
        msg = "Shape mismatch for gemm: (%d, %d), (%d, %d)"
        raise ValueError(msg % (nM, nK, nK_b, nN))

    if nM == 0 or nK == 0 or nN == 0:
        return out

    cblas_sgemm(
        CblasRowMajor,
        CblasTrans if trans1 else CblasNoTrans,
        CblasTrans if trans2 else CblasNoTrans,
        nM,
        nN,
        nK,
        1.0,
        &A[0, 0],
        A.shape[1],
        &B[0, 0],
        B.shape[1],
        0.0,
        &C[0, 0],
        C.shape[1]
    )
    return out


cdef void sgemm(bint TransA, bint TransB, int M, int N, int K,
                float alpha, const float* A, int lda, const float *B,
                int ldb, float beta, float* C, int ldc) nogil:
    cblas_sgemm(
        CblasRowMajor,
        CblasTrans if TransA else CblasNoTrans,
        CblasTrans if TransB else CblasNoTrans,
        M,
        N,
        K,
        alpha,
        A,
        lda,
        B,
        ldb,
        beta,
        C,
        ldc
    )


cdef void saxpy(int N, float alpha, const float* X, int incX,
                float *Y, int incY) nogil:
    cblas_saxpy(N, alpha, X, incX, Y, incY)
39 changes: 39 additions & 0 deletions thinc/backends/apple_ops.pyx
@@ -0,0 +1,39 @@
from typing import Optional

import numpy

from ._accelerate import gemm

from ._accelerate cimport saxpy, sgemm
from .cblas cimport CBlas, set_saxpy, set_sgemm

from .. import registry
from ..types import Floats2d
from .numpy_ops import NumpyOps


@registry.ops("AppleOps")
class AppleOps(NumpyOps):
"""Thinc Ops class that calls into Apple's native libraries for some
operations. Other operations fall back to numpy."""
name = "apple"
xp = numpy

def cblas(self) -> CBlas:
cdef CBlas cblas = CBlas()
set_saxpy(cblas, saxpy)
set_sgemm(cblas, sgemm)
return cblas

def gemm(
self,
x: Floats2d,
y: Floats2d,
out: Optional[Floats2d] = None,
trans1: bool = False,
trans2: bool = False,
) -> Floats2d:
"""Perform General Matrix Multiplication (GeMM) and optionally store
the result in the specified output variable.
"""
return gemm(x, y, out=out, trans1=trans1, trans2=trans2)
7 changes: 4 additions & 3 deletions thinc/backends/mps_ops.py
@@ -3,6 +3,7 @@
import numpy

from .. import registry
from ..compat import has_apple_ops
from .numpy_ops import NumpyOps
from .ops import Ops

@@ -12,11 +13,11 @@
    # during type checking.
    _Ops = Ops
else:
    try:
        from thinc_apple_ops import AppleOps
    if has_apple_ops:
        from .apple_ops import AppleOps

        _Ops = AppleOps
    except ImportError:
    else:
        _Ops = NumpyOps


4 changes: 4 additions & 0 deletions thinc/compat.py
@@ -1,3 +1,4 @@
import platform
import warnings

from packaging.version import Version
@@ -119,6 +120,9 @@ def enable_mxnet():
has_blis = False


# AppleOps is available unconditionally on macOS.
has_apple_ops = platform.system() == "Darwin"

has_gpu = has_cupy_gpu or has_torch_mps_gpu

__all__ = [
Empty file.
79 changes: 79 additions & 0 deletions thinc/tests/backends/_apple_blas/test_gemm.py
@@ -0,0 +1,79 @@
import numpy
import pytest

from thinc.compat import has_apple_ops

try:
    import thinc.backends._accelerate as accelerate
except ImportError:
    pass


@pytest.mark.skipif(not has_apple_ops, reason="Apple ops not available")
def test_basic_sgemm():
    A = numpy.random.randn(5, 4).astype("f")
    B = numpy.random.randn(4, 7).astype("f")
    C = accelerate.gemm(A, B)
    assert C.shape == (A.shape[0], B.shape[1])

    C_out = numpy.empty((5, 7), dtype="f")
    accelerate.gemm(A, B, out=C_out)

    numpy.testing.assert_allclose(C, C_out)


@pytest.mark.skipif(not has_apple_ops, reason="Apple ops not available")
def test_incorrect_output_size():
    A = numpy.ndarray((5, 4), dtype="f")
    B = numpy.ndarray((4, 7), dtype="f")

    with pytest.raises(ValueError, match=r"Shape mismatch for output matrix"):
        accelerate.gemm(A, B, out=numpy.ndarray((3, 7), dtype="f"))

    with pytest.raises(ValueError, match=r"Shape mismatch for output matrix"):
        accelerate.gemm(A, B, out=numpy.ndarray((5, 3), dtype="f"))


@pytest.mark.skipif(not has_apple_ops, reason="Apple ops not available")
@pytest.mark.parametrize(
    "A_shape,B_shape,transA,transB",
    [
        [(0, 0), (0, 0), False, False],
        [(0, 0), (0, 0), True, False],
        [(0, 0), (0, 0), False, True],
        [(0, 0), (0, 0), True, True],
        [(0, 5), (5, 0), False, False],
        [(5, 0), (5, 0), False, True],
        [(5, 0), (5, 0), True, False],
    ],
)
def test_zero_size(A_shape, B_shape, transA, transB):
    A = numpy.ndarray(A_shape, dtype="f")
    B = numpy.ndarray(B_shape, dtype="f")
    # Reference result from numpy, honoring both transpose flags.
    if transA and transB:
        C = numpy.dot(A.T, B.T)
    elif transA:
        C = numpy.dot(A.T, B)
    elif transB:
        C = numpy.dot(A, B.T)
    else:
        C = numpy.dot(A, B)
    C_ = accelerate.gemm(A, B, trans1=transA, trans2=transB)
    assert C.shape == C_.shape


@pytest.mark.skipif(not has_apple_ops, reason="Apple ops not available")
@pytest.mark.parametrize(
    "A_shape,B_shape,transA,transB",
    [
        [(4, 5), (4, 5), False, False],
        [(5, 4), (4, 5), True, False],
        [(4, 5), (5, 4), False, True],
        [(5, 4), (5, 4), True, True],
    ],
)
def test_incorrect_shapes(A_shape, B_shape, transA, transB):
    A = numpy.ndarray(A_shape, dtype="f")
    B = numpy.ndarray(B_shape, dtype="f")
    with pytest.raises(ValueError, match=r"Shape mismatch"):
        accelerate.gemm(A, B, trans1=transA, trans2=transB)