Commit 62c2e69 (parent: 58e068b)
Showing 4 changed files with 165 additions and 13 deletions.
@@ -0,0 +1,146 @@
from functools import partial

import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F

from collections import namedtuple
from functools import wraps
from packaging import version

from einops import rearrange, repeat

# constants

EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
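
The three flags mirror the keyword arguments of torch.backends.cuda.sdp_kernel in PyTorch 2.0, which is why the config can later be splatted straight into that context manager via _asdict(). A minimal sketch of the correspondence (the tensor shapes are illustrative, not from the commit):

config = EfficientAttentionConfig(enable_flash = True, enable_math = False, enable_mem_efficient = False)

q = k = v = torch.randn(1, 8, 16, 64)  # (batch, heads, seq_len, dim_head)

# the namedtuple fields line up one-to-one with sdp_kernel's keyword arguments
with torch.backends.cuda.sdp_kernel(**config._asdict()):
    out = F.scaled_dot_product_attention(q, k, v)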

# helpers

def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d

def once(fn):
    called = False
    @wraps(fn)
    def inner(x):
        nonlocal called
        if called:
            return
        called = True
        return fn(x)
    return inner

print_once = once(print)
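
once gates a single-argument function so that only its first call runs; print_once is how the class below reports the detected GPU exactly once rather than on every instantiation. A quick illustration:

print_once('hello')   # prints 'hello'
print_once('hello')   # no-op, the wrapper has already fired
print_once('world')   # also a no-op: once counts calls, it does not key on the argument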

# main class

class Attend(nn.Module):
    def __init__(
        self,
        *,
        dropout = 0.,
        heads = None,
        scale = None,
        flash = False,
    ):
        super().__init__()
        self.scale = scale

        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)

        # flash attention

        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu

        self.cpu_config = EfficientAttentionConfig(True, True, True)
        self.cuda_config = None

        if not torch.cuda.is_available() or not flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        major, minor = device_properties.major, device_properties.minor

        if (major, minor) == (8, 0):
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = EfficientAttentionConfig(True, False, False)
        elif (major, minor) == (9, 0):
            print_once('H100 GPU detected, using flash attention')
            self.cuda_config = EfficientAttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = EfficientAttentionConfig(False, True, True)
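
The (major, minor) pair is the CUDA compute capability: (8, 0) is A100 and (9, 0) is H100, both of which get a flash-only config, while any other device falls back to the math and memory-efficient kernels. Construction is then just (a hypothetical configuration, not taken from this commit):

attend = Attend(
    dropout = 0.1,
    scale = None,    # None defers to dim_head ** -0.5 in forward
    flash = True,    # asserts pytorch >= 2.0 in __init__
)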

    def flash_attn(
        self,
        q, k, v,
        mask = None
    ):
        batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device

        # expand key padding mask

        if exists(mask):
            assert mask.ndim == 4
            mask = mask.expand(batch, heads, q_len, k_len)

        # check if there is a compatible device for flash attention

        config = self.cuda_config if is_cuda else self.cpu_config

        # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale

        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask = mask,
                dropout_p = self.dropout if self.training else 0.
            )

        return out
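
The mask is expected as a 4-d boolean tensor (True means attend) that broadcasts against (batch, heads, q_len, k_len); expand materializes it to the full shape scaled_dot_product_attention expects. A sketch of calling the flash path, with illustrative shapes (requires pytorch >= 2.0):

attend = Attend(flash = True)

q = torch.randn(2, 8, 128, 64)   # (batch, heads, q_len, dim_head)
k = torch.randn(2, 8, 256, 64)   # (batch, heads, k_len, dim_head)
v = torch.randn(2, 8, 256, 64)

# key padding mask, broadcast from (batch, 1, 1, k_len) over heads and query positions
mask = torch.ones(2, 1, 1, 256, dtype = torch.bool)

out = attend.flash_attn(q, k, v, mask = mask)   # (2, 8, 128, 64)

Note that this path never consults self.scale: scaled_dot_product_attention applies its own default dim_head ** -0.5 scaling, so a custom scale only takes effect in the non-flash branch of forward.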

    def forward(
        self,
        q, k, v,
        mask = None
    ):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device

        scale = default(self.scale, q.shape[-1] ** -0.5)

        if self.flash:
            return self.flash_attn(q, k, v, mask = mask)

        sim = einsum('b h i d, b h j d -> b h i j', q, k) * scale

        i, j, dtype = *sim.shape[-2:], sim.dtype

        mask_value = -torch.finfo(sim.dtype).max

        if exists(mask):
            sim = sim.masked_fill(~mask, mask_value)

        attn = sim.softmax(dim = -1)
        attn = attn.type(dtype)

        attn = self.attn_dropout(attn)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)

        return out
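
End to end, the non-flash branch computes softmax(q kᵀ · scale) v with optional boolean masking. A small sanity check that the einsum path matches plain matmul attention (shapes and tolerance are illustrative):

attend = Attend(dropout = 0., flash = False)
attend.eval()

q = torch.randn(2, 8, 32, 64)
k = torch.randn(2, 8, 32, 64)
v = torch.randn(2, 8, 32, 64)

out = attend(q, k, v)

# reference implementation with the same default dim_head ** -0.5 scaling
ref = (q @ k.transpose(-2, -1) * 64 ** -0.5).softmax(dim = -1) @ v
assert torch.allclose(out, ref, atol = 1e-6)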