
Commit ba7e5d9

save

micmelesse committed Oct 16, 2024
1 parent 6cc25ba
Showing 3 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion flash_attn/flash_attn_triton_amd/bwd_ref.py
```diff
@@ -1,7 +1,7 @@
 import torch
 import math
 
-DEBUG=False
+DEBUG = False
 
 def attention_backward_pytorch_ref_impl(do, q, k, v, o, softmax_lse, sm_scale, causal, layout, use_exp2, bwd_preprocessing_use_o):
     # ensure the layout is 'bhsd'
```
2 changes: 1 addition & 1 deletion flash_attn/flash_attn_triton_amd/fwd_ref.py
```diff
@@ -1,7 +1,7 @@
 import math
 import torch
 
-DEBUG=False
+DEBUG = False
 
 def attention_forward_pytorch_ref_impl(q, k, v, sm_scale, causal, layout, use_exp2):
     """compute reference output and softmax_lse using PyTorch's built-in function"""
```
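Both reference files get the same PEP 8 fix: spaces around `=` in a module-level assignment. As a hedged illustration of the pattern being touched, here is how such a flag is commonly consumed; the function signature comes from the diff above, but the `print` body is an assumption for illustration, not the file's real contents:

```python
DEBUG = False  # module-level debug switch, flipped by hand when debugging

def attention_forward_pytorch_ref_impl(q, k, v, sm_scale, causal, layout, use_exp2):
    if DEBUG:
        # assumed usage: trace inputs only when debugging the reference path
        print("q:", q.shape, "k:", k.shape, "v:", v.shape)
    ...
```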
2 changes: 1 addition & 1 deletion tests/test_flash_attn_triton.py
```diff
@@ -19,7 +19,7 @@
 from flash_attn.flash_attn_interface import _get_block_size_n
 from flash_attn.layers.rotary import apply_rotary_emb
 
-DEBUG=False
+DEBUG = False
 # Test ROCM Triton Backend
 USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_USE_TRITON_ROCM", "FALSE") == "TRUE"
 if USE_TRITON_ROCM:
```
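The test module reads `FLASH_ATTENTION_USE_TRITON_ROCM` at import time and enables the ROCm Triton backend only when the value is exactly the string `"TRUE"`. A minimal sketch of driving the tests with the backend enabled; the pytest invocation is an assumption, only the variable name and value come from the diff:

```python
import os
import subprocess

# Set the flag in the child environment. The comparison in the test module is
# against the exact string "TRUE", so values like "1" or "true" would not match.
env = dict(os.environ, FLASH_ATTENTION_USE_TRITON_ROCM="TRUE")
subprocess.run(["pytest", "tests/test_flash_attn_triton.py"], env=env, check=True)
```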
