Skip to content

Commit

Permalink
Use remove_version instead.
Browse files Browse the repository at this point in the history
  • Loading branch information
irenedea committed Feb 6, 2024
1 parent f02f8ad commit 0b682aa
Show file tree
Hide file tree
Showing 6 changed files with 14 additions and 14 deletions.
2 changes: 1 addition & 1 deletion llmfoundry/callbacks/generate_callback.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def __init__(self, prompts: List[str], batch_log_interval: int,
warnings.warn(
VersionedDeprecationWarning('Accessing llmfoundry.callbacks.generate_callback.Generate ' + \
'is deprecated and will be removed in a future release. Please use composer.callbacks.Generate instead.',
after_version='0.3.0',
after_version='0.5.0',
)
)

Expand Down
2 changes: 1 addition & 1 deletion llmfoundry/data/packing.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,7 +438,7 @@ def profile(raw_batch_size: int) -> Tuple[Optional[float], Optional[float]]:
VersionedDeprecationWarning(
'Please use scripts/misc/profile_packing.py to profile packing.' +
'This script will be removed in later releases.',
after_version='0.3.0',
after_version='0.5.0',
))

import os
Expand Down
12 changes: 6 additions & 6 deletions llmfoundry/models/layers/attention.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,14 +107,14 @@ def scaled_multihead_dot_product_attention(
warnings.warn(
VersionedDeprecationWarning(
'The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.',
after_version='0.2.0',
after_version='0.5.0',
))
kv_n_heads = 1
elif kv_n_heads is None:
warnings.warn(
VersionedDeprecationWarning(
'Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.',
after_version='0.2.0',
after_version='0.5.0',
))
kv_n_heads = n_heads

Expand Down Expand Up @@ -254,14 +254,14 @@ def flash_attn_fn(
warnings.warn(
VersionedDeprecationWarning(
'The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.',
after_version='0.2.0',
after_version='0.5.0',
))
kv_n_heads = 1
elif kv_n_heads is None:
warnings.warn(
VersionedDeprecationWarning(
'Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.',
after_version='0.2.0',
after_version='0.5.0',
))
kv_n_heads = n_heads

Expand Down Expand Up @@ -429,14 +429,14 @@ def triton_flash_attn_fn(
warnings.warn(
VersionedDeprecationWarning(
'The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.',
after_version='0.2.0',
after_version='0.5.0',
))
kv_n_heads = 1
elif kv_n_heads is None:
warnings.warn(
VersionedDeprecationWarning(
'Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.',
after_version='0.2.0',
after_version='0.5.0',
))
kv_n_heads = n_heads

Expand Down
2 changes: 1 addition & 1 deletion llmfoundry/models/mpt/configuration_mpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -231,7 +231,7 @@ def _validate_config(self) -> None:
warnings.warn(
VersionedDeprecationWarning(
'Support for Flash Attention v1 is deprecated. Please upgrade to Flash Attention v2.4.2. To install Flash Attention v2.4.2, please run `pip install -e ".[gpu-flash2]"` from the root directory of the llm-foundry repository.',
after_version='0.4.0',
after_version='0.6.0',
))

if self.attn_config[
Expand Down
8 changes: 4 additions & 4 deletions llmfoundry/utils/warnings.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,7 @@ class VersionedDeprecationWarning(DeprecationWarning):
Attributes:
message (str): The deprecation message describing why the feature is deprecated.
after_version (str): The version after which the feature will be deprecated.
It will be removed after two releases.
after_version (str): The version after which the feature will be removed.
Example:
>>> def deprecated_function():
Expand All @@ -20,8 +19,9 @@ class VersionedDeprecationWarning(DeprecationWarning):
... )
...
>>> deprecated_function()
DeprecationWarning: After version 2.0.0: Function XYZ is deprecated.
DeprecationWarning: Function XYZ is deprecated. It will be removed in version 2.0.0.
"""

def __init__(self, message: str, after_version: str) -> None:
super().__init__(f'After version {after_version}:' + message)
super().__init__(message +
f' It will be removed in version {after_version}.')
2 changes: 1 addition & 1 deletion scripts/train/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ def main(cfg: DictConfig) -> Trainer:
warnings.warn(
VersionedDeprecationWarning(
'Use of the key `model_gauntlet` is deprecated, please use the key `eval_gauntlet`',
after_version='0.2.0',
after_version='0.5.0',
))
icl_subset_num_batches: Optional[int] = pop_config(cfg,
'icl_subset_num_batches',
Expand Down

0 comments on commit 0b682aa

Please sign in to comment.