Skip to content

Commit

Permalink
- Add abstract func for HFP8QuantizedToFloat (pytorch#3000)
Browse files Browse the repository at this point in the history
Summary:
X-link: facebookresearch/FBGEMM#93

Pull Request resolved: pytorch#3000

as title

Reviewed By: jianyuh

Differential Revision: D61340643
  • Loading branch information
flaviotruzzi committed Aug 15, 2024
1 parent 7105d5f commit ad620ff
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 14 deletions.
9 changes: 9 additions & 0 deletions fbgemm_gpu/fbgemm_gpu/sparse_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -972,6 +972,10 @@ def float_to_hfp8_quantized(
return torch.empty_like(input, dtype=torch.uint8)


def hfp8_quantized_to_float(input: Tensor, ebits: int, exponent_bias: int) -> Tensor:
    """Abstract (fake-tensor) implementation for fbgemm::HFP8QuantizedToFloat.

    Only the output metadata matters for tracing: the result has the same
    shape/layout as ``input`` but carries fp32 values, so an uninitialized
    tensor is sufficient. ``ebits`` and ``exponent_bias`` affect only the
    real kernel's numerics, not the output metadata, and are ignored here.
    """
    dequantized = torch.empty_like(input, dtype=torch.float32)
    return dequantized


def _setup() -> None:
# pyre-ignore[16]
_setup.done = getattr(_setup, "done", False)
Expand Down Expand Up @@ -1102,6 +1106,11 @@ def impl_autograd(op_name, fn, setup_context: Optional[Callable] = None) -> None
"fbgemm::FloatToHFP8Quantized",
float_to_hfp8_quantized,
)
impl_abstract(
"fbgemm::HFP8QuantizedToFloat",
hfp8_quantized_to_float,
)

_setup.done = True


Expand Down
15 changes: 1 addition & 14 deletions fbgemm_gpu/test/quantize/failures_dict_fast.json
Original file line number Diff line number Diff line change
Expand Up @@ -53,19 +53,6 @@
"status": "xfail"
}
},
"fbgemm::HFP8QuantizedToFloat": {
"SplitTableBatchedEmbeddingsTest.test_faketensor__test_nbit_forward_cpu": {
"comment": "",
"status": "xfail"
},
"SplitTableBatchedEmbeddingsTest.test_faketensor__test_nbit_forward_gpu_no_cache": {
"comment": "",
"status": "xfail"
},
"SplitTableBatchedEmbeddingsTest.test_faketensor__test_nbit_forward_gpu_no_cache_fp8_2048": {
"comment": "",
"status": "xfail"
}
}
"fbgemm::HFP8QuantizedToFloat": {}
}
}

0 comments on commit ad620ff

Please sign in to comment.