From 0ea77fd1ffdbf184abe7521a35f753079fb7bb54 Mon Sep 17 00:00:00 2001
From: Kaihui-intel
Date: Tue, 16 Jul 2024 14:02:57 +0800
Subject: [PATCH] enhance import&add pack ut

Signed-off-by: Kaihui-intel
---
 neural_compressor/torch/__init__.py                | 1 +
 test/3x/torch/quantization/weight_only/test_rtn.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/neural_compressor/torch/__init__.py b/neural_compressor/torch/__init__.py
index 28f108cb636..fa59ad3b280 100644
--- a/neural_compressor/torch/__init__.py
+++ b/neural_compressor/torch/__init__.py
@@ -11,3 +11,4 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from neural_compressor.torch.algorithms.layer_wise import load_empty_model
\ No newline at end of file
diff --git a/test/3x/torch/quantization/weight_only/test_rtn.py b/test/3x/torch/quantization/weight_only/test_rtn.py
index d4e1ae2f4e6..293f11f6b8b 100644
--- a/test/3x/torch/quantization/weight_only/test_rtn.py
+++ b/test/3x/torch/quantization/weight_only/test_rtn.py
@@ -167,7 +167,7 @@ def test_quant_lm_head(self):
         ), "The tied lm_head weight is not deep copied, please check!"

     def test_layer_wise(self):
-        from neural_compressor.torch.algorithms.layer_wise import load_empty_model
+        from neural_compressor.torch import load_empty_model
         model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM")
         quant_config = RTNConfig(
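
For context, a minimal sketch of how the re-exported entry point is meant to be used once this patch is applied. It mirrors the updated unit test; the import path and the tiny test checkpoint name are taken from the diff above, everything else (printing the class name) is just for illustration and not part of the change.

    # Exercise the re-export added to neural_compressor/torch/__init__.py,
    # which forwards load_empty_model from algorithms.layer_wise.
    from neural_compressor.torch import load_empty_model

    # Same tiny checkpoint the updated test uses; load_empty_model builds the
    # model skeleton without materializing full weights, which is what the
    # layer-wise quantization flow expects as a starting point.
    model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM")
    print(type(model).__name__)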