From 787fa01987479c16d82babe72b53d5b20892cfb7 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 31 May 2024 07:00:20 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 auto_round/auto_quantizer.py                       |  2 +-
 auto_round/export/export_to_autoround/post_init.py | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/auto_round/auto_quantizer.py b/auto_round/auto_quantizer.py
index 3df847c7..a19fa0fc 100644
--- a/auto_round/auto_quantizer.py
+++ b/auto_round/auto_quantizer.py
@@ -386,7 +386,7 @@ def post_init_model(self, model):
         #         hasattr(model, "hf_device_map") and any(d in model.hf_device_map for d in ["cpu", "disk"])
         #     ):
         #         raise ValueError(
-        #             "Found modules on cpu/disk. Usin Exllamav2 backend requires all the modules to be on GPU."
+        #             "Found modules on cpu/disk. Using Exllamav2 backend requires all the modules to be on GPU."
         #             "You can deactivate exllama backend by setting `disable_exllama=True` in the quantization config object"
         #         )
 
diff --git a/auto_round/export/export_to_autoround/post_init.py b/auto_round/export/export_to_autoround/post_init.py
index e0f83651..a9732e42 100644
--- a/auto_round/export/export_to_autoround/post_init.py
+++ b/auto_round/export/export_to_autoround/post_init.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import torch
 
 EXLLAMA_DEFAULT_MAX_INPUT_LENGTH=2048