diff --git a/.azure-pipelines/template/ut-template.yml b/.azure-pipelines/template/ut-template.yml
index f8021b62..e2c8c949 100644
--- a/.azure-pipelines/template/ut-template.yml
+++ b/.azure-pipelines/template/ut-template.yml
@@ -35,15 +35,14 @@ steps:
   - ${{ if eq(parameters.imageSource, 'build') }}:
       - script: |
           docker exec ${{ parameters.utContainerName }} bash -c "cd /auto-round \
-            && pip install -r requirements.txt \
-            && pip install -vvv --no-build-isolation .[cpu] \
+            && pip install -vvv --no-build-isolation . \
             && pip list"
         displayName: "Env Setup"
   - ${{ if eq(parameters.imageSource, 'pull') }}:
       - script: |
           docker exec ${{ parameters.utContainerName }} bash -c "cd /auto-round \
-            && pip install -vvv --no-build-isolation .[hpu] \
+            && pip install -vvv --no-build-isolation . \
             && pip list"
         displayName: "HPU Env Setup"
diff --git a/MANIFEST.in b/MANIFEST.in
index 540b7204..d2abcd5f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1,3 @@
-include requirements.txt
\ No newline at end of file
+include requirements.txt
+include requirements-cpu.txt
+include requirements-hpu.txt
diff --git a/README.md b/README.md
index afa9fc79..3eb17d12 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@ AutoRound
 
 Advanced Quantization Algorithm for LLMs
 
 [![python](https://img.shields.io/badge/python-3.9%2B-blue)](https://github.com/intel/auto-round)
-[![version](https://img.shields.io/badge/release-0.4.2-green)](https://github.com/intel/auto-round)
+[![version](https://img.shields.io/badge/release-0.4.3-green)](https://github.com/intel/auto-round)
 [![license](https://img.shields.io/badge/license-Apache%202-blue)](https://github.com/intel/auto-round/blob/main/LICENSE)
 ---
@@ -47,30 +47,14 @@ details and quantized models in several Hugging Face Spaces, e.g. [OPEA](https:/
 ### Install from pypi
 ```bash
-# GPU
 pip install auto-round
-
-# CPU
-pip install auto-round[cpu]
-
-# HPU
-pip install auto-round[hpu]
 ```
 
 Build from Source
 ```bash
- pip install -r requirements.txt
-
- # GPU
- pip install -vvv --no-build-isolation -e .
-
- # CPU
- pip install -vvv --no-build-isolation -e .[cpu]
-
- # HPU
- pip install -vvv --no-build-isolation -e .[hpu]
+ pip install -vvv --no-build-isolation .
 ```
diff --git a/auto_round/version.py b/auto_round/version.py
index 3776cd59..80ebd5fd 100644
--- a/auto_round/version.py
+++ b/auto_round/version.py
@@ -14,4 +14,4 @@
 """Intel® auto-round: An open-source Python library supporting popular model weight only compression based on signround."""
-__version__ = "0.4.2"
+__version__ = "0.4.3"
diff --git a/requirements-cpu.txt b/requirements-cpu.txt
index 4b575c6d..71228b64 100644
--- a/requirements-cpu.txt
+++ b/requirements-cpu.txt
@@ -9,7 +9,6 @@ threadpoolctl
 lm-eval>=0.4.2,<0.5
 tqdm
 packaging
-auto-gptq>=0.7.1
 pillow
 numba
 tbb
diff --git a/setup.py b/setup.py
index 8c5649ba..98582dbf 100644
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,7 @@ def is_cuda_available():
     try:
+        os.system("pip install torch")
         import torch
         return torch.cuda.is_available()
@@ -111,6 +112,15 @@ def detect_local_sm_architectures():
     return arch_list
 
 
+def detect_hardware():
+    if is_hpu_available():
+        return "requirements-hpu.txt"
+    elif is_cuda_available():
+        return "requirements.txt"
+    else:
+        return "requirements-cpu.txt"
+
+
 UNSUPPORTED_COMPUTE_CAPABILITIES = ['3.5', '3.7', '5.0', '5.2', '5.3']
 
 if BUILD_CUDA_EXT:
@@ -219,11 +229,7 @@ def detect_local_sm_architectures():
             "auto_round_extension.*",
         ],
     ),
-    "install_requires": fetch_requirements("requirements.txt"),
-    "extras_require": {
-        "hpu": fetch_requirements("requirements-hpu.txt"),
-        "cpu": fetch_requirements("requirements-cpu.txt"),
-    },
+    "install_requires": fetch_requirements(detect_hardware()),
 }
 
 if __name__ == "__main__":
@@ -248,7 +254,6 @@ def detect_local_sm_architectures():
         url="https://github.com/intel/auto-round",
         packages=include_packages,
         include_dirs=include_dirs,
-        ##include_package_data=False,
         install_requires=install_requires,
         extras_require=extras_require,
         python_requires=">=3.7.0",
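
For reviewers, a minimal standalone sketch of the dependency-selection flow this patch moves into `setup.py`: `detect_hardware()` picks one of the three requirements files and its result feeds `fetch_requirements()` for `install_requires`. The bodies of `is_hpu_available()` and `fetch_requirements()` are not shown in the diff, so the versions below (probing for the `habana_frameworks` package and reading one requirement per non-comment line) are assumptions, not the project's exact implementations.

```python
# Standalone sketch of the hardware-aware requirements selection in this patch.
# Assumptions (not shown in the diff): is_hpu_available() probes for the
# habana_frameworks package, and fetch_requirements() reads one requirement
# per non-empty, non-comment line.
import importlib.util


def is_hpu_available():
    # Assumed probe: Gaudi/HPU software stacks ship the habana_frameworks package.
    return importlib.util.find_spec("habana_frameworks") is not None


def is_cuda_available():
    # Same check the patched setup.py performs once torch is importable.
    try:
        import torch
        return torch.cuda.is_available()
    except Exception:
        return False


def detect_hardware():
    # Mirrors the new setup.py helper: HPU first, then CUDA, otherwise CPU.
    if is_hpu_available():
        return "requirements-hpu.txt"
    elif is_cuda_available():
        return "requirements.txt"
    else:
        return "requirements-cpu.txt"


def fetch_requirements(path):
    # Assumed reader: one requirement per line, skipping blanks and comments.
    with open(path) as f:
        lines = (line.strip() for line in f)
        return [line for line in lines if line and not line.startswith("#")]


if __name__ == "__main__":
    print("install_requires would be read from:", detect_hardware())
```

With this in place, a plain `pip install auto-round` (or `pip install -vvv --no-build-isolation .` from source) resolves to the CPU, CUDA, or HPU requirement set at build time, which is why the `[cpu]` and `[hpu]` extras are dropped from the README and CI templates above.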