
Commit

Merge branch 'main' into main-public
ashahba committed Oct 6, 2023
2 parents 67c8afe + fd5cda2 commit d56d7aa
Showing 15 changed files with 104 additions and 108 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -31,7 +31,7 @@ venv-test:
@test -d test_env || virtualenv -p python test_env

@echo "Building the XAI API in test_env env..."
@. $(ACTIVATE_TEST) && pip install --editable .[test]
@. $(ACTIVATE_TEST) && pip install --extra-index-url https://download.pytorch.org/whl/cpu --editable .[test]

# TODO: running all tests in one pytest session randomly causes torch test to hang at last epoch
test-torch: venv-test
@@ -43,7 +43,7 @@ test-mcg: test-torch
@. $(ACTIVATE_TEST) && PYTHONPATH="$(CURDIR)/model_card_gen/tests" pytest -s -k "not torch"

install:
@pip install --editable .
@pip install --extra-index-url https://download.pytorch.org/whl/cpu --editable .

xai-whl:
@python setup.py bdist_wheel
8 changes: 4 additions & 4 deletions README.md
@@ -92,7 +92,7 @@ Intel is committed to the respect of human rights and avoiding complicity in hum
#### License
Intel® Explainable AI Tools is licensed under Apache License Version 2.0.

#### Datasets
To the extent that any public datasets are referenced by Intel or accessed using tools or code on this site those datasets are provided by the third party indicated as the data source. Intel does not create the data, or datasets, and does not warrant their accuracy or quality. By accessing the public dataset(s) you agree to the terms associated with those datasets and that your use complies with the applicable license. [DATASETS](DATASETS.md)
Intel expressly disclaims the accuracy, adequacy, or completeness of any public datasets, and is not liable for any errors, omissions, or defects in the data, or for any reliance on the data. Intel is not liable for any liability or damages relating to your use of public datasets.
#### Datasets and Models
To the extent that any data, datasets, or models are referenced by Intel or accessed using tools or code on this site such data, datasets and models are provided by the third party indicated as the source of such content. Intel does not create the data, datasets, or models, provide a license to any third-party data, datasets, or models referenced, and does not warrant their accuracy or quality. By accessing such data, dataset(s) or model(s) you agree to the terms associated with that content and that your use complies with the applicable license. [DATASETS](DATASETS.md)

Intel expressly disclaims the accuracy, adequacy, or completeness of any data, datasets or models, and is not liable for any errors, omissions, or defects in such content, or for any reliance thereon. Intel also expressly disclaims any warranty of non-infringement with respect to such data, dataset(s), or model(s). Intel is not liable for any liability or damages relating to your use of such data, datasets, or models.
2 changes: 0 additions & 2 deletions docs/conf.py
@@ -37,14 +37,12 @@
'nbsphinx',
'nbsphinx_link',
'sphinx_design',
'sphinx_external_toc',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
]
external_toc_path = "toc.yml"
external_toc_exclude_missing = False
html_theme = 'sphinx_rtd_theme'
nbsphinx_execute = 'never'
24 changes: 12 additions & 12 deletions docs/requirements-docs.txt
@@ -1,12 +1,12 @@
Sphinx~=5.3.0
docutils~=0.18.1
ghp-import~=2.1.0
linkify-it-py~=2.0.2
myst-parser~=1.0.0
nbsphinx-link~=1.3.0
nbsphinx~=0.9.2
sphinx-fontawesome~=0.0.6
sphinx-multitoc-numbering~=0.1.3
sphinx-rtd-theme~=1.2.2
sphinx_design~=0.4.1
sphinx_external_toc~=0.3.1
Sphinx==7.1.2; python_version=='3.8'
Sphinx==7.2.6; python_version>'3.8'
docutils==0.18.1
ghp-import==2.1.0
linkify-it-py==2.0.2
myst-parser==2.0.0
nbsphinx-link==1.3.0
nbsphinx==0.9.3
sphinx-fontawesome==0.0.6
sphinx-multitoc-numbering==0.1.3
sphinx-rtd-theme==1.3.0
sphinx_design==0.5.0
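
The `python_version` qualifiers used above (and in the `setup.py` changes later in this diff) are standard PEP 508 environment markers: pip evaluates each one against the running interpreter and keeps only the matching pin. A minimal sketch of that evaluation, using the `packaging` library (an assumption — it is not a dependency touched by this commit, but it is the same library pip relies on for marker handling):

```python
# Sketch only: how the "; python_version ..." markers above are resolved.
# Assumes the `packaging` package is available (pip vendors it).
from packaging.markers import Marker

sphinx_38 = Marker("python_version == '3.8'")
sphinx_39_plus = Marker("python_version > '3.8'")

# Evaluate against an explicit environment to see which Sphinx pin a 3.10 user gets.
env = {"python_version": "3.10"}
print(sphinx_38.evaluate(env))       # False -> Sphinx==7.1.2 is skipped
print(sphinx_39_plus.evaluate(env))  # True  -> Sphinx==7.2.6 is installed

# Called with no argument, evaluate() checks the current interpreter instead.
print(sphinx_38.evaluate(), sphinx_39_plus.evaluate())
```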
2 changes: 1 addition & 1 deletion explainer/attributions/attributions.py
@@ -383,7 +383,7 @@ def explainer():
>>> explainer = attributions.explainer()
<IPython.core.display.HTML object>
>>> explainer.shap.__version__
'0.41.0'
'0.42.1'
"""
return FeatureAttributions()

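Since the docstring doubles as a doctest, the pinned `shap` bump is observable at runtime. A usage sketch based on the docstring above (the import path is an assumption drawn from the package layout in `setup.py`):

```python
# Sketch: confirm the shap version bundled with the attributions explainer.
# Import path assumed from the 'explainer.attributions' package listed in setup.py.
from explainer import attributions

explainer = attributions.explainer()
print(explainer.shap.__version__)  # expected '0.42.1' after this commit
```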
2 changes: 1 addition & 1 deletion explainer/cam/cam.py
@@ -33,4 +33,4 @@ def __new__(cls, model, *args):
elif is_pt_model(model):
return super().__new__(XGradCAM)
else:
raise_unknown_model_error()
raise_unknown_model_error(model)
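Passing `model` into the error helper lets the failure message name the type that could not be matched. The helper itself is defined elsewhere in the explainer utilities and is not part of this diff; a hypothetical sketch of what the extra argument enables:

```python
# Hypothetical sketch -- the real raise_unknown_model_error lives in the explainer
# utils and may differ; this only illustrates why the model is now passed in.
def raise_unknown_model_error(model):
    raise ValueError(
        f"Unrecognized model type: {type(model)!r}. "
        "Expected a TensorFlow Keras model or a PyTorch nn.Module."
    )
```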
4 changes: 3 additions & 1 deletion explainer/utils/model/model_framework.py
@@ -18,7 +18,9 @@ def is_tf_model(model):
"""Returns whether model is TF keras sequential or functional"""
is_keras_sequential = str(type(model)).endswith("keras.engine.sequential.Sequential'>")
is_keras_functional = str(type(model)).endswith("keras.engine.functional.Functional'>")
return is_keras_sequential | is_keras_functional
is_keras_src_functional = str(type(model)).endswith("keras.src.engine.functional.Functional'>")
is_keras_src_sequential = str(type(model)).endswith("keras.src.engine.sequential.Sequential'>")
return is_keras_sequential | is_keras_functional | is_keras_src_functional | is_keras_src_sequential


def is_pt_model(model):
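The two new `keras.src.*` suffixes account for TensorFlow 2.13, where Keras moved its implementation under the `keras.src` package, so `str(type(model))` no longer ends with the old module paths. A standalone sketch of the same check (suffixes copied from the diff; TensorFlow and PyTorch availability, and the exact class paths per Keras release, are assumptions):

```python
# Sketch of the suffix-based framework check shown above.
import tensorflow as tf
import torch.nn as nn

_KERAS_SUFFIXES = (
    "keras.engine.sequential.Sequential'>",      # TF <= 2.12
    "keras.engine.functional.Functional'>",
    "keras.src.engine.sequential.Sequential'>",  # TF 2.13: Keras code moved under keras.src
    "keras.src.engine.functional.Functional'>",
)

def looks_like_tf_model(model):
    """Rough equivalent of is_tf_model above, written as an any() over suffixes."""
    return any(str(type(model)).endswith(s) for s in _KERAS_SUFFIXES)

print(looks_like_tf_model(tf.keras.Sequential([tf.keras.layers.Dense(1)])))  # True
print(looks_like_tf_model(nn.Linear(2, 1)))                                  # False
```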
@@ -17,7 +17,7 @@
"# issue on captum: https://github.com/pytorch/captum/issues/1114\n",
"# Note: this matplotlib version is not guaranteed to work with any other application of intel-xai\n",
"\n",
"! pip install -U matplotlib==3.6.3 --no-deps"
"! pip install matplotlib --no-deps"
]
},
{
@@ -14,7 +14,6 @@ This notebook demonstrates how to use the attributions explainer API to explain

To run `mnist.ipynb`, install the following dependencies:
1. [Intel® Explainable AI](https://github.com/IntelAI/intel-xai-tools)
2. pip install jupyter-dash

## References

11 changes: 5 additions & 6 deletions notebooks/explainer/multimodal_cancer_detection/README.md
@@ -37,12 +37,11 @@ The `dataset_utils.py` holds the supporting functions that prepare the image and

To run `Multimodal_Cancer_Detection.ipynb`, install the following dependencies:
1. [Intel® Explainable AI](https://github.com/IntelAI/intel-xai-tools)
2. `pip install intel-transfer-learning-tool==0.5`
3. `pip install intel-extension-for-transformers`
4. `pip install scikit-image`
7. `pip install nltk`
8. `pip install docx2txt`
9. `pip install openpyxl`
2.
```
pip install docx2txt intel-transfer-learning-tool==0.5 \
intel-extension-for-transformers==1.1.0 nltk openpyxl scikit-image
```

## References

11 changes: 0 additions & 11 deletions notebooks/model_card_gen/adult-pytorch-model-card.ipynb
@@ -22,17 +22,6 @@
" 5. [Generate Model Card with Intel Model Card Generator](#5.-Generate-Model-Card)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad83bad8",
"metadata": {},
"outputs": [],
"source": [
"# Needed to fetch Adult Dataset from OpenML\n",
"!pip install scikit-learn"
]
},
{
"cell_type": "code",
"execution_count": null,
6 changes: 2 additions & 4 deletions notebooks/model_card_gen/compas-model-card-tfx.ipynb
@@ -33,12 +33,10 @@
"metadata": {},
"outputs": [],
"source": [
"!python -m pip install -U \\\n",
"!python -m pip install \\\n",
" tfx \\\n",
" tensorflow-model-analysis \\\n",
" tensorflow-transform \\\n",
" scikit-learn \\\n",
" pandas"
" tensorflow-transform"
]
},
{
18 changes: 10 additions & 8 deletions notebooks/model_card_gen/toxicity-tfma-model-card.ipynb
@@ -16,6 +16,16 @@
"Adapted form [Tensorflow](https://colab.research.google.com/github/google/eng-edu/blob/main/ml/pc/exercises/fairness_text_toxicity_part1.ipynb?utm_source=practicum-fairness&utm_campaign=colab-external&utm_medium=referral&utm_content=fairnessexercise1-colab#scrollTo=2z_xzJ40j9Q-) "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ca170cad-c1d4-4222-9b7c-fd633e5f643c",
"metadata": {},
"outputs": [],
"source": [
"! pip install tensorflow_hub"
]
},
{
"cell_type": "markdown",
"id": "64c2921a",
@@ -493,14 +503,6 @@
"source": [
"mcg"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1c6cfe40",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
4 changes: 2 additions & 2 deletions notebooks/performance/README.md
@@ -31,7 +31,7 @@ In order to compare AI Kit and stock environments, two isolated Conda environmen
conda create -n stock python=3.9
conda activate stock
pip install intel-xai --no-deps
pip install tensorflow==2.12.0 shap torch=1.13.1 opencv-python notebook ipywidgets
pip install tensorflow==2.13.0 shap torch=2.0.1 opencv-python notebook ipywidgets
```

### __Intel® AI Kit Conda environment__
@@ -41,5 +41,5 @@ conda activate intel
conda install -c intel --deps-only shap
conda install --no-deps shap
pip install --no-deps intel-xai
conda install -c intel opencv pytorch=1.13.1 ipywidgets matplotlib tensorflow=2.12.0 notebook
conda install -c intel opencv pytorch=2.0.1 ipywidgets matplotlib tensorflow=2.13.0 notebook
```
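
Both environments should now resolve to the same framework versions. A quick sanity-check sketch (assuming the installs above completed) is to print the versions and compare them with the pins:

```python
# Sanity check: confirm the environment picked up the versions pinned above.
import shap
import tensorflow as tf
import torch

print("tensorflow", tf.__version__)  # expected 2.13.x
print("torch", torch.__version__)    # expected 2.0.1
print("shap", shap.__version__)      # 0.42.1 per the explainer docstring in this commit
```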
113 changes: 61 additions & 52 deletions setup.py
@@ -29,70 +29,79 @@
long_description = (this_directory / "README.md").read_text()

ATTRIBUTIONS_PKGS = [
'captum~=0.6.0',
'intel-tensorflow==2.12.0',
'ipywidgets~=7.7.5',
'numpy~=1.22.4',
'opencv-python~=4.7.0.72',
'plotly~=5.15.0',
'scikit-plot~=0.3.7',
'scipy~=1.10.1',
'shap~=0.41.0',
'torch==1.13.1',
'transformers~=4.30.0',
]
'captum==0.6.0',
'intel-tensorflow==2.13.0',
'ipywidgets==7.8.1',
'numpy==1.22.4; python_version<"3.10"',
'numpy==1.24.3; python_version=="3.10"',
'opencv-python==4.8.0.76',
'plotly==5.17.0',
'scikit-plot==0.3.7',
'scipy==1.10.1; python_version=="3.8"',
'scipy==1.11.2; python_version>"3.8"',
'shap==0.42.1',
'torch==2.0.1',
'transformers==4.33.2',
]

CAM_PKGS = [
'grad-cam==1.4.6',
'matplotlib~=3.7.1',
'numpy~=1.22.4',
'opencv-python~=4.7.0.72',
'scipy~=1.10.1',
'torch==1.13.1',
]
'grad-cam==1.4.8',
'matplotlib==3.7.3; python_version=="3.8"',
'matplotlib==3.8.0; python_version>"3.8"',
'numpy==1.22.4; python_version<"3.10"',
'numpy==1.24.3; python_version=="3.10"',
'opencv-python==4.8.0.76',
'scipy==1.10.1; python_version=="3.8"',
'scipy==1.11.2; python_version>"3.8"',
'torch==2.0.1',
]

METRICS_PKGS = [
'matplotlib~=3.7.1',
'pandas~=1.5.3',
'plotly~=5.15.0',
'scikit-learn~=1.2.2',
'seaborn~=0.12.2',
'matplotlib==3.7.3; python_version=="3.8"',
'matplotlib==3.8.0; python_version>"3.8"',
'pandas==1.5.3',
'plotly==5.17.0',
'scikit-learn==1.3.1',
'seaborn==0.12.2',
]

MCG_PKGS = [
'Jinja2~=3.1.2',
'absl-py~=1.4.0',
'attrs~=21.4.0',
'dataclasses~=2.10.1;python_version<"3.7"',
'grpcio-status~=1.48.2',
'intel-tensorflow==2.12.0',
'joblib~=1.2.0',
'jsonschema[format-nongpl]~=4.17.3',
'plotly~=5.15.0',
'protobuf~=3.20.3',
'semantic-version~=2.10.0',
'tensorflow-data-validation~=1.13.0',
'tensorflow-model-analysis~=0.44.0',
]
'Jinja2==3.1.2',
'absl-py==1.4.0',
'attrs==21.4.0',
'dataclasses==0.6',
'grpcio-status==1.48.2',
'intel-tensorflow==2.13.0',
'joblib==1.3.2',
'jsonschema[format-nongpl]==4.17.3',
'plotly==5.17.0',
'protobuf==3.20.3',
'semantic-version==2.10.0',
'tensorflow-data-validation==1.13.0; python_version<"3.10"',
'tensorflow-data-validation==1.14.0; python_version=="3.10"',
'tensorflow-model-analysis==0.44.0; python_version<"3.10"',
'tensorflow-model-analysis==0.45.0; python_version=="3.10"',
]

PYTORCH_PKGS = [
'torch==1.13.1',
'torchvision==0.14.1',
]
'torch==2.0.1',
'torchvision==0.15.2',
]

REQUIRED_PKGS = (
ATTRIBUTIONS_PKGS +
CAM_PKGS +
METRICS_PKGS +
MCG_PKGS
)
)

TEST_PKGS = [
'datasets~=2.10.1',
'deepdiff~=6.3.0',
'pytest~=7.3.2',
'tensorflow-hub~=0.13.0',
]
'datasets==2.10.1; python_version<"3.10"',
'datasets==2.14.4; python_version=="3.10"',
'deepdiff==6.5.0',
'pytest==7.4.2',
'tensorflow-hub==0.14.0',
]

PACKAGES = [
'explainer',
Expand All @@ -108,13 +117,13 @@
'model_card_gen.docs.examples',
'model_card_gen.graphics',
'model_card_gen.utils',
]
]


EXTRAS = {
'pytorch': PYTORCH_PKGS,
'test': TEST_PKGS + PYTORCH_PKGS,
}
'pytorch': PYTORCH_PKGS,
'test': TEST_PKGS + PYTORCH_PKGS,
}

# Get version from version module.
with open('explainer/version.py') as fp:
@@ -161,6 +170,6 @@
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
python_requires='>=3.9,<3.10',
python_requires='>=3.8',
keywords='XAI, explainer',
)
