Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Included callable layer selector support for IntegratedGradients. #894

Merged
merged 4 commits into from
Mar 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions alibi/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,3 +52,11 @@ def __init__(self, object_name: str):
super().__init__(
f"This {object_name} instance is not fitted yet. Call 'fit' with appropriate arguments first."
)


class SerializationError(AlibiException):
    """Raised when an explainer cannot be serialized.

    Parameters
    ----------
    message
        Human-readable description of why serialization failed.
    """

    def __init__(self, message: str):
        super().__init__(message)
49 changes: 39 additions & 10 deletions alibi/explainers/integrated_gradients.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import logging
import string
import warnings
from enum import Enum
from typing import Callable, List, Optional, Tuple, Union, cast

import numpy as np
Expand Down Expand Up @@ -779,11 +780,22 @@ def _validate_output(model: tf.keras.Model,
"Targets can be either the true classes or the classes predicted by the model.")


class LayerState(str, Enum):
    """Sentinel values recorded in the explainer metadata for the ``layer`` parameter.

    Mixing in ``str`` lets the stored metadata value compare equal to the enum
    member after a save/load round-trip through plain strings.
    """

    # no layer was provided; gradients are taken w.r.t. the input
    UNSPECIFIED = 'unspecified'
    # a layer object not present in `model.layers` was provided; cannot be saved
    NON_SERIALIZABLE = 'non-serializable'
    # the layer was provided as a callable `model -> layer`
    CALLABLE = 'callable'


class IntegratedGradients(Explainer):

def __init__(self,
model: tf.keras.Model,
layer: Optional[tf.keras.layers.Layer] = None,
layer: Optional[
Union[
Callable[[tf.keras.Model], tf.keras.layers.Layer],
tf.keras.layers.Layer
]
] = None,
target_fn: Optional[Callable] = None,
method: str = "gausslegendre",
n_steps: int = 50,
Expand All @@ -799,8 +811,10 @@ def __init__(self,
model
`tensorflow` model.
layer
Layer with respect to which the gradients are calculated.
If not provided, the gradients are calculated with respect to the input.
A layer or a function having as parameter the model and returning a layer with respect to which the
gradients are calculated. If not provided, the gradients are calculated with respect to the input.
To guarantee saving and loading of the explainer, the layer has to be specified as a callable which
returns a layer given the model. E.g. ``lambda model: model.layers[0].embeddings``.
target_fn
A scalar function that is applied to the predictions of the model.
This can be used to specify which scalar output the attributions should be calculated for.
Expand Down Expand Up @@ -829,18 +843,33 @@ def __init__(self,

if layer is None:
self.orig_call: Optional[Callable] = None
layer_num: Optional[int] = 0
else:
self.layer = None
layer_meta: Union[int, str] = LayerState.UNSPECIFIED.value

elif isinstance(layer, tf.keras.layers.Layer):
self.orig_call = layer.call
self.layer = layer

try:
layer_num = model.layers.index(layer)
layer_meta = model.layers.index(layer)
except ValueError:
logger.info("Layer not in the list of model.layers")
layer_num = None
layer_meta = LayerState.NON_SERIALIZABLE.value
logger.warning('Layer not in the list of `model.layers`. Passing the layer directly would not '
'permit the serialization of the explainer. This is due to nested layers. To permit '
'the serialization of the explainer, provide the layer as a callable which returns '
'the layer given the model.')

elif callable(layer):
self.layer = layer(self.model)
self.orig_call = self.layer.call
self.callable_layer = layer
layer_meta = LayerState.CALLABLE.value

else:
raise TypeError(f'Unsupported layer type. Received {type(layer)}.')

params['layer'] = layer_num
params['layer'] = layer_meta
self.meta['params'].update(params)
self.layer = layer
self.n_steps = n_steps
self.method = method
self.internal_batch_size = internal_batch_size
Expand Down
24 changes: 17 additions & 7 deletions alibi/saving.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import copy
import json
import numbers
import os
from pathlib import Path
import sys
Expand Down Expand Up @@ -123,21 +124,30 @@ def _simple_load(path: Union[str, os.PathLike], predictor, meta) -> 'Explainer':

def _load_IntegratedGradients(path: Union[str, os.PathLike], predictor: 'Union[tensorflow.keras.Model]',
                              meta: dict) -> 'IntegratedGradients':
    """Load a saved `IntegratedGradients` explainer and re-attach `predictor`.

    The target layer is reconstructed from the ``layer`` entry in the saved
    metadata: a `LayerState.CALLABLE` marker means the pickled explainer carries
    a ``callable_layer`` selector to apply to the new predictor, while an
    integer is treated as an index into ``predictor.layers``.
    """
    # imported lazily to avoid a circular import with the explainers package
    from alibi.explainers.integrated_gradients import LayerState

    saved_layer = meta['params']['layer']

    with open(Path(path, 'explainer.dill'), 'rb') as f:
        explainer = dill.load(f)
    explainer.reset_predictor(predictor)

    if saved_layer == LayerState.CALLABLE:
        # recover the concrete layer by applying the saved selector to the model
        explainer.layer = explainer.callable_layer(predictor)
    elif isinstance(saved_layer, numbers.Integral):
        # an integer here is an index into `predictor.layers`
        explainer.layer = predictor.layers[saved_layer]

    return explainer


def _save_IntegratedGradients(explainer: 'IntegratedGradients', path: Union[str, os.PathLike]) -> None:
from alibi.explainers.integrated_gradients import LayerState
from alibi.exceptions import SerializationError

if explainer.meta['params']['layer'] == LayerState.NON_SERIALIZABLE:
raise SerializationError('The layer provided in the explainer initialization cannot be serialized. This is due '
'to nested layers. To permit the serialization of the explainer, provide the layer as '
'a callable which returns the layer given the model.')

model = explainer.model
layer = explainer.layer
explainer.model = explainer.layer = None
Expand Down
38 changes: 35 additions & 3 deletions alibi/tests/test_saving.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import numbers
import sys
import numpy as np
from numpy.testing import assert_allclose
Expand All @@ -20,6 +21,7 @@
CounterfactualRLTabular,
GradientSimilarity
)
from alibi.explainers.integrated_gradients import LayerState
from alibi.saving import load_explainer
from alibi_testing.data import get_adult_data, get_iris_data, get_movie_sentiment_data
import alibi_testing
Expand Down Expand Up @@ -141,9 +143,20 @@ def ale_explainer(iris_data, lr_classifier):
return ale


@pytest.fixture(scope='module',
                params=[LayerState.UNSPECIFIED, LayerState.CALLABLE, 1])
def ig_explainer(request, iris_data, ffn_classifier):
    """Parametrized `IntegratedGradients` fixture covering every supported
    ``layer`` specification: no layer, a selector callable, and a layer object
    (given as its index in ``ffn_classifier.layers``)."""
    spec = request.param

    if isinstance(spec, numbers.Integral):
        # pass the layer object directly
        selected = ffn_classifier.layers[spec]
    elif spec == LayerState.CALLABLE:
        # pass a selector callable that extracts the layer from the model
        def selected(model):
            return model.layers[1]
    else:
        # no layer: attributions w.r.t. the model input
        selected = None

    return IntegratedGradients(model=ffn_classifier, layer=selected)


Expand Down Expand Up @@ -314,8 +327,27 @@ def test_save_IG(ig_explainer, ffn_classifier, iris_data):
ig_explainer1 = load_explainer(temp_dir, predictor=ffn_classifier)

assert isinstance(ig_explainer1, IntegratedGradients)

# need to remove the layer entry since it can be a callable.
# Although the callable are identical, they have different addresses in memory
layer = ig_explainer.meta['params']['layer']
layer1 = ig_explainer1.meta['params']['layer']
del ig_explainer.meta['params']['layer']
del ig_explainer1.meta['params']['layer']

# compare metadata
assert ig_explainer.meta == ig_explainer1.meta

# compare layers
if callable(layer):
assert layer.__code__ == layer1.__code__
else:
assert layer == layer1

# insert layers back in case the `ig_explainer` will be used in the future
ig_explainer.meta['params']['layer'] = layer
ig_explainer1.meta['params']['layer'] = layer1

exp1 = ig_explainer.explain(X, target=target)
assert exp0.meta == exp1.meta

Expand Down