
Simplify annotations for python >= 3.9
Starting from Python 3.9, we can
- replace typing.Tuple, typing.Dict, typing.List, and typing.Type with the builtins tuple, dict, list, and type
- replace typing.Callable with collections.abc.Callable
- replace typing.Sequence with collections.abc.Sequence

See https://peps.python.org/pep-0585/
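For illustration, a minimal before/after sketch of the pattern applied throughout this commit; the function and parameter names below are hypothetical and not taken from the repository:

```python
# Before (python < 3.9): generic aliases must come from typing
from typing import Callable, Dict, List, Optional, Tuple


def apply_all(
    funcs: List[Callable[[float], float]],
    values: Dict[str, float],
    scale: Optional[float] = None,
) -> Tuple[List[float], Dict[str, float]]:
    ...


# After (python >= 3.9, PEP 585): builtin generics and collections.abc
from collections.abc import Callable
from typing import Optional


def apply_all(
    funcs: list[Callable[[float], float]],
    values: dict[str, float],
    scale: Optional[float] = None,
) -> tuple[list[float], dict[str, float]]:
    ...
```

Note that `Optional`, `Union`, and `Any` are not covered by PEP 585 (the `X | Y` syntax only arrives with PEP 604 in Python 3.10), which is why the updated imports in the diffs below still pull them from `typing`.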
nhuet committed Mar 14, 2024
1 parent 2fa82a5 commit 4e2c607
Showing 32 changed files with 447 additions and 440 deletions.
9 changes: 4 additions & 5 deletions docs/generate_nb_index.py
@@ -4,7 +4,6 @@
import os
import re
import urllib.parse
from typing import List, Tuple

NOTEBOOKS_LIST_PLACEHOLDER = "[[notebooks-list]]"
NOTEBOOKS_PAGE_TEMPLATE_RELATIVE_PATH = "tutorials.template.md"
@@ -25,15 +24,15 @@

def extract_notebook_title_n_description(
notebook_filepath: str,
) -> Tuple[str, List[str]]:
) -> tuple[str, list[str]]:
# load notebook
with open(notebook_filepath, "rt", encoding="utf-8") as f:
notebook = json.load(f)

# find title + description: from first cell, h1 title + remaining text.
# or title from filename else
title = ""
description_lines: List[str] = []
description_lines: list[str] = []
cell = notebook["cells"][0]
if cell["cell_type"] == "markdown":
firstline = cell["source"][0].strip()
@@ -51,7 +50,7 @@ def extract_notebook_title_n_description(
return title, description_lines


def filter_tags_from_description(description_lines: List[str], html_tag_to_remove: str) -> List[str]:
def filter_tags_from_description(description_lines: list[str], html_tag_to_remove: str) -> list[str]:
description = "".join(description_lines)
# opening/closing tags
opening_tag = html_tag_to_remove
@@ -147,7 +146,7 @@ def get_binder_link(
return link


def get_repo_n_branches_for_binder_n_github_links() -> Tuple[bool, str, str, str, str, str, bool]:
def get_repo_n_branches_for_binder_n_github_links() -> tuple[bool, str, str, str, str, str, bool]:
# repos + branches to use for binder environment and notebooks content.
creating_links = True
use_nbgitpuller = False
55 changes: 28 additions & 27 deletions src/decomon/backward_layers/activations.py
@@ -1,5 +1,6 @@
import warnings
from typing import Any, Callable, Dict, List, Optional, Union
from collections.abc import Callable
from typing import Any, Optional, Union

import keras.ops as K
import numpy as np
@@ -39,7 +40,7 @@


def backward_relu(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
alpha: float = 0.0,
@@ -48,7 +49,7 @@ def backward_relu(
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of relu
Args:
@@ -87,13 +88,13 @@ def backward_relu(


def backward_sigmoid(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of sigmoid
Args:
@@ -118,13 +119,13 @@ def backward_sigmoid(


def backward_tanh(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of tanh
Args:
@@ -149,13 +150,13 @@ def backward_tanh(


def backward_hard_sigmoid(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of hard sigmoid
Args:
@@ -178,13 +179,13 @@ def backward_hard_sigmoid(


def backward_elu(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of Exponential Linear Unit
Args:
@@ -208,13 +209,13 @@ def backward_elu(


def backward_selu(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of Scaled Exponential Linear Unit (SELU)
Args:
@@ -238,13 +239,13 @@ def backward_selu(


def backward_linear(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of linear
Args:
@@ -265,13 +266,13 @@ def backward_linear(


def backward_exponential(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPAof exponential
Args:
@@ -294,13 +295,13 @@ def backward_exponential(


def backward_softplus(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of softplus
Args:
@@ -325,13 +326,13 @@ def backward_softplus(


def backward_softsign(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of softsign
Args:
@@ -359,7 +360,7 @@ def backward_softsign(


def backward_softsign_(
inputs: List[Tensor],
inputs: list[Tensor],
w_u_out: Tensor,
b_u_out: Tensor,
w_l_out: Tensor,
@@ -368,7 +369,7 @@ def backward_softsign_(
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
slope: Union[str, Slope] = Slope.V_SLOPE,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
if perturbation_domain is None:
perturbation_domain = BoxDomain()
w_u_0, b_u_0, w_l_0, b_l_0 = get_linear_hull_s_shape(
@@ -391,14 +392,14 @@ def backward_softsign_(


def backward_softmax(
inputs: List[Tensor],
inputs: list[Tensor],
dc_decomp: bool = False,
perturbation_domain: Optional[PerturbationDomain] = None,
slope: Union[str, Slope] = Slope.V_SLOPE,
mode: Union[str, ForwardMode] = ForwardMode.HYBRID,
axis: int = -1,
**kwargs: Any,
) -> List[Tensor]:
) -> list[Tensor]:
"""Backward LiRPA of softmax
Args:
@@ -422,7 +423,7 @@ def backward_softmax(
raise NotImplementedError()


def deserialize(name: str) -> Callable[..., List[Tensor]]:
def deserialize(name: str) -> Callable[..., list[Tensor]]:
"""Get the activation from name.
Args:
@@ -457,7 +458,7 @@ def deserialize(name: str) -> Callable[..., List[Tensor]]:
raise ValueError("Could not interpret " "activation function identifier:", name)


def get(identifier: Any) -> Callable[..., List[Tensor]]:
def get(identifier: Any) -> Callable[..., list[Tensor]]:
"""Get the `identifier` activation function.
Args:
32 changes: 16 additions & 16 deletions src/decomon/backward_layers/backward_layers.py
@@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional, Tuple, Union
from typing import Any, Optional, Union

import keras
import keras.ops as K
@@ -65,7 +65,7 @@ def __init__(
)
self.frozen_weights = False

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
if len(inputs) == 0:
inputs = self.layer.input

@@ -96,7 +96,7 @@ def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:

return [w, b, w, b]

def build(self, input_shape: List[Tuple[Optional[int], ...]]) -> None:
def build(self, input_shape: list[tuple[Optional[int], ...]]) -> None:
"""
Args:
input_shape: list of input shape
@@ -166,7 +166,7 @@ def __init__(
)
self.frozen_weights = False

def get_affine_components(self, inputs: List[BackendTensor]) -> Tuple[BackendTensor, BackendTensor]:
def get_affine_components(self, inputs: list[BackendTensor]) -> tuple[BackendTensor, BackendTensor]:
"""Express the implicit affine matrix of the convolution layer.
Conv is a linear operator but its affine component is implicit
@@ -210,7 +210,7 @@ def get_affine_components(self, inputs: List[BackendTensor]) -> Tuple[BackendTensor, BackendTensor]:

return w_out_, b_out_

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
weight_, bias_ = self.get_affine_components(inputs)
return [weight_, bias_] * 2

@@ -252,13 +252,13 @@ def __init__(
self.activation_name = layer.get_config()["activation"]
self.slope = Slope(slope)
self.finetune = finetune
self.finetune_param: List[keras.Variable] = []
self.finetune_param: list[keras.Variable] = []
if self.finetune:
self.frozen_alpha = False
self.grid_finetune: List[keras.Variable] = []
self.grid_finetune: list[keras.Variable] = []
self.frozen_grid = False

def get_config(self) -> Dict[str, Any]:
def get_config(self) -> dict[str, Any]:
config = super().get_config()
config.update(
{
@@ -268,7 +268,7 @@ def get_config(self) -> Dict[str, Any]:
)
return config

def build(self, input_shape: List[Tuple[Optional[int], ...]]) -> None:
def build(self, input_shape: list[tuple[Optional[int], ...]]) -> None:
"""
Args:
input_shape: list of input shape
@@ -371,7 +371,7 @@ def build(self, input_shape: List[Tuple[Optional[int], ...]]) -> None:

self.built = True

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
# infer the output dimension
if self.activation_name != "linear":
if self.finetune:
@@ -452,7 +452,7 @@ def __init__(
**kwargs,
)

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
return get_identity_lirpa(inputs)


@@ -477,7 +477,7 @@ def __init__(
**kwargs,
)

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
return get_identity_lirpa(inputs)


@@ -504,7 +504,7 @@ def __init__(
self.dims = layer.dims
self.op = layer.call

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
w_u_out, b_u_out, w_l_out, b_l_out = get_identity_lirpa(inputs)

# w_u_out (None, n_in, n_out)
Expand Down Expand Up @@ -545,7 +545,7 @@ def __init__(
**kwargs,
)

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
return get_identity_lirpa(inputs)


@@ -573,7 +573,7 @@ def __init__(
self.axis = self.layer.axis
self.op_flat = Flatten()

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
y = inputs[-1]
n_out = int(np.prod(y.shape[1:]))

@@ -625,5 +625,5 @@ def __init__(
**kwargs,
)

def call(self, inputs: List[BackendTensor], **kwargs: Any) -> List[BackendTensor]:
def call(self, inputs: list[BackendTensor], **kwargs: Any) -> list[BackendTensor]:
return get_identity_lirpa(inputs)
