Support PyTorch models in the weight compression algorithm
Showing 53 changed files with 1,631 additions and 1,019 deletions.
@@ -0,0 +1,57 @@
# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from nncf.experimental.tensor.functions.numeric import abs as abs
from nncf.experimental.tensor.functions.numeric import all as all
from nncf.experimental.tensor.functions.numeric import allclose as allclose
from nncf.experimental.tensor.functions.numeric import any as any
from nncf.experimental.tensor.functions.numeric import as_tensor_like as as_tensor_like
from nncf.experimental.tensor.functions.numeric import astype as astype
from nncf.experimental.tensor.functions.numeric import clip as clip
from nncf.experimental.tensor.functions.numeric import count_nonzero as count_nonzero
from nncf.experimental.tensor.functions.numeric import device as device
from nncf.experimental.tensor.functions.numeric import dtype as dtype
from nncf.experimental.tensor.functions.numeric import finfo as finfo
from nncf.experimental.tensor.functions.numeric import flatten as flatten
from nncf.experimental.tensor.functions.numeric import isclose as isclose
from nncf.experimental.tensor.functions.numeric import isempty as isempty
from nncf.experimental.tensor.functions.numeric import item as item
from nncf.experimental.tensor.functions.numeric import max as max
from nncf.experimental.tensor.functions.numeric import maximum as maximum
from nncf.experimental.tensor.functions.numeric import mean as mean
from nncf.experimental.tensor.functions.numeric import min as min
from nncf.experimental.tensor.functions.numeric import minimum as minimum
from nncf.experimental.tensor.functions.numeric import moveaxis as moveaxis
from nncf.experimental.tensor.functions.numeric import multiply as multiply
from nncf.experimental.tensor.functions.numeric import ones_like as ones_like
from nncf.experimental.tensor.functions.numeric import reshape as reshape
from nncf.experimental.tensor.functions.numeric import round as round
from nncf.experimental.tensor.functions.numeric import squeeze as squeeze
from nncf.experimental.tensor.functions.numeric import stack as stack
from nncf.experimental.tensor.functions.numeric import sum as sum
from nncf.experimental.tensor.functions.numeric import unstack as unstack
from nncf.experimental.tensor.functions.numeric import var as var
from nncf.experimental.tensor.functions.numeric import where as where
from nncf.experimental.tensor.functions.numeric import zeros_like as zeros_like


def _initialize_backends():
    import nncf.experimental.tensor.functions.numpy_linalg
    import nncf.experimental.tensor.functions.numpy_numeric

    try:
        import nncf.experimental.tensor.functions.torch_linalg
        import nncf.experimental.tensor.functions.torch_numeric  # noqa: F401
    except ImportError:
        pass


_initialize_backends()
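This module exposes a backend-agnostic function namespace: importing the package runs _initialize_backends, which imports the numpy backend (and the torch backend when PyTorch is installed) so that each backend registers its implementations with the functools.singledispatch functions declared in numeric and linalg. The sketch below is a minimal, hedged illustration of how such a namespace is typically used; it assumes that Tensor can be constructed directly from a numpy array and that fns.abs and fns.max return Tensor objects, which is not confirmed by this diff alone.

import numpy as np

from nncf.experimental.tensor import Tensor
from nncf.experimental.tensor import functions as fns  # package import triggers _initialize_backends()

# Wrap a numpy array and call backend-agnostic functions; the calls are
# dispatched to the numpy implementations registered at import time.
t = Tensor(np.array([1.0, -2.0, 3.0]))
result = fns.max(fns.abs(t))
print(result.data)  # expected: 3.0 with the numpy backend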
@@ -0,0 +1,57 @@
# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import List

import numpy as np

from nncf.experimental.tensor import Tensor


def tensor_guard(func: callable):
    """
    A decorator that ensures that the first argument to the decorated function is a Tensor.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if isinstance(args[0], Tensor):
            return func(*args, **kwargs)
        raise NotImplementedError(f"Function `{func.__name__}` is not implemented for {type(args[0])}")

    return wrapper


def dispatch_list(fn: "functools._SingleDispatchCallable", tensor_list: List[Tensor], *args, **kwargs):
    """
    Dispatches the function to the type of the wrapped data of the first element in tensor_list.

    :param fn: A function wrapped by `functools.singledispatch`.
    :param tensor_list: List of Tensors.
    :return: The result value of the function call.
    """
    unwrapped_list = [i.data for i in tensor_list]
    return fn.dispatch(type(unwrapped_list[0]))(unwrapped_list, *args, **kwargs)


def register_numpy_types(singledispatch_fn):
    """
    Decorator to register a function to singledispatch for numpy classes.

    :param singledispatch_fn: singledispatch function.
    """

    def inner(func):
        singledispatch_fn.register(np.ndarray)(func)
        singledispatch_fn.register(np.generic)(func)
        return func

    return inner
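The dispatcher helpers tie the pieces together: tensor_guard makes the singledispatch fallback reachable only for Tensor arguments (anything else raises NotImplementedError), while register_numpy_types registers one implementation for both np.ndarray and np.generic. A minimal, self-contained sketch of the pattern follows; the function name "negative" is hypothetical and not part of this commit, it only mirrors how the real numeric and linalg functions are declared.

import functools

import numpy as np

from nncf.experimental.tensor import Tensor
from nncf.experimental.tensor.functions.dispatcher import register_numpy_types, tensor_guard


@functools.singledispatch
@tensor_guard
def negative(a: Tensor) -> Tensor:
    # Generic entry point: unwrap the Tensor and re-dispatch on the backend type of a.data.
    return Tensor(negative(a.data))


@register_numpy_types(negative)
def _(a):
    # Registered for both np.ndarray and np.generic.
    return np.negative(a)


print(negative(Tensor(np.array([1.0, -2.0]))).data)  # expected: [-1.  2.]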
@@ -0,0 +1,64 @@
# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
from typing import Optional, Tuple, Union

from nncf.experimental.tensor import Tensor
from nncf.experimental.tensor.functions.dispatcher import tensor_guard


@functools.singledispatch
@tensor_guard
def norm(
    a: Tensor,
    ord: Optional[Union[str, float, int]] = None,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
) -> Tensor:
    """
    Computes a vector or matrix norm.

    The following norms can be calculated:

    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    The nuclear norm is the sum of the singular values.

    Both the Frobenius and nuclear norm orders are only defined for
    matrices and otherwise raise a ValueError.

    :param a: The input tensor.
    :param ord: Order of norm. Default: None.
    :param axis: Axis over which to compute the vector or matrix norm. Default: None.
    :param keepdims: If set to True, the reduced dimensions are retained in the result
        as dimensions with size one. Default: False.
    :return: Norm of the matrix or vector.
    """
    return Tensor(norm(a.data, ord, axis, keepdims))
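Because norm is declared with functools.singledispatch and guarded by tensor_guard, the body re-dispatches on the type of a.data, so the same call works for any registered backend. A hedged usage sketch, assuming Tensor can wrap a numpy array directly and that the numpy backend is registered by the package import (via _initialize_backends):

import numpy as np

from nncf.experimental.tensor import Tensor
from nncf.experimental.tensor.functions.linalg import norm  # importing the package registers the backends

a = Tensor(np.array([[3.0, 4.0], [0.0, 0.0]]))
print(norm(a).data)             # Frobenius norm of a matrix: 5.0
print(norm(a, ord="nuc").data)  # nuclear norm (sum of singular values): 5.0 for this rank-1 matrix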