⚡ Add ESD Support to hls4ml #1129

Open

wants to merge 26 commits into base: main

Commits (26)
ef20647
import converter dependencies lazily
calad0i Oct 26, 2024
028b4d0
make tf and qkeras optionl, stop assuming keras is tf.keras
calad0i Oct 26, 2024
72eb053
less mandatory dependency
calad0i Oct 26, 2024
63af2ac
fix dsp_aware_pruning test import path
calad0i Oct 26, 2024
c11dddb
fix broken setup.cfg after rebase, rm pyparsing
calad0i Dec 15, 2024
d9aaa1a
purge qkeras workaround
calad0i Dec 15, 2024
4854423
switch to pyproject.toml
calad0i Dec 15, 2024
06f9cda
format
calad0i Dec 15, 2024
014c1db
rm useless flake8 config in pyprject.toml
calad0i Dec 15, 2024
d3c8881
Add hint on import failure
calad0i Dec 16, 2024
738e5b0
leftover
calad0i Dec 16, 2024
bc7778b
rm setup.py from manifest
calad0i Dec 16, 2024
b76b5cb
manifest fix 2
calad0i Dec 16, 2024
b7f60f5
keras v3 object based parser
calad0i Nov 7, 2024
a7206b4
sequential and i/o tensor name parsing fix
calad0i Nov 8, 2024
1605f96
support activation layers
calad0i Nov 8, 2024
a8aa489
consistent v2 weight reader behavior
calad0i Nov 8, 2024
eafe8b9
add v3 conv handlers
calad0i Nov 8, 2024
6b8a44c
add test
calad0i Nov 8, 2024
3f8acb5
pre-commit fix
calad0i Dec 17, 2024
d2ccfb4
revert keras v2 converter
calad0i Dec 6, 2024
0334960
make reshape handler compatiable with keras v3
calad0i Nov 13, 2024
074b4b6
add general transpose for vivado/vitis
calad0i Nov 13, 2024
29674db
general einsum support for io_parallel and latency
calad0i Nov 15, 2024
1fb23b9
add tests for einsumdense
calad0i Nov 15, 2024
5489803
keras v3 converter clean-up
calad0i Nov 19, 2024
12 changes: 6 additions & 6 deletions .pre-commit-config.yaml
@@ -9,13 +9,19 @@ repos:
args: ['--line-length=125',
'--skip-string-normalization']

- repo: https://github.com/tox-dev/pyproject-fmt
rev: v2.5.0
hooks:
- id: pyproject-fmt

- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: end-of-file-fixer
@@ -27,19 +33,13 @@ repos:
rev: 5.13.2
hooks:
- id: isort
args: ["--profile", "black", --line-length=125]

- repo: https://github.com/asottile/pyupgrade
rev: v3.19.0
hooks:
- id: pyupgrade
args: ["--py36-plus"]

- repo: https://github.com/asottile/setup-cfg-fmt
rev: v2.7.0
hooks:
- id: setup-cfg-fmt

- repo: https://github.com/pycqa/flake8
rev: 7.1.1
hooks:
…
5 changes: 3 additions & 2 deletions MANIFEST.in
@@ -1,7 +1,8 @@
include LICENSE README.md CONTRIBUTING.md CITATION.cff pyproject.toml setup.py setup.cfg .clang-format
include LICENSE README.md CONTRIBUTING.md CITATION.cff pyproject.toml .clang-format
graft example-models
graft test
graft contrib
recursive-include hls4ml/templates *
global-exclude .git .gitmodules .gitlab-ci.yml
recursive-include hls4ml *.py
global-exclude .git .gitmodules .gitlab-ci.yml *.pyc
include hls4ml/backends/vivado_accelerator/supported_boards.json
30 changes: 0 additions & 30 deletions hls4ml/__init__.py
@@ -1,33 +1,3 @@
# Temporary workaround for QKeras installation requirement, will be removed after 1.0.0
def maybe_install_qkeras():
import subprocess
import sys

QKERAS_PKG_NAME = 'QKeras'
# QKERAS_PKG_SOURCE = QKERAS_PKG_NAME
QKERAS_PKG_SOURCE = 'qkeras@git+https://github.com/fastmachinelearning/qkeras.git'

def pip_list():
p = subprocess.run([sys.executable, '-m', 'pip', 'list'], check=True, capture_output=True)
return p.stdout.decode()

def pip_install(package):
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

all_pkgs = pip_list()
if QKERAS_PKG_NAME not in all_pkgs:
print('QKeras installation not found, installing one...')
pip_install(QKERAS_PKG_SOURCE)
print('QKeras installed.')


try:
maybe_install_qkeras()
except Exception:
print('Could not find QKeras installation, make sure you have QKeras installed.')

# End of workaround

from hls4ml import converters, report, utils # noqa: F401, E402

try:
…
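Per the commit history ("import converter dependencies lazily", "Add hint on import failure"), the auto-install workaround removed above gives way to importing converter dependencies only when they are actually used and pointing the user at the missing package on failure. A minimal sketch of that pattern follows; the function name and message wording are assumptions, not the PR's actual code.

```python
def _import_qkeras():
    # Deferred import: only triggered when a QKeras model is actually converted,
    # so plain hls4ml usage no longer requires QKeras at import time.
    try:
        import qkeras
    except ImportError as e:
        # Hypothetical hint text; the PR's wording may differ.
        raise ImportError(
            'QKeras is required to convert QKeras models. '
            'Install it, e.g. `pip install qkeras`.'
        ) from e
    return qkeras
```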
120 changes: 120 additions & 0 deletions hls4ml/backends/vivado/passes/einsum_dense.py
@@ -0,0 +1,120 @@
from hls4ml.backends.backend import get_backend
from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
from hls4ml.model.layers import EinsumDense

from .reshaping_templates import transpose_config_gen

# Shared Dense template

conv_dense_config_template = """struct config{index}_dense : nnet::dense_config {{
static const unsigned n_in = {n_in};
static const unsigned n_out = {n_out};
static const unsigned reuse_factor = {reuse};
static const unsigned strategy = nnet::{strategy};
static const unsigned n_zeros = {nzeros};
static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor;
typedef {accum_t.name} accum_t;
typedef {bias_t.name} bias_t;
typedef {weight_t.name} weight_t;
template<class data_T, class res_T, class CONFIG_T>
using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
template<class x_T, class y_T>
using product = nnet::product::{product_type}<x_T, y_T>;
}};\n"""

# EinsumDense template

einsum_dense_config_template = '''
struct config{index} {{
typedef config{index}_tpose_inp tpose_inp_conf;
typedef config{index}_tpose_out tpose_out_conf;
typedef config{index}_dense dense_conf;

// Layer Sizes
static const unsigned n_free_data = {n_free_data};
static const unsigned n_free_kernel = {n_free_kernel};
static const unsigned n_contract = {n_contract};
static const unsigned n_inplace = {n_inplace};

// Resource reuse info
static const unsigned io_type = nnet::{iotype};
static const unsigned strategy = nnet::{strategy};
static const unsigned reuse_factor = {reuse_factor};
static const unsigned parallelization_factor = {parallelization_factor}; // Only useful when n_inplace > 1
static const bool store_weights_in_bram = false; // NOT USED
}};
'''

einsum_dense_function_template = 'nnet::einsum_dense<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'

einsum_dense_include_list = ['nnet_utils/nnet_einsum_dense.h', 'nnet_utils/nnet_dense.h']


class EinsumDenseConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(EinsumDense)
self.template = einsum_dense_config_template
self.dense_template = conv_dense_config_template

def format(self, node: EinsumDense):
default_params = self._default_config_params(node)

strategy = node.model.config.get_strategy(node)
io_type = node.model.config.get_config_value('IOType')

assert io_type == 'io_parallel', 'EinsumDense layer only supports io_parallel for now'
assert strategy.lower() == 'latency', 'EinsumDense layer only supports Latency strategy for now'

# EinsumDense config
params = default_params.copy()
params['strategy'] = strategy
params['n_free_data'] = node.attributes.attributes['n_free_data']
params['n_free_kernel'] = node.attributes.attributes['n_free_kernel']
params['n_contract'] = node.attributes.attributes['n_contract']
params['n_inplace'] = node.attributes.attributes['n_inplace']
params['parallelization_factor'] = node.attributes.attributes['parallelization_factor']

einsum_conf = self.template.format(**params)

# inp/out transpose config
inp_shape = node.attributes.attributes['inp_shape']
out_interpert_shape = node.attributes.attributes['out_interpert_shape']
inp_tpose_idxs = node.attributes.attributes['inp_tpose_idxs']
out_tpose_idxs = node.attributes.attributes['out_tpose_idxs']
tpose_inp_conf_name = f'config{node.index}_tpose_inp'
tpose_out_conf_name = f'config{node.index}_tpose_out'

inp_tpose_conf = transpose_config_gen(tpose_inp_conf_name, inp_shape, inp_tpose_idxs)
out_tpose_conf = transpose_config_gen(tpose_out_conf_name, out_interpert_shape, out_tpose_idxs)

# Dense config
dense_params = default_params.copy()
dense_params['strategy'] = strategy
dense_params['n_in'] = node.attributes.attributes['n_contract']
dense_params['n_out'] = node.attributes.attributes['n_free_kernel']
if node.attributes.attributes['n_inplace'] == 1:
dense_params['nzeros'] = node.get_weights('weight').nzeros # type: ignore
else:
dense_params['nzeros'] = '-1; // Not making sense when kernels are switching'
dense_params['product_type'] = get_backend('vivado').product_type(
node.get_input_variable().type.precision, node.get_weights('weight').type.precision # type: ignore
)

dense_params['dense_function'] = 'DenseLatency' # Latency only for now

dense_config = self.dense_template.format(**dense_params)

return '\n\n'.join((inp_tpose_conf, out_tpose_conf, dense_config, einsum_conf))


class EinsumDenseFunctionTemplate(FunctionCallTemplate):
def __init__(self):
super().__init__(EinsumDense, include_header=einsum_dense_include_list)
self.template = einsum_dense_function_template

def format(self, node):
params = self._default_function_params(node)
params['w'] = node.get_weights('weight').name
params['b'] = node.get_weights('bias').name

return self.template.format(**params)
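The three sub-configs emitted above (`tpose_inp_conf`, `dense_conf`, `tpose_out_conf`) indicate that an EinsumDense layer is lowered to an input transpose, a dense multiplication repeated over the `n_inplace` kernel groups, and an output transpose. Below is a minimal NumPy sketch of that decomposition; the shapes and the einsum subscripts are illustrative assumptions, not code from this PR.

```python
import numpy as np

# Illustrative dimensions only (not taken from the PR).
n_inplace, n_free_data, n_contract, n_free_kernel = 2, 3, 4, 5

x = np.random.rand(n_inplace, n_free_data, n_contract)    # input after the inp transpose
w = np.random.rand(n_inplace, n_contract, n_free_kernel)  # one kernel per in-place group

# Reference einsum: contract over n_contract within each in-place group.
y = np.einsum('gdc,gck->gdk', x, w)

# Same result as n_inplace independent dense (matmul) calls, which is what the
# generated config{index}_dense block is reused for.
y_dense = np.stack([x[g] @ w[g] for g in range(n_inplace)])
assert np.allclose(y, y_dense)
```

When `n_inplace == 1` this collapses to a single dense call, which is why the template reports a real `nzeros` only in that case.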
61 changes: 47 additions & 14 deletions hls4ml/backends/vivado/passes/reshaping_templates.py
@@ -1,3 +1,7 @@
from math import prod

import numpy as np

from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
from hls4ml.model.layers import Resize, Transpose, ZeroPadding1D, ZeroPadding2D

@@ -97,16 +101,45 @@ def format(self, node):

# Transpose templates

transpose_config_template = """struct config{index} : nnet::transpose_config {{
static const unsigned depth = {depth};
static const unsigned height = {height};
static const unsigned width = {width};
static constexpr unsigned perm[3] = {{{perm_str}}};
}};\n"""

transpose_function_template = 'nnet::transpose_{dim}<{input_t}, {output_t}, {config}>({input}, {output});'
transpose_include_list = ['nnet_utils/nnet_transpose.h', 'nnet_utils/nnet_transpose_stream.h']

transpose_config_template = """struct {config_name} {{
static const unsigned dims = {dims};
static const unsigned N = {N};
static const unsigned* const from_shape;
static const unsigned* const to_shape;
static const unsigned* const perm;
static const unsigned* const perm_strides;
}};

unsigned {config_name}_from_shape[{dims}] = {{{from_shape}}};
unsigned {config_name}_to_shape[{dims}] = {{{to_shape}}};
unsigned {config_name}_perm[{dims}] = {{{perm}}};
unsigned {config_name}_perm_strides[{dims}] = {{{perm_strides}}};

const unsigned* const {config_name}::from_shape = {config_name}_from_shape;
const unsigned* const {config_name}::to_shape = {config_name}_to_shape;
const unsigned* const {config_name}::perm = {config_name}_perm;
const unsigned* const {config_name}::perm_strides = {config_name}_perm_strides;
"""

transpose_function_template = 'nnet::transpose<{input_t}, {output_t}, {config_name}>({input}, {output});'

transpose_include_list = ['nnet_utils/nnet_array.h', 'nnet_utils/nnet_stream.h']

def transpose_config_gen(name: str, shape: tuple[int, ...], perm: tuple[int, ...]):
new_shape = tuple(shape[i] for i in perm)
strides = np.cumprod((shape[1:] + (1,))[::-1])[::-1]
perm_strides = tuple(int(strides[i]) for i in perm)
return transpose_config_template.format(
dims=len(shape),
N=prod(shape),
from_shape=', '.join(str(x) for x in shape),
perm=', '.join(str(x) for x in perm),
perm_strides=', '.join(str(x) for x in perm_strides),
to_shape=', '.join(str(x) for x in new_shape),
config_name=name,
)


class TransposeConfigTemplate(LayerConfigTemplate):
@@ -115,18 +148,18 @@ def __init__(self):
self.template = transpose_config_template

def format(self, node):
params = self._default_config_params(node)

return self.template.format(**params)
shape = tuple(node.get_input_variable().shape)
perm = tuple(node.get_attr('perm'))
name = f'config{node.index}'
return transpose_config_gen(name, shape, perm)


class TransposeFunctionTemplate(FunctionCallTemplate):
def __init__(self):
super().__init__(Transpose, include_header=transpose_include_list)
self.template = transpose_function_template
super().__init__(Transpose, include_header=transpose_include_list)

def format(self, node):
params = self._default_function_params(node)
params['dim'] = node.get_attr('dim')

params['config_name'] = f'config{node.index}'
return self.template.format(**params)
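The new `transpose_config_gen` helper replaces the previous fixed 3-D `depth`/`height`/`width` transpose config with an N-dimensional one described by `from_shape`, `to_shape`, `perm`, and `perm_strides`. The sketch below reproduces the same stride arithmetic in plain Python and uses it for a flat-index transpose; the gather loop is an assumption about how a generic `nnet::transpose` could consume these strides, not the actual HLS implementation.

```python
from math import prod

import numpy as np

# Illustrative shape and permutation (not taken from the PR).
shape, perm = (2, 3, 4), (2, 0, 1)
new_shape = tuple(shape[i] for i in perm)

# Same stride computation as transpose_config_gen: row-major strides of the
# input, gathered in the order of the output dimensions.
strides = np.cumprod((shape[1:] + (1,))[::-1])[::-1]
perm_strides = [int(strides[i]) for i in perm]

src = np.arange(prod(shape))
dst = np.empty_like(src)
for out_idx in range(prod(shape)):
    # Decompose the flat output index into output coordinates (last dim fastest)
    # and accumulate the corresponding flat input index via perm_strides.
    src_idx, rem = 0, out_idx
    for dim, stride in zip(new_shape[::-1], perm_strides[::-1]):
        src_idx += (rem % dim) * stride
        rem //= dim
    dst[out_idx] = src[src_idx]

assert np.array_equal(dst.reshape(new_shape), src.reshape(shape).transpose(perm))
```

The final assertion checks the flat-index gather against NumPy's own transpose for the example shape.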
File renamed without changes.