
Commit

rm pooling precision override in favor of fastmachinelearning#855
calad0i committed Apr 18, 2024
1 parent 6806aca commit dcf805e
Showing 2 changed files with 0 additions and 67 deletions.
26 changes: 0 additions & 26 deletions hls4ml/backends/catapult/catapult_backend.py
@@ -17,8 +17,6 @@
Embedding,
GarNet,
GarNetStack,
GlobalPooling1D,
GlobalPooling2D,
Layer,
Pooling1D,
Pooling2D,
@@ -30,7 +28,6 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
from hls4ml.report import parse_catapult_report
from hls4ml.utils.fixed_point_utils import ceil_log2


class CatapultBackend(FPGABackend):
@@ -409,37 +406,14 @@ def init_depconv2d(self, layer):
dw_output_t = NamedType(dw_out_name, dw_out_precision)
layer.set_attr('dw_output_t', dw_output_t)

def _set_pooling_accum_t(self, layer, pool_size):
extra_bits = ceil_log2(pool_size)
accum_t = layer.get_attr('accum_t')
accum_t.precision.width += extra_bits * 2
if isinstance(accum_t.precision, FixedPrecisionType):
accum_t.precision.integer += extra_bits

@layer_optimizer(Pooling1D)
def init_pooling1d(self, layer):
pool_size = layer.get_attr('pool_width')
self._set_pooling_accum_t(layer, pool_size)

layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

@layer_optimizer(Pooling2D)
def init_pooling2d(self, layer):
pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width')
self._set_pooling_accum_t(layer, pool_size)

layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

@layer_optimizer(GlobalPooling1D)
def init_global_pooling1d(self, layer):
pool_size = layer.get_attr('n_in')
self._set_pooling_accum_t(layer, pool_size)

@layer_optimizer(GlobalPooling2D)
def init_global_pooling2d(self, layer):
pool_size = layer.get_attr('in_height') * layer.get_attr('in_width')
self._set_pooling_accum_t(layer, pool_size)

@layer_optimizer(Softmax)
def init_softmax(self, layer):
if layer.model.config.get_config_value('IOType') == 'io_parallel':
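For context, the Catapult override removed above grew the pooling accumulator by ceil_log2(pool_size) bits on top of the configured accum_t. Below is a minimal standalone sketch of that bit-growth rule; the FixedPrecision dataclass and the local ceil_log2 helper are plain-Python stand-ins written for illustration, not the hls4ml API.

from dataclasses import dataclass


@dataclass
class FixedPrecision:
    # Plain stand-in for hls4ml's FixedPrecisionType: total width, integer bits, signedness.
    width: int
    integer: int
    signed: bool = True


def ceil_log2(x: int) -> int:
    # Ceiling of log2(x); mirrors the helper imported from hls4ml.utils.fixed_point_utils above.
    return (x - 1).bit_length()


def widen_pooling_accum(accum: FixedPrecision, pool_size: int) -> FixedPrecision:
    # Mirrors the removed Catapult _set_pooling_accum_t: add ceil_log2(pool_size) integer bits
    # (headroom for summing pool_size values) and the same number of fractional bits.
    extra_bits = ceil_log2(pool_size)
    return FixedPrecision(
        width=accum.width + extra_bits * 2,
        integer=accum.integer + extra_bits,
        signed=accum.signed,
    )


# A 2x2 pooling window (pool_size = 4) on a 16-bit, 6-integer-bit accumulator
# gains 2 integer bits and 4 total bits: FixedPrecision(width=20, integer=8, signed=True).
print(widen_pooling_accum(FixedPrecision(width=16, integer=6), pool_size=4))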
41 changes: 0 additions & 41 deletions hls4ml/backends/vivado/vivado_backend.py
@@ -17,8 +17,6 @@
Embedding,
GarNet,
GarNetStack,
GlobalPooling1D,
GlobalPooling2D,
Layer,
Pooling1D,
Pooling2D,
@@ -30,7 +28,6 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
from hls4ml.report import parse_vivado_report
from hls4ml.utils.fixed_point_utils import ceil_log2


class VivadoBackend(FPGABackend):
@@ -376,52 +373,14 @@ def init_depconv2d(self, layer):
) # TODO Once we have SeparableConv implementation for io_parallel this should be set properly
layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

def _set_pooling_accum_t(self, layer: Layer, pool_size):
extra_bits = ceil_log2(pool_size)

input_layer_name = layer.inputs[0]
input_t = layer.model.get_layer_output_variable(input_layer_name).type
accum_t = layer.attributes['accum_t']
pool_op = layer.attributes['pool_op'].lower()

accum_t.name = f'{layer.name}_accum'
# accum_t.name was likely 'model_default'; rename it so this change does not override parameters of other layers by chance

accum_t = layer.get_attr('accum_t')

if pool_op == 'max':
accum_t.precision = input_t.precision
else:
# Average pool
if isinstance(accum_t.precision, FixedPrecisionType):
accum_t.precision.integer = input_t.precision.integer + extra_bits
accum_t.precision.width = input_t.precision.width + extra_bits * 2
accum_t.precision.signed = input_t.precision.signed

@layer_optimizer(Pooling1D)
def init_pooling1d(self, layer):
pool_size = layer.get_attr('pool_width')
self._set_pooling_accum_t(layer, pool_size)

layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

@layer_optimizer(Pooling2D)
def init_pooling2d(self, layer):
pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width')
self._set_pooling_accum_t(layer, pool_size)

layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

@layer_optimizer(GlobalPooling1D)
def init_global_pooling1d(self, layer):
pool_size = layer.get_attr('n_in')
self._set_pooling_accum_t(layer, pool_size)

@layer_optimizer(GlobalPooling2D)
def init_global_pooling2d(self, layer):
pool_size = layer.get_attr('in_height') * layer.get_attr('in_width')
self._set_pooling_accum_t(layer, pool_size)

@layer_optimizer(Softmax)
def init_softmax(self, layer):
if layer.model.config.get_config_value('IOType') == 'io_parallel':
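The Vivado variant removed above derives the accumulator from the layer input rather than the configured accum_t: max pooling simply reuses the input precision, while average pooling widens it by ceil_log2(pool_size) integer bits (and twice that in total width) so the running sum fits before the final division. A hedged worked example of that rule follows; the Precision tuple and the ap_fixed<W,I> reading are illustrative stand-ins, not hls4ml types.

import math
from typing import NamedTuple


class Precision(NamedTuple):
    # Illustrative ap_fixed<width, integer> stand-in; signedness is carried over unchanged.
    width: int
    integer: int


def pooling_accum_precision(input_t: Precision, pool_op: str, pool_size: int) -> Precision:
    # Sketch of the removed Vivado rule: max pooling reuses the input precision, while
    # average pooling adds ceil(log2(pool_size)) integer bits and twice that many total bits.
    if pool_op.lower() == 'max':
        return input_t
    extra_bits = math.ceil(math.log2(pool_size))
    return Precision(width=input_t.width + 2 * extra_bits,
                     integer=input_t.integer + extra_bits)


# Worked example with an ap_fixed<16,6> input and a 3x3 window (pool_size = 9, extra_bits = 4):
print(pooling_accum_precision(Precision(16, 6), 'average', 9))  # Precision(width=24, integer=10)
print(pooling_accum_precision(Precision(16, 6), 'max', 9))      # Precision(width=16, integer=6)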
