From dcf805e0f9d20c1dd410a4f52225c113fb5e6a80 Mon Sep 17 00:00:00 2001
From: Chang Sun
Date: Wed, 17 Apr 2024 14:53:25 -0700
Subject: [PATCH] rm pooling precision override in favor of #855

---
 hls4ml/backends/catapult/catapult_backend.py | 26 -------------
 hls4ml/backends/vivado/vivado_backend.py     | 41 --------------------
 2 files changed, 67 deletions(-)

diff --git a/hls4ml/backends/catapult/catapult_backend.py b/hls4ml/backends/catapult/catapult_backend.py
index 5556154dcb..ac0eaded07 100644
--- a/hls4ml/backends/catapult/catapult_backend.py
+++ b/hls4ml/backends/catapult/catapult_backend.py
@@ -17,8 +17,6 @@
     Embedding,
     GarNet,
     GarNetStack,
-    GlobalPooling1D,
-    GlobalPooling2D,
     Layer,
     Pooling1D,
     Pooling2D,
@@ -30,7 +28,6 @@
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
 from hls4ml.report import parse_catapult_report
-from hls4ml.utils.fixed_point_utils import ceil_log2


 class CatapultBackend(FPGABackend):
@@ -409,37 +406,14 @@ def init_depconv2d(self, layer):
         dw_output_t = NamedType(dw_out_name, dw_out_precision)
         layer.set_attr('dw_output_t', dw_output_t)

-    def _set_pooling_accum_t(self, layer, pool_size):
-        extra_bits = ceil_log2(pool_size)
-        accum_t = layer.get_attr('accum_t')
-        accum_t.precision.width += extra_bits * 2
-        if isinstance(accum_t.precision, FixedPrecisionType):
-            accum_t.precision.integer += extra_bits
-
     @layer_optimizer(Pooling1D)
     def init_pooling1d(self, layer):
-        pool_size = layer.get_attr('pool_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

     @layer_optimizer(Pooling2D)
     def init_pooling2d(self, layer):
-        pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

-    @layer_optimizer(GlobalPooling1D)
-    def init_global_pooling1d(self, layer):
-        pool_size = layer.get_attr('n_in')
-        self._set_pooling_accum_t(layer, pool_size)
-
-    @layer_optimizer(GlobalPooling2D)
-    def init_global_pooling2d(self, layer):
-        pool_size = layer.get_attr('in_height') * layer.get_attr('in_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
diff --git a/hls4ml/backends/vivado/vivado_backend.py b/hls4ml/backends/vivado/vivado_backend.py
index c7321b8e44..e6cf4e6ff7 100644
--- a/hls4ml/backends/vivado/vivado_backend.py
+++ b/hls4ml/backends/vivado/vivado_backend.py
@@ -17,8 +17,6 @@
     Embedding,
     GarNet,
     GarNetStack,
-    GlobalPooling1D,
-    GlobalPooling2D,
     Layer,
     Pooling1D,
     Pooling2D,
@@ -30,7 +28,6 @@
 from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
 from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
 from hls4ml.report import parse_vivado_report
-from hls4ml.utils.fixed_point_utils import ceil_log2


 class VivadoBackend(FPGABackend):
@@ -376,52 +373,14 @@ def init_depconv2d(self, layer):
         )  # TODO Once we have SeparableConv implementation for io_parallel this should be set properly
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

-    def _set_pooling_accum_t(self, layer: Layer, pool_size):
-        extra_bits = ceil_log2(pool_size)
-
-        input_layer_name = layer.inputs[0]
-        input_t = layer.model.get_layer_output_variable(input_layer_name).type
-        accum_t = layer.attributes['accum_t']
-        pool_op = layer.attributes['pool_op'].lower()
-
-        accum_t.name = f'{layer.name}_accum'
-        # accum_t.name was likely model_default, change to avoid override parameters for other by chance
-
-        accum_t = layer.get_attr('accum_t')
-
-        if pool_op == 'max':
-            accum_t.precision = input_t.precision
-        else:
-            # Average pool
-            if isinstance(accum_t.precision, FixedPrecisionType):
-                accum_t.precision.integer = input_t.precision.integer + extra_bits
-                accum_t.precision.width = input_t.precision.width + extra_bits * 2
-                accum_t.precision.signed = input_t.precision.signed
-
     @layer_optimizer(Pooling1D)
     def init_pooling1d(self, layer):
-        pool_size = layer.get_attr('pool_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

     @layer_optimizer(Pooling2D)
     def init_pooling2d(self, layer):
-        pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
         layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower())

-    @layer_optimizer(GlobalPooling1D)
-    def init_global_pooling1d(self, layer):
-        pool_size = layer.get_attr('n_in')
-        self._set_pooling_accum_t(layer, pool_size)
-
-    @layer_optimizer(GlobalPooling2D)
-    def init_global_pooling2d(self, layer):
-        pool_size = layer.get_attr('in_height') * layer.get_attr('in_width')
-        self._set_pooling_accum_t(layer, pool_size)
-
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
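
Note (outside the patch itself): the removed _set_pooling_accum_t helpers derived the pooling
accumulator precision from the input precision. A minimal standalone sketch of that rule follows;
widened_accum_precision and the local ceil_log2 are illustrative stand-ins for this note only,
not hls4ml API, and the example values are hypothetical.

    import math

    def ceil_log2(x):
        # Stand-in for hls4ml.utils.fixed_point_utils.ceil_log2: bits needed to count pool_size items.
        return int(math.ceil(math.log2(x)))

    def widened_accum_precision(input_width, input_integer, pool_size, pool_op='average'):
        # Mirrors the removed Vivado logic: 'max' pooling reuses the input precision, while
        # average pooling adds ceil_log2(pool_size) integer bits (and twice that to the total
        # width) so the running sum of pool_size inputs does not overflow before the division.
        if pool_op == 'max':
            return input_width, input_integer
        extra_bits = ceil_log2(pool_size)
        return input_width + extra_bits * 2, input_integer + extra_bits

    # Example: an ap_fixed<16,6> input with a 2x2 average pool -> (20, 8), i.e. a 20-bit
    # accumulator with 8 integer bits.
    print(widened_accum_precision(16, 6, pool_size=4))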