Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
jmduarte committed Dec 19, 2023
1 parent f465f8b commit 449c223
Showing 1 changed file with 15 additions and 8 deletions.
23 changes: 15 additions & 8 deletions convert_full_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,32 +26,39 @@ def print_dict(d, indent=0):


# load full model:
# Quantized DeepMET model trained with a normalization factor of 1000.
# custom_objects (co) supplies the custom/QKeras layer classes needed to
# deserialize it; compile=False because we only run inference here.
model = tensorflow.keras.models.load_model('models/baseline_DeepMET_quantized/trained_quantized_DeepMET_normfac1000.h5', compile=False, custom_objects=co)
# model = tensorflow.keras.models.load_model('models/baseline_DeepMET_quantized/baseline_DeepMET_quantized.h5', compile=False, custom_objects=co)

# hls4ml conversion knobs
reuse_factor = 1
precision = 'ap_fixed<32,16>'  # default fixed-point type: 32 bits total, 16 integer bits
io_type = 'io_parallel'
strategy = 'Latency'
output_dir = 'hls_output_{}_{}_rf{}_{}'.format(io_type, strategy, reuse_factor, precision)
batch_size = 1
synth = False  # do not launch HLS synthesis
trace = True   # capture per-layer outputs for Keras-vs-HLS comparison
normFac = 1000  # must match the normalization factor baked into the model file above

# check everything works
model.summary()
# NOTE(review): assumes output_dir already exists — confirm it is created elsewhere
model.save('{}/model.h5'.format(output_dir))

# Per-layer ("name" granularity) config so individual layers can be overridden below.
config = hls4ml.utils.config_from_keras_model(model,
                                              granularity='name',
                                              default_reuse_factor=reuse_factor,
                                              default_precision=precision)
config['Model']['Strategy'] = strategy
# Enable tracing on every layer so intermediate activations can be dumped.
for name in config['LayerName'].keys():
    config['LayerName'][name]['Trace'] = trace
# Categorical inputs are small integer codes; 4 unsigned bits suffice.
config['LayerName']['input_cat0']['Precision']['result'] = 'ap_uint<4>'
config['LayerName']['input_cat1']['Precision']['result'] = 'ap_uint<4>'
#config['LayerName']['input_cont']['Precision']['result'] = 'ap_fixed<20,10>'
#config['LayerName']['q_dense']['Precision']['accum'] = 'ap_fixed<32,16>'
# Widen weight/bias types of the first quantized dense layer to the full default width.
config['LayerName']['q_dense']['Precision']['weight'] = 'ap_fixed<32,16>'
config['LayerName']['q_dense']['Precision']['bias'] = 'ap_fixed<32,16>'
#config['LayerName']['q_dense_1']['Precision']['accum'] = 'ap_fixed<32,16>'
#config['LayerName']['q_dense_1']['Precision']['weight'] = 'ap_fixed<32,16>'
#config['LayerName']['q_dense_1']['Precision']['bias'] = 'ap_fixed<32,16>'
config['LayerName']['multiply']['n_elem'] = 100
config['LayerName']['output']['n_filt'] = 2
# skip optimize_pointwise_conv
Expand Down Expand Up @@ -82,7 +89,7 @@ def print_dict(d, indent=0):
# NOTE(review): targets are negated — presumably the training convention; confirm
y = -f['Y'][:1000]

# preprocessing: apply the same normalization factor the model was trained with
X_pre = list(preProcessing(X, normFac=normFac))
# Keras/hls4ml expect C-contiguous arrays; copies only if needed.
X_pre = [np.ascontiguousarray(x) for x in X_pre]

y_pred = model.predict(X_pre)
Expand Down

0 comments on commit 449c223

Please sign in to comment.