diff --git a/.gitignore b/.gitignore index 89543e26bf3d1..18f4f414b9425 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,14 @@ __init__.py *.rej *.orig + # ignore files under the top level $CMSSW_BASE/src directory, but not its subdirectories /* !/*/ + +# ignore some IDE related files +*.xcodeproj +.vscode + +#ignore mac specific file +.DS_Store diff --git a/Alignment/CocoaFit/src/Fit.cc b/Alignment/CocoaFit/src/Fit.cc index 195d9bdc809bf..880cb9b1144dd 100644 --- a/Alignment/CocoaFit/src/Fit.cc +++ b/Alignment/CocoaFit/src/Fit.cc @@ -1210,8 +1210,9 @@ void Fit::dumpFittedValues(ALIFileOut& fileout, ALIbool printErrors, ALIbool pri // const Entry* entry; int ii, siz; std::vector::const_iterator vocite; + ALIstring sys = ALIstring("system"); for (vocite = Model::OptOList().begin(); vocite != Model::OptOList().end(); ++vocite) { - if ((*vocite)->type() == ALIstring("system")) + if ((*vocite)->type() == sys) continue; fileout << " %%%% Optical Object: " << (*vocite)->longName() << std::endl; @@ -1268,8 +1269,9 @@ void Fit::dumpFittedValuesInAllAncestorFrames(ALIFileOut& fileout, ALIbool print //---------- Iterate over OptO list std::vector entries; std::vector::const_iterator vocite; + ALIstring sys = ALIstring("system"); for (vocite = Model::OptOList().begin(); vocite != Model::OptOList().end(); ++vocite) { - if ((*vocite)->type() == ALIstring("system")) + if ((*vocite)->type() == sys) continue; fileout << " %%%% Optical Object: " << (*vocite)->longName() << std::endl; diff --git a/Alignment/CocoaToDDL/src/UnitConverter.cc b/Alignment/CocoaToDDL/src/UnitConverter.cc index c92ca008101c4..b40233f29ed8c 100644 --- a/Alignment/CocoaToDDL/src/UnitConverter.cc +++ b/Alignment/CocoaToDDL/src/UnitConverter.cc @@ -2,7 +2,6 @@ #include "Alignment/CocoaToDDL/interface/CocoaUnitsTable.h" #include "CLHEP/Units/GlobalSystemOfUnits.h" #include -#include /* UnitConverter::UnitConverter(const G4BestUnit & bu) @@ -23,7 +22,7 @@ UnitConverter::UnitConverter(ALIdouble val, const 
ALIstring& category) UnitConverter::~UnitConverter() { delete bu_; } std::string UnitConverter::ucstring() { - std::ostrstream str; + std::ostringstream str; if (angl_) { str.precision(11); @@ -42,7 +41,7 @@ std::string UnitConverter::ucstring() { /* ostream & operator<<(ostream & os, const UnitConverter & uc) { - ostrstream temp; + std::ostringstream temp; //temp << uc.bu_; //temp << '\0'; //string s(temp.str()); diff --git a/Alignment/CocoaUtilities/src/ALIFileIn.cc b/Alignment/CocoaUtilities/src/ALIFileIn.cc index b8395701c099e..de453718274cb 100644 --- a/Alignment/CocoaUtilities/src/ALIFileIn.cc +++ b/Alignment/CocoaUtilities/src/ALIFileIn.cc @@ -9,8 +9,7 @@ #include "Alignment/CocoaUtilities/interface/ALIFileIn.h" #include -#include -//#include +#include //#include @@ -112,7 +111,7 @@ ALIint ALIFileIn::getWordsInLine(std::vector& wordlist) { } //---------- Convert line read to istrstream to split it in words - std::istrstream istr_line(ltemp); + std::istringstream istr_line(ltemp); //--------- count how many words are there in ltemp (this sohuld not be needed, but sun compiler has problems) !! this has to be nvestigated... 
ALIint NoWords = 0; diff --git a/Alignment/CocoaUtilities/src/ALIFileOut.cc b/Alignment/CocoaUtilities/src/ALIFileOut.cc index 1f4653696f3e2..b6aecea980270 100644 --- a/Alignment/CocoaUtilities/src/ALIFileOut.cc +++ b/Alignment/CocoaUtilities/src/ALIFileOut.cc @@ -8,7 +8,7 @@ #include "Alignment/CocoaUtilities/interface/ALIFileOut.h" #include -#include +#include std::vector ALIFileOut::theInstances; diff --git a/Alignment/CommonAlignmentProducer/interface/AlignmentTracksFromVertexCompositeCandidateSelector.h b/Alignment/CommonAlignmentProducer/interface/AlignmentTracksFromVertexCompositeCandidateSelector.h new file mode 100644 index 0000000000000..74d3241cc623e --- /dev/null +++ b/Alignment/CommonAlignmentProducer/interface/AlignmentTracksFromVertexCompositeCandidateSelector.h @@ -0,0 +1,38 @@ +#ifndef Alignment_CommonAlignmentAlgorithm_AlignmentTrackFromVertexCompositeCandidateSelector_h +#define Alignment_CommonAlignmentAlgorithm_AlignmentTrackFromVertexCompositeCandidateSelector_h + +#include "DataFormats/TrackReco/interface/Track.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "DataFormats/Candidate/interface/VertexCompositeCandidate.h" +#include "DataFormats/RecoCandidate/interface/RecoChargedCandidate.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include + +namespace edm { + class Event; + class ParameterSet; +} // namespace edm + +class TrackingRecHit; + +class AlignmentTrackFromVertexCompositeCandidateSelector { +public: + typedef std::vector Tracks; + + /// constructor + AlignmentTrackFromVertexCompositeCandidateSelector(const edm::ParameterSet& cfg, edm::ConsumesCollector& iC); + + /// destructor + ~AlignmentTrackFromVertexCompositeCandidateSelector(); + + /// select tracks + Tracks select(const edm::Handle& tc, + const edm::Event& evt, + const edm::EventSetup& setup) const; + +private: + const edm::EDGetTokenT vccToken_; +}; + +#endif diff --git 
a/Alignment/CommonAlignmentProducer/plugins/AlignmentTracksFromVertexCompositeCandidateSelectorModule.cc b/Alignment/CommonAlignmentProducer/plugins/AlignmentTracksFromVertexCompositeCandidateSelectorModule.cc new file mode 100644 index 0000000000000..77e56bfc7fe89 --- /dev/null +++ b/Alignment/CommonAlignmentProducer/plugins/AlignmentTracksFromVertexCompositeCandidateSelectorModule.cc @@ -0,0 +1,51 @@ +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "CommonTools/UtilAlgos/interface/ObjectSelector.h" + +//the selectores used to select the tracks +#include "Alignment/CommonAlignmentProducer/interface/AlignmentTracksFromVertexCompositeCandidateSelector.h" + +// the following include is necessary to clone all track branches +// including recoTrackExtras and TrackingRecHitsOwned. +// if you remove it the code will compile, but the cloned +// tracks have only the recoTracks branch! +#include "CommonTools/RecoAlgos/interface/TrackSelector.h" + +struct TrackFromVertexCompositeCandidateConfigSelector { + typedef std::vector container; + typedef container::const_iterator const_iterator; + typedef reco::TrackCollection collection; + + TrackFromVertexCompositeCandidateConfigSelector(const edm::ParameterSet &cfg, edm::ConsumesCollector &&iC) + : theBaseSelector(cfg, iC) {} + + const_iterator begin() const { return theSelectedTracks.begin(); } + const_iterator end() const { return theSelectedTracks.end(); } + size_t size() const { return theSelectedTracks.size(); } + + void select(const edm::Handle &c, const edm::Event &evt, const edm::EventSetup &setup) { + theSelectedTracks = theBaseSelector.select(c, evt, setup); + } + +private: + container theSelectedTracks; + + AlignmentTrackFromVertexCompositeCandidateSelector theBaseSelector; +}; + +class AlignmentTrackFromVertexCompositeCandidateSelectorModule + : public ObjectSelector { +public: + AlignmentTrackFromVertexCompositeCandidateSelectorModule(const 
edm::ParameterSet &ps) + : ObjectSelector(ps) {} + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + desc.setComment("Alignment Tracks Selector from VertexCompositeCandidates"); + desc.add("src", edm::InputTag("generalTracks")); + desc.add("filter", false); + desc.add("vertexCompositeCandidates", edm::InputTag("generalV0Candidates:Kshort")); + descriptions.addWithDefaultLabel(desc); + } +}; + +DEFINE_FWK_MODULE(AlignmentTrackFromVertexCompositeCandidateSelectorModule); diff --git a/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHGCombined_Output_cff.py b/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHGCombined_Output_cff.py new file mode 100644 index 0000000000000..efe55506e3dfc --- /dev/null +++ b/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHGCombined_Output_cff.py @@ -0,0 +1,12 @@ +import FWCore.ParameterSet.Config as cms + +OutALCARECOPromptCalibProdSiPixelAliHGComb_noDrop = cms.PSet( + SelectEvents = cms.untracked.PSet( + SelectEvents = cms.vstring('pathALCARECOPromptCalibProdSiPixelAliHGMinBias','pathALCARECOPromptCalibProdSiPixelAliHGDiMu') + ), + outputCommands = cms.untracked.vstring('keep *_SiPixelAliMillePedeFileConverterHGDimuon_*_*', + 'keep *_SiPixelAliMillePedeFileConverterHG_*_*') +) + +OutALCARECOPromptCalibProdSiPixelAliHGComb=OutALCARECOPromptCalibProdSiPixelAliHGComb_noDrop.clone() +OutALCARECOPromptCalibProdSiPixelAliHGComb.outputCommands.insert(0, "drop *") diff --git a/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHGDiMuon_cff.py b/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHGDiMuon_cff.py new file mode 100644 index 0000000000000..7cc13a08481c8 --- /dev/null +++ b/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHGDiMuon_cff.py @@ -0,0 +1,135 @@ +import FWCore.ParameterSet.Config as cms + +# 
------------------------------------------------------------------------------ +# configure a filter to run only on the events selected by TkAlZMuMu AlcaReco +from HLTrigger.HLTfilters.hltHighLevel_cfi import * +ALCARECOTkAlZMuMuFilterForSiPixelAli = hltHighLevel.clone( + HLTPaths = ['pathALCARECOTkAlZMuMu'], + throw = True, ## dont throw on unknown path names, + TriggerResultsTag = "TriggerResults::RECO" +) + +from Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAli_cff import * +from Alignment.CommonAlignmentProducer.LSNumberFilter_cfi import * + +# Ingredient: offlineBeamSpot +from RecoVertex.BeamSpotProducer.BeamSpot_cfi import offlineBeamSpot + +# Ingredient: AlignmentTrackSelector +# track selector for HighPurity tracks +#-- AlignmentTrackSelector +SiPixelAliHighPuritySelectorHGDimuon = SiPixelAliHighPuritySelector.clone( + src = 'ALCARECOTkAlZMuMu', + etaMax = 3.0, + etaMin = -3.0, + filter = True, + pMin = 8.0, +) + +# track selection for alignment +SiPixelAliTrackSelectorHGDimuon = SiPixelAliTrackSelector.clone( + src = 'SiPixelAliTrackFitterHGDimuon', + applyMultiplicityFilter = True, + d0Max = 50.0, + d0Min = -50.0, + etaMax = 3.0, + etaMin = -3.0, + filter = True, + maxMultiplicity = 2, + minHitChargeStrip = 20.0, + minHitIsolation = 0.01, + minMultiplicity = 2, + nHighestPt = 2, + nHitMin = 10, + pMin = 3.0, + ptMin = 15.0, + TwoBodyDecaySelector = dict(applyChargeFilter = True, + applyMassrangeFilter = True, + maxXMass = 95.8, + minXMass = 85.8), + minHitsPerSubDet = dict(inPIXEL = 1) +) + +# Ingredient: SiPixelAliTrackRefitter0 +# refitting +SiPixelAliTrackRefitterHGDimuon0 = SiPixelAliTrackRefitter0.clone( + src = 'SiPixelAliHighPuritySelectorHGDimuon' +) +SiPixelAliTrackRefitterHGDimuon1 = SiPixelAliTrackRefitterHGDimuon0.clone( + src = 'SiPixelAliTrackSelectorHGDimuon' +) + +#-- Alignment producer +SiPixelAliMilleAlignmentProducerHGDimuon = SiPixelAliMilleAlignmentProducer.clone( + ParameterBuilder = dict( + Selector = cms.PSet( + 
alignParams = cms.vstring( + "TrackerP1PXBLadder,111111", + "TrackerP1PXECPanel,111111", + ) + ) + ), + tjTkAssociationMapTag = 'SiPixelAliTrackRefitterHGDimuon1', + algoConfig = MillePedeAlignmentAlgorithm.clone( + binaryFile = 'milleBinaryHGDimuon_0.dat', + treeFile = 'treeFileHGDimuon.root', + monitorFile = 'millePedeMonitorHGDimuon.root', + minNumHits = 8, + skipGlobalPositionRcdCheck = True, + TrajectoryFactory = cms.PSet( + AllowZeroMaterial = cms.bool(False), + Chi2Cut = cms.double(10000.0), + ConstructTsosWithErrors = cms.bool(False), + EstimatorParameters = cms.PSet( + MaxIterationDifference = cms.untracked.double(0.01), + MaxIterations = cms.untracked.int32(100), + RobustificationConstant = cms.untracked.double(1.0), + UseInvariantMass = cms.untracked.bool(True) + ), + IncludeAPEs = cms.bool(False), + MaterialEffects = cms.string('LocalGBL'), + NSigmaCut = cms.double(100.0), + ParticleProperties = cms.PSet( + PrimaryMass = cms.double(91.1061), + PrimaryWidth = cms.double(1.7678), + SecondaryMass = cms.double(0.105658) + ), + PropagationDirection = cms.string('alongMomentum'), + TrajectoryFactoryName = cms.string('TwoBodyDecayTrajectoryFactory'), + UseBeamSpot = cms.bool(False), + UseHitWithoutDet = cms.bool(True), + UseInvalidHits = cms.bool(True), + UseProjectedHits = cms.bool(True), + UseRefittedState = cms.bool(True) + ) + ) +) + +# Ingredient: SiPixelAliTrackerTrackHitFilter +SiPixelAliTrackerTrackHitFilterHGDimuon = SiPixelAliTrackerTrackHitFilter.clone( + src = 'SiPixelAliTrackRefitterHGDimuon0', + TrackAngleCut = 0.087, + minimumHits = 10 +) + +# Ingredient: SiPixelAliSiPixelAliTrackFitter +SiPixelAliTrackFitterHGDimuon = SiPixelAliTrackFitter.clone( + src = 'SiPixelAliTrackerTrackHitFilterHGDimuon' +) + +SiPixelAliMillePedeFileConverterHGDimuon = cms.EDProducer("MillePedeFileConverter", + fileDir = cms.string(SiPixelAliMilleAlignmentProducerHGDimuon.algoConfig.fileDir.value()), + inputBinaryFile = 
cms.string(SiPixelAliMilleAlignmentProducerHGDimuon.algoConfig.binaryFile.value()), + fileBlobLabel = cms.string('')) + +seqALCARECOPromptCalibProdSiPixelAliHGDiMu = cms.Sequence(ALCARECOTkAlZMuMuFilterForSiPixelAli* + lsNumberFilter* + offlineBeamSpot* + SiPixelAliHighPuritySelectorHGDimuon* + SiPixelAliTrackRefitterHGDimuon0* + SiPixelAliTrackerTrackHitFilterHGDimuon* + SiPixelAliTrackFitterHGDimuon* + SiPixelAliTrackSelectorHGDimuon* + SiPixelAliTrackRefitterHGDimuon1* + SiPixelAliMilleAlignmentProducerHGDimuon* + SiPixelAliMillePedeFileConverterHGDimuon) diff --git a/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHG_cff.py b/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHG_cff.py index 9fe329e660658..2c47ea7b6da76 100644 --- a/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHG_cff.py +++ b/Alignment/CommonAlignmentProducer/python/ALCARECOPromptCalibProdSiPixelAliHG_cff.py @@ -59,21 +59,18 @@ ) SiPixelAliMillePedeFileConverterHG = cms.EDProducer("MillePedeFileConverter", - fileDir = cms.string(SiPixelAliMilleAlignmentProducerHG.algoConfig.fileDir.value()), - inputBinaryFile = cms.string(SiPixelAliMilleAlignmentProducerHG.algoConfig.binaryFile.value()), - fileBlobLabel = cms.string(''), - ) - - + fileDir = cms.string(SiPixelAliMilleAlignmentProducerHG.algoConfig.fileDir.value()), + inputBinaryFile = cms.string(SiPixelAliMilleAlignmentProducerHG.algoConfig.binaryFile.value()), + fileBlobLabel = cms.string('')) seqALCARECOPromptCalibProdSiPixelAliHG = cms.Sequence(ALCARECOTkAlMinBiasFilterForSiPixelAliHG* - lsNumberFilter* - offlineBeamSpot* - SiPixelAliHighPuritySelectorHG* - SiPixelAliTrackRefitterHG0* - SiPixelAliTrackerTrackHitFilterHG* - SiPixelAliTrackFitterHG* - SiPixelAliTrackSelectorHG* - SiPixelAliTrackRefitterHG1* - SiPixelAliMilleAlignmentProducerHG* - SiPixelAliMillePedeFileConverterHG) + lsNumberFilter* + offlineBeamSpot* + SiPixelAliHighPuritySelectorHG* + 
SiPixelAliTrackRefitterHG0* + SiPixelAliTrackerTrackHitFilterHG* + SiPixelAliTrackFitterHG* + SiPixelAliTrackSelectorHG* + SiPixelAliTrackRefitterHG1* + SiPixelAliMilleAlignmentProducerHG* + SiPixelAliMillePedeFileConverterHG) diff --git a/Alignment/CommonAlignmentProducer/python/ALCARECOTkAlV0s_Output_cff.py b/Alignment/CommonAlignmentProducer/python/ALCARECOTkAlV0s_Output_cff.py new file mode 100644 index 0000000000000..a90c4338d49ec --- /dev/null +++ b/Alignment/CommonAlignmentProducer/python/ALCARECOTkAlV0s_Output_cff.py @@ -0,0 +1,52 @@ +import FWCore.ParameterSet.Config as cms + +# AlCaReco for track based alignment using MinBias events +OutALCARECOTkAlV0s_noDrop = cms.PSet( + SelectEvents = cms.untracked.PSet( + SelectEvents = cms.vstring('pathALCARECOTkAlK0s', + 'pathALCARECOTkAlLambdas') + ), + outputCommands = cms.untracked.vstring( + 'keep recoTracks_ALCARECOTkAlKShortTracks_*_*', + 'keep recoTrackExtras_ALCARECOTkAlKShortTracks_*_*', + 'keep TrackingRecHitsOwned_ALCARECOTkAlKShortTracks_*_*', + 'keep SiPixelClusteredmNewDetSetVector_ALCARECOTkAlKShortTracks_*_*', + 'keep SiStripClusteredmNewDetSetVector_ALCARECOTkAlKShortTracks_*_*', + 'keep recoTracks_ALCARECOTkAlLambdaTracks_*_*', + 'keep recoTrackExtras_ALCARECOTkAlLambdaTracks_*_*', + 'keep TrackingRecHitsOwned_ALCARECOTkAlLambdaTracks_*_*', + 'keep SiPixelClusteredmNewDetSetVector_ALCARECOTkAlLambdaTracks_*_*', + 'keep SiStripClusteredmNewDetSetVector_ALCARECOTkAlLambdaTracks_*_*', + 'keep *_generalV0Candidates_*_*', + 'keep L1AcceptBunchCrossings_*_*_*', + 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*', + 'keep *_TriggerResults_*_*', + 'keep DcsStatuss_scalersRawToDigi_*_*', + 'keep *_offlinePrimaryVertices_*_*', + 'keep *_offlineBeamSpot_*_*') +) + +# in Run3, SCAL digis replaced by onlineMetaDataDigis +import copy +_run3_common_removedCommands = OutALCARECOTkAlV0s_noDrop.outputCommands.copy() +_run3_common_removedCommands.remove('keep DcsStatuss_scalersRawToDigi_*_*') + 
+_run3_common_extraCommands = ['keep DCSRecord_onlineMetaDataDigis_*_*', + 'keep OnlineLuminosityRecord_onlineMetaDataDigis_*_*'] + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(OutALCARECOTkAlV0s_noDrop, outputCommands = _run3_common_removedCommands + _run3_common_extraCommands) + +# in Phase2, remove the SiStrip clusters and keep the OT ones instead +_phase2_common_removedCommands = OutALCARECOTkAlV0s_noDrop.outputCommands.copy() +_phase2_common_removedCommands.remove('keep SiStripClusteredmNewDetSetVector_ALCARECOTkAlKShortTracks_*_*') +_phase2_common_removedCommands.remove('keep SiStripClusteredmNewDetSetVector_ALCARECOTkAlLambdaTracks_*_*') + +_phase2_common_extraCommands = ['keep Phase2TrackerCluster1DedmNewDetSetVector_ALCARECOTkAlKShortTracks_*_*', + 'keep Phase2TrackerCluster1DedmNewDetSetVector_ALCARECOTkAlLambdaTracks_*_*'] + +from Configuration.Eras.Modifier_phase2_common_cff import phase2_common +phase2_common.toModify(OutALCARECOTkAlV0s_noDrop, outputCommands = _phase2_common_removedCommands + _phase2_common_extraCommands ) + +OutALCARECOTkAlV0s = OutALCARECOTkAlV0s_noDrop.clone() +OutALCARECOTkAlV0s.outputCommands.insert(0, "drop *") diff --git a/Alignment/CommonAlignmentProducer/python/ALCARECOTkAlV0s_cff.py b/Alignment/CommonAlignmentProducer/python/ALCARECOTkAlV0s_cff.py new file mode 100644 index 0000000000000..cff5f3022d12f --- /dev/null +++ b/Alignment/CommonAlignmentProducer/python/ALCARECOTkAlV0s_cff.py @@ -0,0 +1,40 @@ +import FWCore.ParameterSet.Config as cms + +################################################################## +# AlCaReco for track based calibration using V0s +################################################################## +from HLTrigger.HLTfilters.hltHighLevel_cfi import * +ALCARECOTkAlV0sHLTFilter = hltHighLevel.clone() +ALCARECOTkAlV0sHLTFilter.andOr = True ## choose logical OR between Triggerbits +ALCARECOTkAlV0sHLTFilter.throw = False ## dont throw on unknown path names 
+ALCARECOTkAlV0sHLTFilter.HLTPaths = ['HLT_*'] +#ALCARECOTkAlV0sHLTFilter.eventSetupPathsKey = 'TkAlV0s' + +################################################################## +# Select events with at least one V0 +################################################################## +from DQM.TrackingMonitorSource.v0EventSelector_cfi import * +ALCARECOTkAlV0sKShortEventSelector = v0EventSelector.clone( + vertexCompositeCandidates = "generalV0Candidates:Kshort" +) +ALCARECOTkAlV0sLambdaEventSelector = v0EventSelector.clone( + vertexCompositeCandidates = "generalV0Candidates:Lambda" +) + +################################################################## +# Tracks from the selected vertex +################################################################# +import Alignment.CommonAlignmentProducer.AlignmentTracksFromVertexCompositeCandidateSelector_cfi as TracksFromV0 +ALCARECOTkAlKShortTracks = TracksFromV0.AlignmentTracksFromVertexCompositeCandidateSelector.clone( + vertexCompositeCandidates = 'generalV0Candidates:Kshort' +) + +ALCARECOTkAlLambdaTracks = TracksFromV0.AlignmentTracksFromVertexCompositeCandidateSelector.clone( + vertexCompositeCandidates = 'generalV0Candidates:Lambda' +) + +################################################################## +# Sequence +################################################################## +seqALCARECOTkAlK0s = cms.Sequence(ALCARECOTkAlV0sHLTFilter + ALCARECOTkAlV0sKShortEventSelector + ALCARECOTkAlKShortTracks) +seqALCARECOTkAlLambdas = cms.Sequence(ALCARECOTkAlV0sHLTFilter + ALCARECOTkAlV0sLambdaEventSelector + ALCARECOTkAlLambdaTracks) diff --git a/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHGCombined_cff.py b/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHGCombined_cff.py new file mode 100644 index 0000000000000..bf7d4da4f1037 --- /dev/null +++ b/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHGCombined_cff.py @@ -0,0 +1,92 @@ +import FWCore.ParameterSet.Config as cms 
+ +SiPixelAliMilleFileExtractorHGMinBias = cms.EDAnalyzer("MillePedeFileExtractor", + fileBlobInputTag = cms.InputTag("SiPixelAliMillePedeFileConverterHG",''), + fileDir = cms.string('HGCombinedAlignment/'), + # File names the Extractor will use to write the fileblobs in the root + # file as real binary files to disk, so that the pede step can read them. + # This includes the formatting directive "%04d" which will be expanded to + # 0000, 0001, 0002,... + outputBinaryFile = cms.string('pedeBinaryHGMinBias%04d.dat')) + +SiPixelAliMilleFileExtractorHGZMuMu = cms.EDAnalyzer("MillePedeFileExtractor", + fileBlobInputTag = cms.InputTag("SiPixelAliMillePedeFileConverterHGDimuon",''), + fileDir = cms.string('HGCombinedAlignment/'), + # File names the Extractor will use to write the fileblobs in the root + # file as real binary files to disk, so that the pede step can read them. + # This includes the formatting directive "%04d" which will be expanded to + # 0000, 0001, 0002,... + outputBinaryFile = cms.string('pedeBinaryHGDiMuon%04d.dat')) + +from Alignment.MillePedeAlignmentAlgorithm.MillePedeAlignmentAlgorithm_cfi import * +from Alignment.CommonAlignmentProducer.AlignmentProducerAsAnalyzer_cff import AlignmentProducer +from Alignment.MillePedeAlignmentAlgorithm.MillePedeDQMModule_cff import * + +SiPixelAliPedeAlignmentProducerHGCombined = AlignmentProducer.clone( + ParameterBuilder = dict( + Selector = cms.PSet( + alignParams = cms.vstring( + "TrackerP1PXBLadder,111111", + "TrackerP1PXECPanel,111111", + ) + ) + ), + doMisalignmentScenario = False, + checkDbAlignmentValidity = False, + applyDbAlignment = True, + tjTkAssociationMapTag = 'TrackRefitter2', + saveToDB = True, + trackerAlignmentRcdName = "TrackerAlignmentHGCombinedRcd" +) + +SiPixelAliPedeAlignmentProducerHGCombined.algoConfig = MillePedeAlignmentAlgorithm.clone( + mode = 'pede', + runAtPCL = True, + #mergeBinaryFiles = [SiPixelAliMilleFileExtractorHGMinBias.outputBinaryFile.value()], + #mergeBinaryFiles = 
[SiPixelAliMilleFileExtractorHGZMuMu.outputBinaryFile.value()], + mergeBinaryFiles = ['pedeBinaryHGMinBias%04d.dat','pedeBinaryHGDiMuon%04d.dat -- 10.0'], + binaryFile = '', + TrajectoryFactory = cms.PSet(BrokenLinesTrajectoryFactory), + minNumHits = 10, + fileDir = 'HGCombinedAlignment/', + pedeSteerer = dict( + pedeCommand = 'pede', + method = 'inversion 5 0.8', + options = [ + #'regularisation 1.0 0.05', # non-stated pre-sigma 50 mrad or 500 mum + 'entries 500', + 'chisqcut 30.0 4.5', + 'threads 1 1', + 'closeandreopen', + 'skipemptycons' + #'outlierdownweighting 3','dwfractioncut 0.1' + #'outlierdownweighting 5','dwfractioncut 0.2' + ], + fileDir = 'HGCombinedAlignment/', + runDir = 'HGCombinedAlignment/', + steerFile = 'pedeSteerHGCombined', + pedeDump = 'pedeHGCombined.dump' + ), + pedeReader = dict( + fileDir = 'HGCombinedAlignment/' + ), + MillePedeFileReader = dict( + fileDir = "HGCombinedAlignment/", + isHG = True + ) +) + +SiPixelAliDQMModuleHGCombined = SiPixelAliDQMModule.clone() +SiPixelAliDQMModuleHGCombined.outputFolder = "AlCaReco/SiPixelAliHGCombined" +SiPixelAliDQMModuleHGCombined.MillePedeFileReader.fileDir = "HGCombinedAlignment/" +SiPixelAliDQMModuleHGCombined.MillePedeFileReader.isHG = True + +from DQMServices.Core.DQMEDHarvester import DQMEDHarvester +dqmEnvSiPixelAliHGCombined = DQMEDHarvester('DQMHarvestingMetadata', + subSystemFolder = cms.untracked.string('AlCaReco')) + +ALCAHARVESTSiPixelAliHGCombined = cms.Sequence(SiPixelAliMilleFileExtractorHGMinBias* + SiPixelAliMilleFileExtractorHGZMuMu* + SiPixelAliPedeAlignmentProducerHGCombined* + SiPixelAliDQMModuleHGCombined* + dqmEnvSiPixelAliHGCombined) diff --git a/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHG_cff.py b/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHG_cff.py index a4e4bfbcd94c3..4a3113c850e71 100644 --- a/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHG_cff.py +++ 
b/Alignment/CommonAlignmentProducer/python/AlcaSiPixelAliHarvesterHG_cff.py @@ -66,6 +66,7 @@ ) SiPixelAliDQMModuleHG = SiPixelAliDQMModule.clone() +SiPixelAliDQMModuleHG.outputFolder = "AlCaReco/SiPixelAliHG" SiPixelAliDQMModuleHG.MillePedeFileReader.fileDir = "HGalignment/" SiPixelAliDQMModuleHG.MillePedeFileReader.isHG = True diff --git a/Alignment/CommonAlignmentProducer/python/AlignmentTracksFromVertexCompositeCandidateSelector_cfi.py b/Alignment/CommonAlignmentProducer/python/AlignmentTracksFromVertexCompositeCandidateSelector_cfi.py new file mode 100644 index 0000000000000..345896ef63e28 --- /dev/null +++ b/Alignment/CommonAlignmentProducer/python/AlignmentTracksFromVertexCompositeCandidateSelector_cfi.py @@ -0,0 +1,4 @@ +import FWCore.ParameterSet.Config as cms + +from Alignment.CommonAlignmentProducer.alignmentTrackFromVertexCompositeCandidateSelectorModule_cfi import alignmentTrackFromVertexCompositeCandidateSelectorModule +AlignmentTracksFromVertexCompositeCandidateSelector = alignmentTrackFromVertexCompositeCandidateSelectorModule.clone() diff --git a/Alignment/CommonAlignmentProducer/python/TkAlMuonSelectors_cfi.py b/Alignment/CommonAlignmentProducer/python/TkAlMuonSelectors_cfi.py index 667913e6b9a1e..1999a479cb05b 100644 --- a/Alignment/CommonAlignmentProducer/python/TkAlMuonSelectors_cfi.py +++ b/Alignment/CommonAlignmentProducer/python/TkAlMuonSelectors_cfi.py @@ -16,3 +16,16 @@ cut = cms.string('(isolationR03().sumPt + isolationR03().emEt + isolationR03().hadEt)/pt < 0.15'), filter = cms.bool(True) ) + +## FIXME: these are needed for ALCARECO production in CMSSW_14_0_X +## to avoid loosing in efficiency. To be reviewed after muon reco is fixed + +from Configuration.Eras.Modifier_phase2_common_cff import phase2_common +phase2_common.toModify(TkAlGoodIdMuonSelector, + cut = '(abs(eta) < 2.5 & isGlobalMuon & isTrackerMuon & numberOfMatches > 1 & globalTrack.hitPattern.numberOfValidMuonHits > 0 & globalTrack.normalizedChi2 < 20.) 
||' # regular selection + '(abs(eta) > 2.3 & abs(eta) < 3.0 & numberOfMatches >= 0 & isTrackerMuon)' # to recover GE0 tracks + ) + +phase2_common.toModify(TkAlRelCombIsoMuonSelector, + cut = '(isolationR03().sumPt)/pt < 0.1' # only tracker isolation + ) diff --git a/Alignment/CommonAlignmentProducer/python/customizeLSNumberFilterForRelVals.py b/Alignment/CommonAlignmentProducer/python/customizeLSNumberFilterForRelVals.py index 5c990fee2c902..32bbbb6340325 100644 --- a/Alignment/CommonAlignmentProducer/python/customizeLSNumberFilterForRelVals.py +++ b/Alignment/CommonAlignmentProducer/python/customizeLSNumberFilterForRelVals.py @@ -23,4 +23,12 @@ def lowerHitsPerStructure(process): 'threads 1 1', 'closeandreopen' ) + if hasattr(process,'SiPixelAliPedeAlignmentProducerHGCombined'): + process.SiPixelAliPedeAlignmentProducerHGCombined.algoConfig.pedeSteerer.options = cms.vstring( + 'entries 10', + 'chisqcut 30.0 4.5', + 'threads 1 1', + 'closeandreopen', + 'skipemptycons' + ) return process diff --git a/Alignment/CommonAlignmentProducer/src/AlignmentTracksFromVertexCompositeCandidateSelector.cc b/Alignment/CommonAlignmentProducer/src/AlignmentTracksFromVertexCompositeCandidateSelector.cc new file mode 100644 index 0000000000000..8acd0f017a60b --- /dev/null +++ b/Alignment/CommonAlignmentProducer/src/AlignmentTracksFromVertexCompositeCandidateSelector.cc @@ -0,0 +1,70 @@ +#include "Alignment/CommonAlignmentProducer/interface/AlignmentTracksFromVertexCompositeCandidateSelector.h" + +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "FWCore/Framework/interface/Event.h" +#include "DataFormats/TrackReco/interface/Track.h" + +// constructor ---------------------------------------------------------------- +AlignmentTrackFromVertexCompositeCandidateSelector::AlignmentTrackFromVertexCompositeCandidateSelector( + const edm::ParameterSet& cfg, edm::ConsumesCollector& iC) + : vccToken_(iC.consumes( + 
cfg.getParameter("vertexCompositeCandidates"))) {} + +// destructor ----------------------------------------------------------------- +AlignmentTrackFromVertexCompositeCandidateSelector::~AlignmentTrackFromVertexCompositeCandidateSelector() {} + +// do selection --------------------------------------------------------------- +AlignmentTrackFromVertexCompositeCandidateSelector::Tracks AlignmentTrackFromVertexCompositeCandidateSelector::select( + const edm::Handle& tc, const edm::Event& evt, const edm::EventSetup& setup) const { + Tracks result; + + std::vector theV0keys; + + edm::Handle vccHandle; + evt.getByToken(vccToken_, vccHandle); + + if (vccHandle.isValid()) { + // Loop over VertexCompositeCandidates and associate tracks + for (const auto& vcc : *vccHandle) { + for (size_t i = 0; i < vcc.numberOfDaughters(); ++i) { + LogDebug("AlignmentTrackFromVertexCompositeCandidateSelector") << "daughter: " << i << std::endl; + const reco::Candidate* daughter = vcc.daughter(i); + const reco::RecoChargedCandidate* chargedDaughter = dynamic_cast(daughter); + if (chargedDaughter) { + LogDebug("AlignmentTrackFromVertexCompositeCandidateSelector") << "charged daughter: " << i << std::endl; + const reco::TrackRef trackRef = chargedDaughter->track(); + if (trackRef.isNonnull()) { + LogDebug("AlignmentTrackFromVertexCompositeCandidateSelector") + << "charged daughter has non-null trackref: " << i << std::endl; + theV0keys.push_back(trackRef.key()); + } + } + } + } + } else { + edm::LogError("AlignmentTrackFromVertexCompositeCandidateSelector") + << "Error >> Failed to get VertexCompositeCandidateCollection"; + } + + LogDebug("AlignmentTrackFromVertexCompositeCandidateSelector") + << "collection will have size: " << theV0keys.size() << std::endl; + + if (tc.isValid()) { + int indx(0); + // put the track in the collection is it was used for the vertex + for (reco::TrackCollection::const_iterator tk = tc->begin(); tk != tc->end(); ++tk, ++indx) { + reco::TrackRef trackRef = 
reco::TrackRef(tc, indx); + if (std::find(theV0keys.begin(), theV0keys.end(), trackRef.key()) != theV0keys.end()) { + LogDebug("AlignmentTrackFromVertexSelector") << "track index: " << indx << "filling result vector" << std::endl; + result.push_back(&(*tk)); + } // if a valid key is found + } // end loop over tracks + } // if the handle is valid + + LogDebug("AlignmentTrackFromVertexCompositeCandidateSelector") + << "collection will have size: " << result.size() << std::endl; + + return result; +} diff --git a/Alignment/Geners/interface/CPP11_auto_ptr.hh b/Alignment/Geners/interface/CPP11_auto_ptr.hh deleted file mode 100644 index 1de84b424a6b1..0000000000000 --- a/Alignment/Geners/interface/CPP11_auto_ptr.hh +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef GENERS_CPP11_AUTO_PTR_HH_ -#define GENERS_CPP11_AUTO_PTR_HH_ - -#include "Alignment/Geners/interface/CPP11_config.hh" - -#include - -#ifdef CPP11_STD_AVAILABLE -#define CPP11_auto_ptr std::unique_ptr -#else -#define CPP11_auto_ptr std::auto_ptr -#endif - -#endif // GENERS_CPP11_AUTO_PTR_HH_ diff --git a/Alignment/Geners/interface/CStringStream.hh b/Alignment/Geners/interface/CStringStream.hh index df7d6e0d5d918..f52a7c767150e 100644 --- a/Alignment/Geners/interface/CStringStream.hh +++ b/Alignment/Geners/interface/CStringStream.hh @@ -18,7 +18,7 @@ #include #include -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/CStringBuf.hh" #include "Alignment/Geners/interface/ZlibHandle.hh" @@ -76,8 +76,8 @@ namespace gs { std::vector comprBuf_; std::vector readBuf_; std::ostream *sink_; - CPP11_auto_ptr inflator_; - CPP11_auto_ptr deflator_; + std::unique_ptr inflator_; + std::unique_ptr deflator_; }; } // namespace gs diff --git a/Alignment/Geners/interface/CompressedIO.hh b/Alignment/Geners/interface/CompressedIO.hh index be107abd91fa2..acb90e5f60bee 100644 --- a/Alignment/Geners/interface/CompressedIO.hh +++ b/Alignment/Geners/interface/CompressedIO.hh @@ -25,7 +25,7 @@ 
namespace gs { void restore_compressed_item(std::istream &in, Item *item); template - CPP11_auto_ptr read_compressed_item(std::istream &in); + std::unique_ptr read_compressed_item(std::istream &in); } // namespace gs namespace gs { @@ -77,7 +77,7 @@ namespace gs { } template - inline CPP11_auto_ptr read_compressed_item(std::istream &is) { + inline std::unique_ptr read_compressed_item(std::istream &is) { long long len; read_pod(is, &len); unsigned compressionCode; diff --git a/Alignment/Geners/interface/GenericIO.hh b/Alignment/Geners/interface/GenericIO.hh index 10e86d158ac08..c1370918de98f 100644 --- a/Alignment/Geners/interface/GenericIO.hh +++ b/Alignment/Geners/interface/GenericIO.hh @@ -5,6 +5,8 @@ #ifndef GENERS_GENERICIO_HH_ #define GENERS_GENERICIO_HH_ +#include + #include "Alignment/Geners/interface/IOPointeeType.hh" #include "Alignment/Geners/interface/binaryIO.hh" @@ -51,13 +53,13 @@ namespace gs { // from std::exception. */ template - inline CPP11_auto_ptr read_item(Stream &is, const bool readClassId = true) { + inline std::unique_ptr read_item(Stream &is, const bool readClassId = true) { typedef std::vector State; Item *item = nullptr; State state; const bool status = GenericReader::ISNULLPOINTER>>::process( item, is, &state, readClassId); - CPP11_auto_ptr ptr(item); + std::unique_ptr ptr(item); if (is.fail()) throw IOReadFailure("In gs::read_item: input stream failure"); if (!status || item == nullptr) @@ -150,9 +152,9 @@ namespace gs { template struct GenericReader::ISPOD>> { inline static bool readIntoPtr(T *&ptr, Stream &str, State *, const bool processClassId) { - CPP11_auto_ptr myptr; + std::unique_ptr myptr; if (ptr == nullptr) - myptr = CPP11_auto_ptr(new T()); + myptr = std::unique_ptr(new T()); if (processClassId) { static const ClassId current(ClassId::makeId()); ClassId id(str, 1); @@ -441,9 +443,9 @@ namespace gs { template struct GenericReader::ISPAIR>> { inline static bool readIntoPtr(T *&ptr, Stream &str, State *s, const bool 
processClassId) { - CPP11_auto_ptr myptr; + std::unique_ptr myptr; if (ptr == 0) { - myptr = CPP11_auto_ptr(new T()); + myptr = std::unique_ptr(new T()); clearIfPointer(myptr.get()->first); clearIfPointer(myptr.get()->second); } @@ -499,9 +501,9 @@ namespace gs { template struct GenericReader::ISSTRING>> { inline static bool readIntoPtr(std::string *&ptr, Stream &is, State *, const bool processClassId) { - CPP11_auto_ptr myptr; + std::unique_ptr myptr; if (ptr == nullptr) - myptr = CPP11_auto_ptr(new std::string()); + myptr = std::make_unique(); if (processClassId) { static const ClassId current(ClassId::makeId()); ClassId id(is, 1); @@ -613,7 +615,7 @@ namespace gs { if (ptr) return process_item(*ptr, str, s, processClassId); else { - CPP11_auto_ptr myptr(new T()); + std::unique_ptr myptr(new T()); if (!process_item(*myptr, str, s, processClassId)) return false; ptr = myptr.release(); @@ -673,9 +675,9 @@ namespace gs { template struct GenericReader::ISPLACEREADABLE>> { inline static bool readIntoPtr(T *&ptr, Stream &str, State *s, const bool processClassId) { - CPP11_auto_ptr myptr; + std::unique_ptr myptr; if (ptr == 0) - myptr = CPP11_auto_ptr(new T()); + myptr = std::unique_ptr(new T()); if (processClassId) { ClassId id(str, 1); T::restore(id, str, ptr ? 
ptr : myptr.get()); diff --git a/Alignment/Geners/interface/Reference.hh b/Alignment/Geners/interface/Reference.hh index 217330920e066..c3bdbafc07679 100644 --- a/Alignment/Geners/interface/Reference.hh +++ b/Alignment/Geners/interface/Reference.hh @@ -2,7 +2,7 @@ #define GENERS_REFERENCE_HH_ #include "Alignment/Geners/interface/AbsReference.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include @@ -32,7 +32,7 @@ namespace gs { // Methods to retrieve the item void restore(unsigned long index, T *obj) const; - CPP11_auto_ptr get(unsigned long index) const; + std::unique_ptr get(unsigned long index) const; std::shared_ptr getShared(unsigned long index) const; Reference() = delete; @@ -73,8 +73,8 @@ namespace gs { } template - inline CPP11_auto_ptr Reference::get(const unsigned long index) const { - return CPP11_auto_ptr(getPtr(index)); + inline std::unique_ptr Reference::get(const unsigned long index) const { + return std::unique_ptr(getPtr(index)); } template diff --git a/Alignment/Geners/interface/WriteOnlyCatalog.hh b/Alignment/Geners/interface/WriteOnlyCatalog.hh index d19c3e452b805..bf91e11651943 100644 --- a/Alignment/Geners/interface/WriteOnlyCatalog.hh +++ b/Alignment/Geners/interface/WriteOnlyCatalog.hh @@ -4,7 +4,7 @@ #include #include "Alignment/Geners/interface/AbsCatalog.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include namespace gs { class WriteOnlyCatalog : public AbsCatalog { @@ -63,7 +63,7 @@ namespace gs { unsigned long long count_; unsigned long long smallestId_; unsigned long long largestId_; - CPP11_auto_ptr lastEntry_; + std::unique_ptr lastEntry_; }; } // namespace gs diff --git a/Alignment/Geners/interface/binaryIO.hh b/Alignment/Geners/interface/binaryIO.hh index e5df7129e5fb6..7cc1069646069 100644 --- a/Alignment/Geners/interface/binaryIO.hh +++ b/Alignment/Geners/interface/binaryIO.hh @@ -15,7 +15,7 @@ #ifndef GENERS_BINARYIO_HH_ #define GENERS_BINARYIO_HH_ -#include 
"Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/ClassId.hh" #include "Alignment/Geners/interface/IOException.hh" @@ -143,9 +143,9 @@ namespace gs { } template - inline CPP11_auto_ptr read_obj(std::istream &in) { + inline std::unique_ptr read_obj(std::istream &in) { const ClassId id(in, 1); - return CPP11_auto_ptr(T::read(id, in)); + return std::unique_ptr(T::read(id, in)); } template @@ -157,10 +157,10 @@ namespace gs { // The following function is templated upon the reader factory template - inline CPP11_auto_ptr read_base_obj(std::istream &in, const Reader &f) { + inline std::unique_ptr read_base_obj(std::istream &in, const Reader &f) { typedef typename Reader::value_type T; const ClassId id(in, 1); - return CPP11_auto_ptr(f.read(id, in)); + return std::unique_ptr(f.read(id, in)); } // The following function assumes that the array contains actual @@ -279,7 +279,7 @@ namespace gs { const ClassId id(in, 1); pv->reserve(vlen); for (unsigned long i = 0; i < vlen; ++i) { - CPP11_auto_ptr obj(T::read(id, in)); + std::unique_ptr obj(T::read(id, in)); pv->push_back(*obj); } } diff --git a/Alignment/Geners/interface/forward_listIO.hh b/Alignment/Geners/interface/forward_listIO.hh index 47b004c7c7f38..9440cdc7d4fc8 100644 --- a/Alignment/Geners/interface/forward_listIO.hh +++ b/Alignment/Geners/interface/forward_listIO.hh @@ -12,111 +12,89 @@ // standard containers. Instead, we will designate std::forward_list // as an external type and will handle it separately. // -gs_declare_template_external_TT(std::forward_list) -gs_specialize_template_id_TT(std::forward_list, 0, 1) +gs_declare_template_external_TT(std::forward_list) gs_specialize_template_id_TT(std::forward_list, 0, 1) -namespace gs { - // Assuming that we want to write the list once and potentially - // read it back many times, we will write it out in the reverse - // order. This is because it is easy to extend the list from the - // front but not from the back. 
- // - template - struct GenericWriter, - Int2Type::ISEXTERNAL> > - { - inline static bool process(const std::forward_list& s, Stream& os, - State* p2, const bool processClassId) - { - typedef typename std::forward_list::const_iterator Iter; + namespace gs { + // Assuming that we want to write the list once and potentially + // read it back many times, we will write it out in the reverse + // order. This is because it is easy to extend the list from the + // front but not from the back. + // + template + struct GenericWriter, Int2Type::ISEXTERNAL> > { + inline static bool process(const std::forward_list& s, Stream& os, State* p2, const bool processClassId) { + typedef typename std::forward_list::const_iterator Iter; - bool status = processClassId ? - ClassId::makeId >().write(os) : true; - if (status) - { - const Iter listend = s.end(); - std::size_t sz = 0; - for (Iter it=s.begin(); it!=listend; ++it, ++sz) {;} - write_pod(os, sz); - if (sz) - { - status = ClassId::makeId().write(os); - std::vector iters(sz); - sz = 0; - for (Iter it=s.begin(); it!=listend; ++it, ++sz) - iters[sz] = it; - for (long long number=sz-1; number>=0 && status; --number) - status = process_const_item( - *iters[number], os, p2, false); - } - } - return status && !os.fail(); + bool status = processClassId ? 
ClassId::makeId >().write(os) : true; + if (status) { + const Iter listend = s.end(); + std::size_t sz = 0; + for (Iter it = s.begin(); it != listend; ++it, ++sz) { + ; } - }; + write_pod(os, sz); + if (sz) { + status = ClassId::makeId().write(os); + std::vector iters(sz); + sz = 0; + for (Iter it = s.begin(); it != listend; ++it, ++sz) + iters[sz] = it; + for (long long number = sz - 1; number >= 0 && status; --number) + status = process_const_item(*iters[number], os, p2, false); + } + } + return status && !os.fail(); + } + }; - template - struct InsertContainerItem > - { - inline static void insert(std::forward_list& obj, const T& item, - const std::size_t /* itemNumber */) - {obj.push_front(item);} - }; + template + struct InsertContainerItem > { + inline static void insert(std::forward_list& obj, const T& item, const std::size_t /* itemNumber */) { + obj.push_front(item); + } + }; - template - struct GenericReader, - Int2Type::ISEXTERNAL> > - { - inline static bool readIntoPtr(std::forward_list*& ptr, Stream& is, - State* p2, const bool processClassId) - { - if (processClassId) - { - ClassId id(is, 1); - const ClassId& curr = ClassId::makeId >(); - curr.ensureSameName(id); - } - CPP11_auto_ptr > myptr; - if (ptr == 0) - myptr = CPP11_auto_ptr >( - new std::forward_list()); - else - ptr->clear(); - std::size_t sz = 0; - read_pod(is, &sz); - bool itemStatus = true; - if (sz) - { - ClassId itemId(is, 1); - p2->push_back(itemId); - std::forward_list* nzptr = ptr ? ptr : myptr.get(); - try - { - for (std::size_t i=0; i < sz && itemStatus; ++i) - itemStatus = GenericReader< - Stream,State,std::forward_list,InContainerCycle - >::process(*nzptr, is, p2, i); - } - catch (...) 
- { - p2->pop_back(); - throw; - } - } - const bool success = itemStatus && !is.fail(); - if (success && ptr == 0) - ptr = myptr.release(); - return success; + template + struct GenericReader, Int2Type::ISEXTERNAL> > { + inline static bool readIntoPtr(std::forward_list*& ptr, Stream& is, State* p2, const bool processClassId) { + if (processClassId) { + ClassId id(is, 1); + const ClassId& curr = ClassId::makeId >(); + curr.ensureSameName(id); + } + std::unique_ptr > myptr; + if (ptr == 0) + myptr = std::unique_ptr >(new std::forward_list()); + else + ptr->clear(); + std::size_t sz = 0; + read_pod(is, &sz); + bool itemStatus = true; + if (sz) { + ClassId itemId(is, 1); + p2->push_back(itemId); + std::forward_list* nzptr = ptr ? ptr : myptr.get(); + try { + for (std::size_t i = 0; i < sz && itemStatus; ++i) + itemStatus = + GenericReader, InContainerCycle>::process(*nzptr, is, p2, i); + } catch (...) { + p2->pop_back(); + throw; } + } + const bool success = itemStatus && !is.fail(); + if (success && ptr == 0) + ptr = myptr.release(); + return success; + } - inline static bool process(std::forward_list& s, Stream& is, - State* st, const bool processClassId) - { - std::forward_list* ps = &s; - return readIntoPtr(ps, is, st, processClassId); - } - }; + inline static bool process(std::forward_list& s, Stream& is, State* st, const bool processClassId) { + std::forward_list* ps = &s; + return readIntoPtr(ps, is, st, processClassId); + } + }; } -#endif // CPP11_STD_AVAILABLE -#endif // GENERS_FORWARD_LISTIO_HH_ - +#endif // CPP11_STD_AVAILABLE +#endif // GENERS_FORWARD_LISTIO_HH_ diff --git a/Alignment/Geners/src/CStringStream.cc b/Alignment/Geners/src/CStringStream.cc index b507f45903b49..b686276fe5fac 100644 --- a/Alignment/Geners/src/CStringStream.cc +++ b/Alignment/Geners/src/CStringStream.cc @@ -1,5 +1,6 @@ #include #include +#include #include "zlib.h" @@ -108,7 +109,7 @@ namespace gs { case ZLIB: { if (!inflator_.get()) - inflator_ = CPP11_auto_ptr(new 
ZlibInflateHandle()); + inflator_ = std::make_unique(); doZlibCompression(&readBuf_[0], len, false, inflator_->strm(), &comprBuf_[0], comprBuf_.size(), *this); } break; @@ -144,7 +145,7 @@ namespace gs { switch (mode_) { case ZLIB: { if (!deflator_.get()) - deflator_ = CPP11_auto_ptr(new ZlibDeflateHandle(compressionLevel_)); + deflator_ = std::make_unique(compressionLevel_); doZlibCompression(data, len, true, deflator_->strm(), &comprBuf_[0], comprBuf_.size(), *sink_); } break; diff --git a/Alignment/Geners/src/CatalogEntry.cc b/Alignment/Geners/src/CatalogEntry.cc index 65dee0ec12e8c..514fa9a91bf91 100644 --- a/Alignment/Geners/src/CatalogEntry.cc +++ b/Alignment/Geners/src/CatalogEntry.cc @@ -1,6 +1,6 @@ #include "Alignment/Geners/interface/IOException.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/CatalogEntry.hh" #include "Alignment/Geners/interface/binaryIO.hh" @@ -80,7 +80,7 @@ namespace gs { CatalogEntry *rec = nullptr; if (!in.fail()) { - CPP11_auto_ptr loc(ItemLocation::read(locId, in)); + std::unique_ptr loc(ItemLocation::read(locId, in)); if (loc.get()) rec = new CatalogEntry(ItemDescriptor(itemClass, ioPrototype.c_str(), name.c_str(), category.c_str()), itemId, diff --git a/Alignment/Geners/src/ContiguousCatalog.cc b/Alignment/Geners/src/ContiguousCatalog.cc index 3f3327505b805..0b57a6c0394e7 100644 --- a/Alignment/Geners/src/ContiguousCatalog.cc +++ b/Alignment/Geners/src/ContiguousCatalog.cc @@ -2,7 +2,7 @@ #include #include -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/ContiguousCatalog.hh" #include "Alignment/Geners/interface/IOException.hh" #include "Alignment/Geners/interface/binaryIO.hh" @@ -113,7 +113,7 @@ namespace gs { ClassId rId(in, 1); ClassId locId(in, 1); - CPP11_auto_ptr catalog(new ContiguousCatalog()); + std::unique_ptr catalog(new ContiguousCatalog()); bool firstEntry = true; for (long long recnum = 0; recnum < 
nRecords; ++recnum) { CatalogEntry *rec = CatalogEntry::read(rId, locId, in); @@ -146,7 +146,7 @@ namespace gs { ClassId rId(in, 1); ClassId locId(in, 1); - CPP11_auto_ptr catalog(new ContiguousCatalog()); + std::unique_ptr catalog(new ContiguousCatalog()); bool firstEntry = true; for (in.peek(); !in.eof(); in.peek()) { CatalogEntry *rec = CatalogEntry::read(rId, locId, in); diff --git a/Alignment/Geners/src/StringArchive.cc b/Alignment/Geners/src/StringArchive.cc index 62bd21a95a84e..9581e873f21a0 100644 --- a/Alignment/Geners/src/StringArchive.cc +++ b/Alignment/Geners/src/StringArchive.cc @@ -57,12 +57,12 @@ namespace gs { read_pod(in, &nam); if (in.fail()) throw IOReadFailure("In gs::StringArchive::read: input stream failure"); - CPP11_auto_ptr archive(new StringArchive(nam.c_str())); + std::unique_ptr archive(new StringArchive(nam.c_str())); archive->lastpos_ = lastpos; ClassId streamId(in, 1); CharBuffer::restore(streamId, in, &archive->stream_); ClassId catId(in, 1); - CPP11_auto_ptr p(ContiguousCatalog::read(catId, in)); + std::unique_ptr p(ContiguousCatalog::read(catId, in)); assert(p.get()); archive->catalog_ = *p; return archive.release(); diff --git a/Alignment/Geners/src/WriteOnlyCatalog.cc b/Alignment/Geners/src/WriteOnlyCatalog.cc index 242f337141f9a..9a32a544934b0 100644 --- a/Alignment/Geners/src/WriteOnlyCatalog.cc +++ b/Alignment/Geners/src/WriteOnlyCatalog.cc @@ -1,4 +1,4 @@ -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/IOException.hh" #include "Alignment/Geners/interface/WriteOnlyCatalog.hh" #include "Alignment/Geners/interface/binaryIO.hh" @@ -16,7 +16,7 @@ namespace gs { const unsigned long long off) { const unsigned long long id = count_ ? 
largestId_ + 1 : smallestId_; lastEntry_ = - CPP11_auto_ptr(new CatalogEntry(descriptor, id, compressionCode, itemLen, loc, off)); + std::unique_ptr(new CatalogEntry(descriptor, id, compressionCode, itemLen, loc, off)); if (lastEntry_->write(os_)) { ++count_; largestId_ = id; @@ -53,7 +53,7 @@ namespace gs { ClassId rId(in, 1); ClassId locId(in, 1); - CPP11_auto_ptr cat(new WriteOnlyCatalog(dynamic_cast(in))); + std::unique_ptr cat(new WriteOnlyCatalog(dynamic_cast(in))); bool firstEntry = true; for (in.peek(); !in.eof(); in.peek()) { CatalogEntry *rec = CatalogEntry::read(rId, locId, in); diff --git a/Alignment/Geners/src/stringArchiveIO.cc b/Alignment/Geners/src/stringArchiveIO.cc index 9ab011ba47434..cd1e02d339da0 100644 --- a/Alignment/Geners/src/stringArchiveIO.cc +++ b/Alignment/Geners/src/stringArchiveIO.cc @@ -41,7 +41,7 @@ namespace gs { std::ifstream is(filename, std::ios_base::binary); if (!is.is_open()) throw IOOpeningFailure("gs::readStringArchive", filename); - CPP11_auto_ptr ar = read_item(is); + std::unique_ptr ar = read_item(is); return ar.release(); } @@ -73,7 +73,7 @@ namespace gs { std::ifstream is(filename, std::ios_base::binary); if (!is.is_open()) throw IOOpeningFailure("gs::readCompressedStringArchive", filename); - CPP11_auto_ptr ar = read_compressed_item(is); + std::unique_ptr ar = read_compressed_item(is); return ar.release(); } @@ -85,7 +85,7 @@ namespace gs { << "StringArchive item with id " << id << " not found"; throw gs::IOInvalidArgument(os.str()); } - CPP11_auto_ptr p = ref.get(0); + std::unique_ptr p = ref.get(0); return p.release(); } diff --git a/Alignment/Geners/test/cdump.cc b/Alignment/Geners/test/cdump.cc index 174d5741418a8..82cdd46b7d242 100644 --- a/Alignment/Geners/test/cdump.cc +++ b/Alignment/Geners/test/cdump.cc @@ -6,7 +6,7 @@ #include #include -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/CStringStream.hh" #include "Alignment/Geners/interface/CatalogIO.hh" 
#include "Alignment/Geners/interface/ContiguousCatalog.hh" @@ -81,9 +81,9 @@ int main(int argc, char const *argv[]) { unsigned compressionCode = 0, mergeLevel = 0; std::vector annotations; - CPP11_auto_ptr cat; + std::unique_ptr cat; try { - cat = CPP11_auto_ptr( + cat = std::unique_ptr( readBinaryCatalog(in, &compressionCode, &mergeLevel, &annotations, true)); } catch (std::exception &e) { cerr << "Failed to read catalog from file \"" << inputfile << "\". " << e.what() << endl; diff --git a/Alignment/Geners/test/cmerge.cc b/Alignment/Geners/test/cmerge.cc index 1eef3ebe5bc04..3afe06898e842 100644 --- a/Alignment/Geners/test/cmerge.cc +++ b/Alignment/Geners/test/cmerge.cc @@ -5,7 +5,7 @@ #include #include -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/CatalogIO.hh" #include "Alignment/Geners/interface/ContiguousCatalog.hh" #include "Alignment/Geners/interface/uriUtils.hh" @@ -102,9 +102,9 @@ int main(int argc, char const *argv[]) { unsigned compressionCode = 0, mergeLevel = 0; std::vector annotations; - CPP11_auto_ptr cat; + std::unique_ptr cat; try { - cat = CPP11_auto_ptr( + cat = std::unique_ptr( readBinaryCatalog(in, &compressionCode, &mergeLevel, &annotations, true)); } catch (std::exception &e) { cerr << "Failed to read catalog from file \"" << inputfile << "\". 
" << e.what() << endl; diff --git a/Alignment/Geners/test/crecover.cc b/Alignment/Geners/test/crecover.cc index c530c61a0a057..4e7366c779f51 100644 --- a/Alignment/Geners/test/crecover.cc +++ b/Alignment/Geners/test/crecover.cc @@ -5,7 +5,7 @@ #include #include "Alignment/Geners/interface/BinaryArchiveBase.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/CatalogIO.hh" #include "Alignment/Geners/interface/ContiguousCatalog.hh" #include "Alignment/Geners/interface/IOException.hh" diff --git a/Alignment/Geners/test/print_items.cc b/Alignment/Geners/test/print_items.cc index bbb18e0453f51..d98bf986a638e 100644 --- a/Alignment/Geners/test/print_items.cc +++ b/Alignment/Geners/test/print_items.cc @@ -8,7 +8,7 @@ // statements already present in this program. // -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/ClassId.hh" #include "Alignment/Geners/interface/MultiFileArchive.hh" #include "Alignment/Geners/interface/Reference.hh" diff --git a/Alignment/HIPAlignmentAlgorithm/python/common_cff_py.txt b/Alignment/HIPAlignmentAlgorithm/python/common_cff_py.txt index 8f547eef1d3ab..09abda64c3411 100644 --- a/Alignment/HIPAlignmentAlgorithm/python/common_cff_py.txt +++ b/Alignment/HIPAlignmentAlgorithm/python/common_cff_py.txt @@ -176,7 +176,7 @@ for sra in SelectorRigidAlignables: if not tmpstrsrafound: tmprigidalignables.append(sra) else: - print "{} is already in the non-rigid alignables list. Omitting it in the rigid structures to align.".format(tmpstrsra) + print("{} is already in the non-rigid alignables list. 
Omitting it in the rigid structures to align.".format(tmpstrsra)) SelectorRigidAlignables = tmprigidalignables process.AlignmentProducer.ParameterBuilder.SelectorBowed = cms.PSet( diff --git a/Alignment/HIPAlignmentAlgorithm/scripts/makeHippyCampaign.py b/Alignment/HIPAlignmentAlgorithm/scripts/makeHippyCampaign.py index 06324d036ca3a..a0893443359e3 100755 --- a/Alignment/HIPAlignmentAlgorithm/scripts/makeHippyCampaign.py +++ b/Alignment/HIPAlignmentAlgorithm/scripts/makeHippyCampaign.py @@ -11,8 +11,6 @@ import subprocess import sys -basedir = "/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN2/HipPy" - thisfile = os.path.abspath(__file__) def main(): @@ -20,13 +18,21 @@ def main(): parser.add_argument("foldername", help="folder name for the campaign. Example: CRUZET20xy") parser.add_argument("--cmssw", default=os.environ["CMSSW_VERSION"]) parser.add_argument("--scram-arch", default=os.environ["SCRAM_ARCH"]) - parser.add_argument("--subfolder", default="", help="subfolder within "+basedir+" to make 'foldername' in.") + parser.add_argument("--subfolder", default="", help="subfolder within basedir to make 'foldername' in.") parser.add_argument("--merge-topic", action="append", help="things to cms-merge-topic within the CMSSW release created", default=[]) parser.add_argument("--print-sys-path", action="store_true", help=argparse.SUPPRESS) #internal, don't use this + parser.add_argument('--basedir', default="/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN2/HipPy") args = parser.parse_args() + basedir = args.basedir + if not os.path.exists(basedir): + raise FileExistsError("Base Directory does not exist!") + + if basedir[-1] == '/': + basedir = basedir[:-1] #No trailing slashed allowed + if args.print_sys_path: - print repr(sys.path) + print(repr(sys.path)) return folder = os.path.join(basedir, args.subfolder, args.foldername) @@ -100,7 +106,14 @@ def main(): shutil.copy(os.path.join(HIPAlignmentAlgorithm, "test", "hippysubmittertemplate.sh"), "submit_template.sh") 
os.chmod("submit_template.sh", os.stat("submit_template.sh").st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) subprocess.check_call(["git", "add", "submit_template.sh"]) - + + if not os.path.exists("submit_script.sh"): + shutil.copy(os.path.join(HIPAlignmentAlgorithm, "test", "hippysubmitterscript.sh"), "submit_script.sh") + os.chmod("submit_script.sh", os.stat("submit_script.sh").st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + subprocess.check_call(["git", "add", "submit_script.sh"]) + + print("Dumped files into ", folder) + try: subprocess.check_output(["git", "diff", "--staged", "--quiet"]) except subprocess.CalledProcessError: @@ -128,7 +141,7 @@ def cd(newdir): def cmsenv(): output = subprocess.check_output(["scram", "ru", "-sh"]) - for line in output.split(";\n"): + for line in output.decode('utf8').split(";\n"): if not line.strip(): continue match1 = re.match(r'^export (\w*)="([^"]*)"$', line) match2 = re.match(r'^unset *((\w* *)*)$', line) diff --git a/Alignment/HIPAlignmentAlgorithm/test/hippysubmitterscript.sh b/Alignment/HIPAlignmentAlgorithm/test/hippysubmitterscript.sh new file mode 100644 index 0000000000000..97ef75d1e4b07 --- /dev/null +++ b/Alignment/HIPAlignmentAlgorithm/test/hippysubmitterscript.sh @@ -0,0 +1,15 @@ +#! /bin/bash + +hipName="$(grep -m 1 "alignmentname=" $1 | cut -d= -f2)" + +if [ -z "$hipName" ] +then + echo "Value for 'alignmentname' not found in template. Please check your submission template." +else + nohup ./$1 >> ../$hipName.log 2>&1 & + echo $hipName $! >> ../pid.nohup + echo "Please follow the log in '../$hipName.log'. To track progress live, use 'tail -f ../$hipName.log'." + echo "The nohup job PID is appended to '../pid.nohup' in case the submission should be killed." + echo "You can also use 'ps -ef | grep submit_' to find PIDs of currently running alignments." 
+fi + diff --git a/Alignment/HIPAlignmentAlgorithm/test/hippysubmittertemplate.sh b/Alignment/HIPAlignmentAlgorithm/test/hippysubmittertemplate.sh index 3468295a195a9..9c2bf4ef8c32b 100644 --- a/Alignment/HIPAlignmentAlgorithm/test/hippysubmittertemplate.sh +++ b/Alignment/HIPAlignmentAlgorithm/test/hippysubmittertemplate.sh @@ -6,7 +6,7 @@ set -euo pipefail voms-proxy-info | grep timeleft | grep -v -q 00:00:00 || (echo 'no proxy'; exit 1) -(echo $STY > /dev/null) || (echo "run this on a screen"; exit 1) +(echo $TMUX > /dev/null) || (echo "run this on a screen"; exit 1) #hpnumber= hptype=hp #or sm diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.cc b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.cc index df32f75393fb8..bbdc83388b3f8 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.cc +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.cc @@ -722,8 +722,12 @@ void MillePedeAlignmentAlgorithm::endRun(const EndRunInfo &runInfo, const edm::E void MillePedeAlignmentAlgorithm::beginLuminosityBlock(const edm::EventSetup &) { if (!runAtPCL_) return; - if (this->isMode(myMilleBit)) + if (this->isMode(myMilleBit)) { theMille->resetOutputFile(); + theBinary.reset(); // GBL output has to be considered since same binary file is used + theBinary = std::make_unique((theDir + theConfig.getParameter("binaryFile")).c_str(), + theGblDoubleBinary); + } } //____________________________________________________ diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.h b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.h index 62f3d5f396cb4..ce701bf645098 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.h +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeAlignmentAlgorithm.h @@ -116,9 +116,6 @@ class MillePedeAlignmentAlgorithm : public 
AlignmentAlgorithmBase { /// called at end of luminosity block void endLuminosityBlock(const edm::EventSetup &) override; - /* virtual void beginLuminosityBlock(const edm::EventSetup &setup) {} */ - /* virtual void endLuminosityBlock(const edm::EventSetup &setup) {} */ - /// Called in order to pass parameters to alignables for a specific run /// range in case the algorithm supports run range dependent alignment. bool setParametersForRunRange(const RunRange &runrange) override; diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.cc b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.cc index 430d0ed7aa016..abc6cbadcb3d0 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.cc +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.cc @@ -31,6 +31,7 @@ MillePedeDQMModule ::MillePedeDQMModule(const edm::ParameterSet& config) ptpToken_(esConsumes()), aliThrToken_(esConsumes()), geomToken_(esConsumes()), + outputFolder_(config.getParameter("outputFolder")), mpReaderConfig_(config.getParameter("MillePedeFileReader")), isHG_(mpReaderConfig_.getParameter("isHG")) { consumes(config.getParameter("alignmentTokenSrc")); @@ -47,7 +48,12 @@ void MillePedeDQMModule ::bookHistograms(DQMStore::IBooker& booker) { booker.cd(); if (!isHG_) { - booker.setCurrentFolder("AlCaReco/SiPixelAli/"); + if (outputFolder_.find("HG") != std::string::npos) { + throw cms::Exception("LogicError") + << "MillePedeDQMModule is configured as Low Granularity but the outputfolder is for High Granularity"; + } + + booker.setCurrentFolder(outputFolder_); h_xPos = booker.book1D("Xpos", "Alignment fit #DeltaX;;#mum", 36, 0., 36.); h_xRot = booker.book1D("Xrot", "Alignment fit #Delta#theta_{X};;#murad", 36, 0., 36.); h_yPos = booker.book1D("Ypos", "Alignment fit #DeltaY;;#mum", 36, 0., 36.); @@ -56,7 +62,12 @@ void MillePedeDQMModule ::bookHistograms(DQMStore::IBooker& booker) { h_zRot = booker.book1D("Zrot", "Alignment fit 
#Delta#theta_{Z};;#murad", 36, 0., 36.); statusResults = booker.book2D("statusResults", "Status of SiPixelAli PCL workflow;;", 6, 0., 6., 1, 0., 1.); } else { - booker.setCurrentFolder("AlCaReco/SiPixelAliHG/"); + if (outputFolder_.find("HG") == std::string::npos) { + throw cms::Exception("LogicError") + << "MillePedeDQMModule is configured as High Granularity but the outputfolder is for Low Granularity"; + } + + booker.setCurrentFolder(outputFolder_); layerVec = {{"Layer1", pixelTopologyMap_->getPXBLadders(1)}, {"Layer2", pixelTopologyMap_->getPXBLadders(2)}, @@ -476,7 +487,7 @@ void MillePedeDQMModule ::fillExpertHisto_HG(std::mapSetMinimum(-(max_)*1.2); + histo_0->SetMinimum(-(max_) * 1.2); histo_0->SetMaximum(max_ * 1.2); currentStart += layer.second; diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.h b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.h index c65cedb457496..a35380f63a8a2 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.h +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeDQMModule.h @@ -100,6 +100,7 @@ class MillePedeDQMModule : public DQMEDHarvester { const edm::ESGetToken aliThrToken_; const edm::ESGetToken geomToken_; + const std::string outputFolder_; const edm::ParameterSet mpReaderConfig_; std::unique_ptr tracker_; std::unique_ptr mpReader_; diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.cc b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.cc index 812b6ed80c568..b4333a2279255 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.cc +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.cc @@ -18,9 +18,10 @@ MillePedeFileConverter::MillePedeFileConverter(const edm::ParameterSet& iConfig) produces(fileBlobLabel_); } -MillePedeFileConverter::~MillePedeFileConverter() {} - void MillePedeFileConverter::endLuminosityBlockProduce(edm::LuminosityBlock& iLumi, 
const edm::EventSetup& iSetup) { + auto const& moduleType = moduleDescription().moduleName(); + auto const& moduleLabel = moduleDescription().moduleLabel(); + edm::LogInfo("MillePedeFileActions") << "Inserting all data from file " << inputDir_ + inputFileName_ << " as a FileBlob to the lumi, using label \"" << fileBlobLabel_ << "\"."; // Preparing the FileBlobCollection: @@ -33,7 +34,12 @@ void MillePedeFileConverter::endLuminosityBlockProduce(edm::LuminosityBlock& iLu if (fileBlob.size() > 0) { // skip if no data or FileBlob file not found // Adding the FileBlob to the lumi: fileBlobCollection->addFileBlob(fileBlob); + edm::LogInfo(moduleType) << "[" << moduleLabel << "] fileBlob size was not empty, putting file blob with size " + << fileBlob.size() << std::endl; } + + edm::LogInfo(moduleType) << "[" << moduleLabel << "]" + << " Root file contains " << fileBlobCollection->size() << " FileBlob(s)."; iLumi.put(std::move(fileBlobCollection), fileBlobLabel_); } diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.h b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.h index 82c5b2ce518b1..450e0ec10ff06 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.h +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileConverter.h @@ -28,7 +28,7 @@ class MillePedeFileConverter : public edm::one::EDProducer { public: explicit MillePedeFileConverter(const edm::ParameterSet&); - ~MillePedeFileConverter() override; + ~MillePedeFileConverter() override = default; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.cc b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.cc index 18a33b6c818c2..e5e32a919d07c 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.cc +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.cc 
@@ -17,15 +17,13 @@ MillePedeFileExtractor::MillePedeFileExtractor(const edm::ParameterSet& iConfig) : outputDir_(iConfig.getParameter("fileDir")), outputFileName_(iConfig.getParameter("outputBinaryFile")), maxNumberOfBinaries_(iConfig.getParameter("maxNumberOfBinaries")) { - auto fileBlobInputTag = iConfig.getParameter("fileBlobInputTag"); - fileBlobToken_ = consumes(fileBlobInputTag); + fileBlobInputTag_ = iConfig.getParameter("fileBlobInputTag"); + fileBlobToken_ = consumes(fileBlobInputTag_); if (hasBinaryNumberLimit()) { edm::LogInfo("MillePedeFileActions") << "Limiting the number of extracted binary files to " << maxNumberOfBinaries_; } } -MillePedeFileExtractor::~MillePedeFileExtractor() {} - void MillePedeFileExtractor::endLuminosityBlock(const edm::LuminosityBlock& iLumi, const edm::EventSetup&) { if (enoughBinaries()) return; @@ -41,6 +39,12 @@ void MillePedeFileExtractor::endLuminosityBlock(const edm::LuminosityBlock& iLum // Getting our hands on the vector of FileBlobs edm::Handle fileBlobCollection; iLumi.getByToken(fileBlobToken_, fileBlobCollection); + + if (fileBlobCollection.failedToGet()) { + edm::LogError("MillePedeFileActions") << "Failed to get collection from input tag: " << fileBlobInputTag_.encode(); + return; + } + if (fileBlobCollection.isValid()) { // Logging the amount of FileBlobs in the vector edm::LogInfo("MillePedeFileActions") << "Root file contains " << fileBlobCollection->size() << " FileBlob(s)."; @@ -65,7 +69,9 @@ void MillePedeFileExtractor::endLuminosityBlock(const edm::LuminosityBlock& iLum ++nBinaries_; } } else { - edm::LogError("MillePedeFileActions") << "Error: The root file does not contain any vector of FileBlob."; + edm::LogError("MillePedeFileActions") + << "Error: The root file does not contain any vector of FileBlob under the label " << fileBlobInputTag_.encode() + << "."; } } diff --git a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.h 
b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.h index 3d4bc0256ba10..9d360b4623a9a 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.h +++ b/Alignment/MillePedeAlignmentAlgorithm/plugins/MillePedeFileExtractor.h @@ -31,7 +31,7 @@ class MillePedeFileExtractor : public edm::one::EDAnalyzer { public: explicit MillePedeFileExtractor(const edm::ParameterSet&); - ~MillePedeFileExtractor() override; + ~MillePedeFileExtractor() override = default; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: @@ -47,6 +47,7 @@ class MillePedeFileExtractor : public edm::one::EDAnalyzer fileBlobToken_; const int maxNumberOfBinaries_; diff --git a/Alignment/MillePedeAlignmentAlgorithm/python/MillePedeDQMModule_cff.py b/Alignment/MillePedeAlignmentAlgorithm/python/MillePedeDQMModule_cff.py index c73b86cc4b430..a7dbdfd857431 100644 --- a/Alignment/MillePedeAlignmentAlgorithm/python/MillePedeDQMModule_cff.py +++ b/Alignment/MillePedeAlignmentAlgorithm/python/MillePedeDQMModule_cff.py @@ -4,6 +4,7 @@ import Alignment.MillePedeAlignmentAlgorithm.MillePedeFileReader_cfi as MillePedeFileReader_cfi SiPixelAliDQMModule = DQMEDHarvester("MillePedeDQMModule", + outputFolder = cms.string("AlCaReco/SiPixelAli"), alignmentTokenSrc = cms.InputTag("SiPixelAliPedeAlignmentProducer"), MillePedeFileReader = cms.PSet(MillePedeFileReader_cfi.MillePedeFileReader.clone()) ) diff --git a/Alignment/MillePedeAlignmentAlgorithm/test/test_payload_sanity.sh b/Alignment/MillePedeAlignmentAlgorithm/test/test_payload_sanity.sh index 55191e29e75cc..9ee58beaa1475 100755 --- a/Alignment/MillePedeAlignmentAlgorithm/test/test_payload_sanity.sh +++ b/Alignment/MillePedeAlignmentAlgorithm/test/test_payload_sanity.sh @@ -1,6 +1,7 @@ #!/bin/bash function die { echo $1: status $2; exit $2; } -INPUTFILE=${SCRAM_TEST_PATH}/alignments_MP.db -(cmsRun ${SCRAM_TEST_PATH}/AlignmentRcdChecker_cfg.py inputSqliteFile=${INPUTFILE}) || die 'failed 
running AlignmentRcdChecker' +echo -e "Content of the current directory is: "`ls .` +INPUTFILE=alignments_MP.db +(cmsRun ${SCRAM_TEST_PATH}/AlignmentRcdChecker_cfg.py inputSqliteFile=${INPUTFILE}) || die 'failed running AlignmentRcdChecker' $? rm $INPUTFILE diff --git a/Alignment/MillePedeAlignmentAlgorithm/test/test_pede.sh b/Alignment/MillePedeAlignmentAlgorithm/test/test_pede.sh index 137c4df7cfde6..339ce9792e472 100755 --- a/Alignment/MillePedeAlignmentAlgorithm/test/test_pede.sh +++ b/Alignment/MillePedeAlignmentAlgorithm/test/test_pede.sh @@ -4,7 +4,7 @@ function die { echo $1: status $2; exit $2; } LOCAL_TEST_DIR=${SCRAM_TEST_PATH} clean_up(){ - echo "cleaning the local test area" + echo -e "\nCleaning the local test area" rm -fr milleBinary00* rm -fr pedeSteer* rm -fr millepede.* @@ -13,7 +13,7 @@ clean_up(){ rm -fr *.dat rm -fr *.tar rm -fr *.gz - rm -fr *.db + rm -fr *.dump } if test -f "milleBinary*"; then @@ -39,10 +39,9 @@ if [ $STATUS -eq 0 ]; then echo -e "\n @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" echo -e " @ MillePede Exit Status: "`cat millepede.end` echo -e " @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" - ## mv the output file to the local test directory for the subsequent payload sanity check - mv alignments_MP.db ${LOCAL_TEST_DIR} ## clean the house now... 
clean_up + echo -e "\nContent of the current directory is: "`ls .` else die "SKIPPING test, file ${TESTPACKAGE}.tar not found" 0 fi diff --git a/Alignment/OfflineValidation/README.md b/Alignment/OfflineValidation/README.md index 02dd633ff9ca4..d7c4554d9812d 100644 --- a/Alignment/OfflineValidation/README.md +++ b/Alignment/OfflineValidation/README.md @@ -114,5 +114,8 @@ For details read [`README_PV.md`](https://github.com/cms-sw/cmssw/blob/master/Al ## JetHT validation For details read [`README_JetHT.md`](https://github.com/cms-sw/cmssw/blob/master/Alignment/OfflineValidation/README_JetHT.md) +## MTS validation +For details read [`README_MTS.md`](https://github.com/cms-sw/cmssw/blob/master/Alignment/OfflineValidation/README_MTS.md) + ## General info about IOV/run arguments For details read [`README_IOV.md`](https://github.com/cms-sw/cmssw/blob/master/Alignment/OfflineValidation/README_IOV.md) diff --git a/Alignment/OfflineValidation/README_MTS.md b/Alignment/OfflineValidation/README_MTS.md new file mode 100644 index 0000000000000..e616e4d549147 --- /dev/null +++ b/Alignment/OfflineValidation/README_MTS.md @@ -0,0 +1,33 @@ +## MTS (Muon Track Splitting) validation + +### General info + +``` +validations: + MTS: + : + : + +``` + +MTS validation runs in 1 possible type of steps: + - single (validation analysis by MTS_cfg.py) +Step name is arbitrary string which will be used as a reference for consequent steps. +Merge and trend jobs are not yet implemented. + +### Single MTS jobs + +Single jobs can be specified per run (IoV as well). + +**Parameters below to be updated** +Variable | Default value | Explanation/Options +-------- | ------------- | -------------------- +IOV | None | List of IOVs/runs defined by integer value. IOV 1 is reserved for MC. +Alignments | None | List of alignments. Will create separate directory for each. +dataset | See defaultInputFiles_cff.py | Path to txt file containing list of datasets to be used. 
If file is missing at EOS or is corrupted - job will eventually fail (most common issue). +goodlumi | cms.untracked.VLuminosityBlockRange() | Path to json file containing lumi information about selected IoV - must contain list of runs under particular IoV with lumiblock info. Format: `IOV_Vali_{}.json` +maxevents | 1 | Maximum number of events before cmsRun terminates. +trackcollection | "generalTracks" | Track collection to be specified here, e.g. "ALCARECOTkAlMuonIsolated" or "ALCARECOTkAlMinBias" ... +tthrbuilder | "WithAngleAndTemplate" | Specify TTRH Builder +usePixelQualityFlag | True | Use pixel quality flag? +cosmicsZeroTesla | False | Is this validation for cosmics with zero magnetic field? diff --git a/Alignment/OfflineValidation/bin/BuildFile.xml b/Alignment/OfflineValidation/bin/BuildFile.xml index 4034992bc0c58..6578fd9b70855 100644 --- a/Alignment/OfflineValidation/bin/BuildFile.xml +++ b/Alignment/OfflineValidation/bin/BuildFile.xml @@ -17,5 +17,7 @@ + + diff --git a/Alignment/OfflineValidation/bin/DiMuonVmerge.cc b/Alignment/OfflineValidation/bin/DiMuonVmerge.cc new file mode 100644 index 0000000000000..a12042f1cc759 --- /dev/null +++ b/Alignment/OfflineValidation/bin/DiMuonVmerge.cc @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include + +#include "exceptions.h" +#include "toolbox.h" +#include "Options.h" + +#include "boost/filesystem.hpp" +#include "boost/property_tree/ptree.hpp" +#include "boost/property_tree/json_parser.hpp" +#include "boost/optional.hpp" + +#include "TString.h" +#include "TASImage.h" + +#include "Alignment/OfflineValidation/macros/loopAndPlot.C" +#include "Alignment/OfflineValidation/interface/TkAlStyle.h" + +using namespace std; +using namespace AllInOneConfig; + +namespace pt = boost::property_tree; + +int merge(int argc, char* argv[]) { + // parse the command line + + Options options; + options.helper(argc, argv); + options.parser(argc, argv); + + //Read in AllInOne json config + pt::ptree main_tree; + 
pt::read_json(options.config, main_tree); + + pt::ptree alignments = main_tree.get_child("alignments"); + pt::ptree validation = main_tree.get_child("validation"); + + TString filesAndLabels; + for (const auto& childTree : alignments) { + // Print node name and its attributes + // std::cout << "Node: " << childTree.first << std::endl; + // for (const auto& attr : childTree.second) { + // std::cout << " Attribute: " << attr.first << " = " << attr.second.data() << std::endl; + // } + + std::cout << childTree.second.get("file") << std::endl; + std::cout << childTree.second.get("title") << std::endl; + std::string toAdd = childTree.second.get("file") + + "/DiMuonVertexValidation.root=" + childTree.second.get("title") + ","; + filesAndLabels += toAdd; + } + + if (filesAndLabels.Length() > 0) { + filesAndLabels.Remove(filesAndLabels.Length() - 1); // Remove the last character + } + + std::cout << "filesAndLabels: " << filesAndLabels << std::endl; + + loopAndPlot(filesAndLabels); + + return EXIT_SUCCESS; +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +int main(int argc, char* argv[]) { return exceptions(argc, argv); } +#endif diff --git a/Alignment/OfflineValidation/bin/MTSmerge.cc b/Alignment/OfflineValidation/bin/MTSmerge.cc new file mode 100644 index 0000000000000..c94e55a6d8e26 --- /dev/null +++ b/Alignment/OfflineValidation/bin/MTSmerge.cc @@ -0,0 +1,87 @@ +#include +#include +#include +#include +#include + +#include "exceptions.h" +#include "toolbox.h" +#include "Options.h" + +#include "boost/filesystem.hpp" +#include "boost/property_tree/ptree.hpp" +#include "boost/property_tree/json_parser.hpp" +#include "boost/optional.hpp" + +#include "TString.h" +#include "TASImage.h" + +#include "Alignment/OfflineValidation/macros/trackSplitPlot.h" +#include "Alignment/OfflineValidation/macros/trackSplitPlot.C" +#include "Alignment/OfflineValidation/interface/TkAlStyle.h" + +using namespace std; +using namespace AllInOneConfig; + +namespace pt = boost::property_tree; + +int merge(int 
argc, char* argv[]) { + // parse the command line + + Options options; + options.helper(argc, argv); + options.parser(argc, argv); + + //Read in AllInOne json config + pt::ptree main_tree; + pt::read_json(options.config, main_tree); + + pt::ptree alignments = main_tree.get_child("alignments"); + pt::ptree validation = main_tree.get_child("validation"); + pt::ptree global_style; + pt::ptree merge_style; + + int iov = validation.count("IOV") ? validation.get("IOV") : 1; + std::string rlabel = validation.count("customrighttitle") ? validation.get("customrighttitle") : ""; + rlabel = merge_style.count("Rlabel") ? merge_style.get("Rlabel") : rlabel; + std::string cmslabel = merge_style.count("CMSlabel") ? merge_style.get("CMSlabel") : "INTERNAL"; + if (TkAlStyle::toStatus(cmslabel) == CUSTOM) + TkAlStyle::set(CUSTOM, NONE, cmslabel, rlabel); + else + TkAlStyle::set(TkAlStyle::toStatus(cmslabel), NONE, "", rlabel); + + TString filesAndLabels; + for (const auto& childTree : alignments) { + // Print node name and its attributes + std::cout << "Node: " << childTree.first << std::endl; + for (const auto& attr : childTree.second) { + std::cout << " Attribute: " << attr.first << " = " << attr.second.data() << std::endl; + } + + //std::cout << childTree.second.get("file") << std::endl; + //std::cout << childTree.second.get("title") << std::endl; + //std::cout << childTree.second.get("color") << std::endl; + //std::cout << childTree.second.get("style") << std::endl; + + std::string toAdd = childTree.second.get("file") + + Form("/MTSValidation_%s_%d.root=", childTree.first.c_str(), iov) + + childTree.second.get("title") + + Form("|%i|%i,", childTree.second.get("color"), childTree.second.get("style")); + filesAndLabels += toAdd; + } + + std::cout << "filesAndLabels: " << filesAndLabels << std::endl; + + TkAlStyle::legendheader = ""; + TkAlStyle::legendoptions = "all"; + outliercut = -1.0; + //fillmatrix(); + subdetector = "PIXEL"; + makePlots(filesAndLabels, "./"); + + return 
EXIT_SUCCESS; +} + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +int main(int argc, char* argv[]) { return exceptions(argc, argv); } +#endif diff --git a/Alignment/OfflineValidation/interface/DiLeptonVertexHelpers.h b/Alignment/OfflineValidation/interface/DiLeptonVertexHelpers.h index ee22c68001b78..51436d1fee44a 100644 --- a/Alignment/OfflineValidation/interface/DiLeptonVertexHelpers.h +++ b/Alignment/OfflineValidation/interface/DiLeptonVertexHelpers.h @@ -45,6 +45,131 @@ namespace DiLeptonHelp { enum flavour { MM = 0, EE = 1, UNDEF = -1 }; + enum etaRegion { BARBAR, BARFWD, BARBWD, FWDFWD, BWDBWD, FWDBWD, END }; + + // + // Ancillary class for plotting in different kinematics regions + // of the two muon tracks + // + class PlotsVsDiLeptonRegion { + public: + PlotsVsDiLeptonRegion(const float etaBoundary) : m_etaBoundary(etaBoundary) {} + ~PlotsVsDiLeptonRegion() = default; + + //________________________________________________________________________________// + inline void bookSet(const TFileDirectory& fs, const TH1* histo) { + const std::string name = histo->GetName(); + const std::string title = histo->GetTitle(); + const std::string xTitle = histo->GetXaxis()->GetTitle(); + const std::string yTitle = histo->GetYaxis()->GetTitle(); + std::string zTitle = ""; + if (((TObject*)histo)->InheritsFrom("TH2")) { + zTitle = histo->GetZaxis()->GetTitle(); + } + + for (const auto& etaReg : m_etaRegions) { + if (etaReg == etaRegion::END) + continue; + + if (((TObject*)histo)->InheritsFrom("TH2")) { + m_h2_map[etaReg] = + fs.make((name + "_" + m_etaRegionNames[etaReg]).c_str(), + (title + m_etaRegionNames[etaReg] + ";" + xTitle + ";" + yTitle + ";" + zTitle).c_str(), + histo->GetNbinsX(), + histo->GetXaxis()->GetXmin(), + histo->GetXaxis()->GetXmax(), + histo->GetNbinsY(), + histo->GetYaxis()->GetXmin(), + histo->GetYaxis()->GetXmax()); + } else { + m_h1_map[etaReg] = fs.make((name + "_" + m_etaRegionNames[etaReg]).c_str(), + (title + m_etaRegionNames[etaReg] + ";" + xTitle + ";" + 
yTitle).c_str(), + histo->GetNbinsX(), + histo->GetXaxis()->GetXmin(), + histo->GetXaxis()->GetXmax()); + } + } + + // flip the is booked bit + m_isBooked = true; + } + + //________________________________________________________________________________// + // Determine the eta region based on eta values + etaRegion getEtaRegion(const double eta1, const double eta2) { + bool isEta1Barrel = std::abs(eta1) <= m_etaBoundary; + bool isEta2Barrel = std::abs(eta2) <= m_etaBoundary; + + if (isEta1Barrel && isEta2Barrel) { + return etaRegion::BARBAR; + } else if ((isEta1Barrel && eta2 > m_etaBoundary) || (isEta2Barrel && eta1 > m_etaBoundary)) { + return etaRegion::BARFWD; + } else if ((isEta1Barrel && eta2 < -m_etaBoundary) || (isEta2Barrel && eta1 < -m_etaBoundary)) { + return etaRegion::BARBWD; + } else if (eta1 > m_etaBoundary && eta2 > m_etaBoundary) { + return etaRegion::FWDFWD; + } else if (eta1 < -m_etaBoundary && eta2 < -m_etaBoundary) { + return etaRegion::BWDBWD; + } else if ((eta1 > m_etaBoundary && eta2 < -m_etaBoundary) || (eta2 > m_etaBoundary && eta1 < -m_etaBoundary)) { + return etaRegion::FWDBWD; + } + + // Default case if none of the conditions match + return etaRegion::END; // Adjust the default based on your logic + } + + //________________________________________________________________________________// + inline void fillTH1Plots(const float val, const std::pair& momenta) { + if (!m_isBooked) { + edm::LogError("PlotsVsDiLeptonRegion") + << "In" << __FUNCTION__ << "," << __LINE__ << "trying to fill a plot not booked!" 
<< std::endl; + return; + } + + etaRegion region = getEtaRegion(momenta.first.Eta(), momenta.second.Eta()); + if (region == etaRegion::END) { + edm::LogError("PlotsVsDiLeptonRegion") << "undefined di-muon kinematics" << std::endl; + } + m_h1_map[region]->Fill(val); + } + + //________________________________________________________________________________// + inline void fillTH2Plots(const float valX, + const float valY, + const std::pair& momenta) { + if (!m_isBooked) { + edm::LogError("PlotsVsDiLeptonRegion") + << "In" << __FUNCTION__ << "," << __LINE__ << "trying to fill a plot not booked!" << std::endl; + return; + } + + etaRegion region = getEtaRegion(momenta.first.Eta(), momenta.second.Eta()); + if (region == etaRegion::END) { + edm::LogError("PlotsVsDiLeptonRegion") << "undefined di-muon kinematics" << std::endl; + } + m_h2_map[region]->Fill(valX, valY); + } + + private: + const std::vector m_etaRegions = {etaRegion::BARBAR, + etaRegion::BARFWD, + etaRegion::BARBWD, + etaRegion::FWDFWD, + etaRegion::BWDBWD, + etaRegion::FWDBWD}; + + const std::vector m_etaRegionNames = {"barrel-barrel", + "barrel-forward", + "barrel-backward", + "forward-forward", + "backward-backward", + "forward-backward"}; + const float m_etaBoundary; + bool m_isBooked; + std::map m_h1_map; + std::map m_h2_map; + }; + // // Ancillary class for plotting // diff --git a/Alignment/OfflineValidation/interface/TkAlStyle.h b/Alignment/OfflineValidation/interface/TkAlStyle.h index ca99704d473eb..fae28aea455f3 100644 --- a/Alignment/OfflineValidation/interface/TkAlStyle.h +++ b/Alignment/OfflineValidation/interface/TkAlStyle.h @@ -42,6 +42,9 @@ enum PublicationStatus { // Data era: determines labels of data-taking periods, e.g. 
CRUZET enum Era { NONE, CRUZET15, CRAFT15, COLL0T15 }; +// Alignment object +enum AlignObj { IDEALAlign, RUN1Align, CRUZETAlign, CRAFTAlign, Coll0TAlign }; + class TkAlStyle { public: // Adjusts the gStyle settings and store the PublicationStatus @@ -52,6 +55,13 @@ class TkAlStyle { static void set(const TString customTitle); static PublicationStatus status() { return publicationStatus_; } + static TString toTString(const PublicationStatus status); + static TString toTString(const Era era); + static TString toTString(const AlignObj obj); + + static int color(const AlignObj obj); + static int style(const AlignObj obj); + // Draws a title " 2015" on the current pad // dependending on the PublicationStatus // INTERNAL : no extra label (intended for AN-only plots with data) diff --git a/Alignment/OfflineValidation/macros/FitPVResiduals.C b/Alignment/OfflineValidation/macros/FitPVResiduals.C index 2dcf9a5037252..04fe43f26e13a 100644 --- a/Alignment/OfflineValidation/macros/FitPVResiduals.C +++ b/Alignment/OfflineValidation/macros/FitPVResiduals.C @@ -39,7 +39,7 @@ #include #include #include -//#include "Alignment/OfflineValidation/macros/TkAlStyle.cc" +//#include "Alignment/OfflineValidation/interface/TkAlStyle.h" #include "Alignment/OfflineValidation/macros/CMS_lumi.h" #define PLOTTING_MACRO // to remove message logger #include "Alignment/OfflineValidation/interface/PVValidationHelpers.h" diff --git a/Alignment/OfflineValidation/macros/loopAndPlot.C b/Alignment/OfflineValidation/macros/loopAndPlot.C new file mode 100644 index 0000000000000..093582435b54d --- /dev/null +++ b/Alignment/OfflineValidation/macros/loopAndPlot.C @@ -0,0 +1,482 @@ +// ROOT includes +#include "TCanvas.h" +#include "TClass.h" +#include "TDirectory.h" +#include "TFile.h" +#include "TGaxis.h" +#include "TH1.h" +#include "TH2.h" +#include "TKey.h" +#include "TLegend.h" +#include "TObjString.h" +#include "TObject.h" +#include "TProfile.h" +#include "TRatioPlot.h" +#include "TStyle.h" + +// standard 
includes +#include + +// 2 file case +TFile *sourceFile1, *sourceFile2; + +// multi-file case +std::vector sourceFiles; +Int_t def_colors[9] = {kBlack, kBlue, kRed, kMagenta, kGreen, kCyan, kViolet, kOrange, kGreen + 2}; + +std::pair getExtrema(TObjArray *array); +template +void MakeNicePlotStyle(T *hist); +void MakeNiceProfile(TProfile *prof); + +//void MakeNicePlotStyle(TH1 *hist); +void plot2Histograms(TH1 *h1, TH1 *h2, const TString &label1, const TString &label2); +void plot2Profiles(TProfile *h1, TProfile *h2, const TString &label1, const TString &label2); +void recurseOverKeys(TDirectory *target1, const std::vector &labels, bool isNorm); +void recurseOverKeys(TDirectory *target1, const TString &label1, const TString &label2); +void plotHistograms(std::vector histos, const std::vector &labels, bool isNormalized = false); + +/************************************************/ +void loopAndPlot(TString namesandlabels, bool doNormalize = false) +/************************************************/ +{ + std::vector labels; + + namesandlabels.Remove(TString::kTrailing, ','); + TObjArray *nameandlabelpairs = namesandlabels.Tokenize(","); + for (Int_t i = 0; i < nameandlabelpairs->GetEntries(); ++i) { + TObjArray *aFileLegPair = TString(nameandlabelpairs->At(i)->GetName()).Tokenize("="); + if (aFileLegPair->GetEntries() == 2) { + sourceFiles.push_back(TFile::Open(aFileLegPair->At(0)->GetName(), "READ")); + TObjString *s_label = (TObjString *)aFileLegPair->At(1); + labels.push_back(s_label->String()); + } else { + std::cout << "Please give file name and legend entry in the following form:\n" + << " filename1=legendentry1,filename2=legendentry2\n"; + } + } + + recurseOverKeys(sourceFiles[0], labels, doNormalize); + + for (const auto &file : sourceFiles) { + file->Close(); + } +} + +/************************************************/ +void recurseOverKeys(TDirectory *target1, const std::vector &labels, bool isNorm) +/************************************************/ +{ + 
// Figure out where we are + TString path((char *)strstr(target1->GetPath(), ":")); + path.Remove(0, 2); + + sourceFiles[0]->cd(path); + + std::cout << path << std::endl; + + TDirectory *current_sourcedir = gDirectory; + + TKey *key; + TIter nextkey(current_sourcedir->GetListOfKeys()); + + while ((key = (TKey *)nextkey())) { + auto obj = key->ReadObj(); + + // Check if this is a 1D histogram or a directory + if (obj->IsA()->InheritsFrom("TH1")) { + if (obj->IsA()->InheritsFrom("TH2")) + continue; + + // ************************** + // Plot & Save this Histogram + std::vector histos; + + TH1 *htemp1 = (TH1 *)obj; + TString histName = htemp1->GetName(); + + for (const auto &file : sourceFiles) { + TH1 *htemp; + if (path != "") { + file->GetObject(path + "/" + histName, htemp); + } else { + file->GetObject(histName, htemp); + } + histos.push_back(htemp); + } + + //outputFilename=histName; + //plot2Histograms(htemp1, htemp2, outputFolder+path+"/"+outputFilename+"."+imageType); + plotHistograms(histos, labels, isNorm); + + } else if (obj->IsA()->InheritsFrom("TDirectory")) { + // it's a subdirectory + + std::cout << "Found subdirectory " << obj->GetName() << std::endl; + //gSystem->MakeDirectory(outputFolder+path+"/"+obj->GetName()); + + // obj is now the starting point of another round of merging + // obj still knows its depth within the target file via + // GetPath(), so we can still figure out where we are in the recursion + + if ((TString(obj->GetName())).Contains("Residuals")) + continue; + + recurseOverKeys((TDirectory *)obj, labels, isNorm); + + } // end of IF a TDriectory + } +} + +/************************************************/ +void plotHistograms(std::vector histos, const std::vector &labels, bool isNormalized) { + /************************************************/ + + TGaxis::SetMaxDigits(3); + + auto c1 = new TCanvas(Form("c1_%s", histos[0]->GetName()), "A ratio example", 1000, 800); + c1->SetTicks(0, 1); + gStyle->SetOptStat(0); + + TObjArray *array = 
new TObjArray(histos.size()); + int index = 0; + for (const auto &histo : histos) { + MakeNicePlotStyle(histo); + + if (isNormalized) { + Double_t scale = 1. / histo->Integral(); + histo->Scale(scale); + } + + histo->SetLineColor(def_colors[index]); + histo->SetMarkerColor(def_colors[index]); + histo->SetMarkerStyle(20); + array->Add(histo); + index++; + } + + std::pair extrema = getExtrema(array); + delete array; + float min = (extrema.first > 0) ? (extrema.first) * 0.7 : (extrema.first) * 1.3; + histos[0]->GetYaxis()->SetRangeUser(min, extrema.second * 1.3); + + TRatioPlot *rp{nullptr}; + + for (unsigned int i = 1; i < histos.size(); i++) { + if (i == 1) { + rp = new TRatioPlot(histos[0], histos[i]); + rp->SetLeftMargin(0.15); + rp->SetRightMargin(0.05); + rp->SetSeparationMargin(0.01); + rp->SetLowBottomMargin(0.35); + rp->Draw(); + } else { + rp->GetUpperPad()->cd(); + histos[i]->Draw("same"); + } + } + + if (!rp) { + std::cerr << "TRatioPlot could not be initialized, exiting!" << std::endl; + return; + } + + rp->GetUpperPad()->cd(); + + // Draw the legend + TLegend *infoBox = new TLegend(0.65, 0.75, 0.95, 0.90, ""); + infoBox->SetShadowColor(0); // 0 = transparent + infoBox->SetFillColor(kWhite); + infoBox->SetTextSize(0.035); + + for (unsigned int i = 0; i < histos.size(); i++) { + if (i == 0) { + infoBox->AddEntry(histos[i], labels[i], "L"); + } else { + infoBox->AddEntry(histos[i], labels[i], "P"); + } + } + infoBox->Draw("same"); + + MakeNicePlotStyle(rp->GetLowerRefGraph()); + rp->GetLowerRefGraph()->GetYaxis()->SetTitle("ratio"); + rp->GetLowerRefGraph()->SetMinimum(0.); + rp->GetLowerRefGraph()->SetMaximum(2.); + rp->GetLowerRefGraph()->SetLineColor(def_colors[0]); + rp->GetLowerRefGraph()->SetMarkerColor(def_colors[0]); + //c1->Update(); + + for (unsigned int i = 1; i < histos.size(); i++) { + auto c2 = new TCanvas(Form("c2_%s_%i", histos[i]->GetName(), i), "A ratio example 2", 800, 800); + c2->cd(); + auto rp2 = new TRatioPlot(histos[0], histos[i]); + 
rp2->Draw(); + TGraph *g = rp2->GetLowerRefGraph(); + // if(g) + MakeNicePlotStyle(g); + g->SetLineColor(def_colors[i]); + g->SetMarkerColor(def_colors[i]); + + c1->cd(); + rp->GetLowerPad()->cd(); + if (g) + g->Draw("same"); + c1->Update(); + delete c2; + } + + //rp->GetLowerPad()->cd(); + //c1->Update(); + + c1->SaveAs(TString(histos[0]->GetName()) + ".png"); + delete c1; +} + +/************************************************/ +void recurseOverKeys(TDirectory *target1, const TString &label1, const TString &label2) +/************************************************/ +{ + // Figure out where we are + TString path((char *)strstr(target1->GetPath(), ":")); + path.Remove(0, 2); + + sourceFile1->cd(path); + + std::cout << path << std::endl; + + TDirectory *current_sourcedir = gDirectory; + + TKey *key; + TIter nextkey(current_sourcedir->GetListOfKeys()); + + while ((key = (TKey *)nextkey())) { + auto obj = key->ReadObj(); + + // Check if this is a 1D histogram or a directory + if (obj->IsA()->InheritsFrom("TH1F")) { + // ************************** + // Plot & Save this Histogram + TH1F *htemp1, *htemp2; + + htemp1 = (TH1F *)obj; + TString histName = htemp1->GetName(); + + if (path != "") { + sourceFile2->GetObject(path + "/" + histName, htemp2); + } else { + sourceFile2->GetObject(histName, htemp2); + } + + //outputFilename=histName; + //plot2Histograms(htemp1, htemp2, outputFolder+path+"/"+outputFilename+"."+imageType); + plot2Histograms(htemp1, htemp2, label1, label2); + + } else if (obj->IsA()->InheritsFrom("TProfile")) { + // ************************** + // Plot & Save this Histogram + TProfile *htemp1, *htemp2; + + htemp1 = (TProfile *)obj; + TString histName = htemp1->GetName(); + + if (path != "") { + sourceFile2->GetObject(path + "/" + histName, htemp2); + } else { + sourceFile2->GetObject(histName, htemp2); + } + + plot2Profiles(htemp1, htemp2, label1, label2); + } else if (obj->IsA()->InheritsFrom("TDirectory")) { + // it's a subdirectory + + std::cout << 
"Found subdirectory " << obj->GetName() << std::endl; + //gSystem->MakeDirectory(outputFolder+path+"/"+obj->GetName()); + + // obj is now the starting point of another round of merging + // obj still knows its depth within the target file via + // GetPath(), so we can still figure out where we are in the recursion + + if ((TString(obj->GetName())).Contains("DQM") && !(TString(obj->GetName())).Contains("DQMData")) + continue; + + recurseOverKeys((TDirectory *)obj, label1, label2); + + } // end of IF a TDriectory + } +} + +/************************************************/ +void plot2Profiles(TProfile *h1, TProfile *h2, const TString &label1, const TString &label2) +/************************************************/ +{ + auto c1 = new TCanvas(Form("c1_%s", h1->GetName()), "example", 800, 800); + c1->SetLeftMargin(0.15); + c1->SetRightMargin(0.03); + gStyle->SetOptStat(0); + + h1->SetLineColor(kBlue); + h2->SetLineColor(kRed); + + h1->SetMarkerColor(kBlue); + h2->SetMarkerColor(kRed); + + h1->SetMarkerStyle(20); + h2->SetMarkerStyle(21); + + MakeNiceProfile(h1); + MakeNiceProfile(h2); + + TObjArray *array = new TObjArray(2); + array->Add(h1); + array->Add(h2); + + std::pair extrema = getExtrema(array); + + delete array; + + float min = (extrema.first > 0) ? 
(extrema.first) * 0.99 : (extrema.first) * 1.01; + + h1->GetYaxis()->SetRangeUser(min, extrema.second * 1.01); + h2->GetYaxis()->SetRangeUser(min, extrema.second * 1.01); + c1->cd(); + h1->Draw(); + h2->Draw("same"); + + TLegend *infoBox = new TLegend(0.75, 0.75, 0.97, 0.90, ""); + infoBox->AddEntry(h1, label1, "PL"); + infoBox->AddEntry(h2, label2, "PL"); + infoBox->SetShadowColor(0); // 0 = transparent + infoBox->SetFillColor(kWhite); + infoBox->Draw("same"); + + c1->SaveAs(TString(h1->GetName()) + ".png"); + delete c1; +} + +/************************************************/ +void plot2Histograms(TH1 *h1, TH1 *h2, const TString &label1, const TString &label2) { + /************************************************/ + + TGaxis::SetMaxDigits(3); + + auto c1 = new TCanvas(Form("c1_%s", h1->GetName()), "A ratio example", 800, 800); + gStyle->SetOptStat(0); + + MakeNicePlotStyle(h1); + MakeNicePlotStyle(h2); + + h1->SetLineColor(kBlue); + h2->SetLineColor(kRed); + + TObjArray *array = new TObjArray(2); + array->Add(h1); + array->Add(h2); + + std::pair extrema = getExtrema(array); + + delete array; + + float min = (extrema.first > 0) ? 
(extrema.first) * 0.7 : (extrema.first) * 1.3; + + h1->GetYaxis()->SetRangeUser(min, extrema.second * 1.3); + h2->GetYaxis()->SetRangeUser(min, extrema.second * 1.3); + + auto rp = new TRatioPlot(h1, h2); + c1->SetTicks(0, 1); + rp->Draw(); + + //rp->GetUpperPad()->SetTopMargin(0.09); + //rp->GetUpperPad()->SetLeftMargin(0.15); + //rp->GetUpperPad()->SetRightMargin(0.03); + //rp->GetLowerPad()->SetBottomMargin(0.5); + + rp->SetLeftMargin(0.15); + rp->SetRightMargin(0.03); + rp->SetSeparationMargin(0.01); + rp->SetLowBottomMargin(0.35); + + rp->GetUpperPad()->cd(); + // Draw the legend + TLegend *infoBox = new TLegend(0.75, 0.75, 0.97, 0.90, ""); + infoBox->AddEntry(h1, label1, "L"); + infoBox->AddEntry(h2, label2, "L"); + infoBox->SetShadowColor(0); // 0 = transparent + infoBox->SetFillColor(kWhite); + infoBox->Draw("same"); + + MakeNicePlotStyle(rp->GetLowerRefGraph()); + rp->GetLowerRefGraph()->GetYaxis()->SetTitle("ratio"); + rp->GetLowerRefGraph()->SetMinimum(0.); + rp->GetLowerRefGraph()->SetMaximum(2.); + c1->Update(); + + //rp->GetLowerPad()->cd(); + //c1->Update(); + + c1->SaveAs(TString(h1->GetName()) + ".png"); + delete c1; +} + +/*--------------------------------------------------------------------*/ +template +void MakeNicePlotStyle(T *hist) +/*--------------------------------------------------------------------*/ +{ + //hist->SetStats(kFALSE); + hist->SetLineWidth(2); + hist->GetXaxis()->SetNdivisions(505); + hist->GetXaxis()->CenterTitle(true); + hist->GetYaxis()->CenterTitle(true); + hist->GetXaxis()->SetTitleFont(42); + hist->GetYaxis()->SetTitleFont(42); + hist->GetXaxis()->SetTitleSize(0.05); + hist->GetYaxis()->SetTitleSize(0.05); + hist->GetXaxis()->SetTitleOffset(0.9); + hist->GetYaxis()->SetTitleOffset(1.4); + hist->GetXaxis()->SetLabelFont(42); + hist->GetYaxis()->SetLabelFont(42); + if (((TObject *)hist)->IsA()->InheritsFrom("TGraph")) { + hist->GetYaxis()->SetLabelSize(.025); + //hist->GetYaxis()->SetNdivisions(505); + } else { + 
hist->GetYaxis()->SetLabelSize(.05); + } + hist->GetXaxis()->SetLabelSize(.05); +} + +/*--------------------------------------------------------------------*/ +void MakeNiceProfile(TProfile *prof) +/*--------------------------------------------------------------------*/ +{ + prof->SetLineWidth(2); + prof->GetXaxis()->SetNdivisions(505); + prof->GetXaxis()->CenterTitle(true); + prof->GetYaxis()->CenterTitle(true); + prof->GetXaxis()->SetTitleFont(42); + prof->GetYaxis()->SetTitleFont(42); + prof->GetXaxis()->SetTitleSize(0.05); + prof->GetYaxis()->SetTitleSize(0.05); + prof->GetXaxis()->SetTitleOffset(0.9); + prof->GetYaxis()->SetTitleOffset(1.4); + prof->GetXaxis()->SetLabelFont(42); + prof->GetYaxis()->SetLabelFont(42); + prof->GetYaxis()->SetLabelSize(.05); + prof->GetXaxis()->SetLabelSize(.05); +} + +//*****************************************************// +std::pair getExtrema(TObjArray *array) +//*****************************************************// +{ + Double_t theMaximum = (static_cast(array->At(0)))->GetMaximum(); + Double_t theMinimum = (static_cast(array->At(0)))->GetMinimum(); + for (Int_t i = 0; i < array->GetSize(); i++) { + if ((static_cast(array->At(i)))->GetMaximum() > theMaximum) { + theMaximum = (static_cast(array->At(i)))->GetMaximum(); + } + if ((static_cast(array->At(i)))->GetMinimum() < theMinimum) { + theMinimum = (static_cast(array->At(i)))->GetMinimum(); + } + } + return std::make_pair(theMinimum, theMaximum); +} diff --git a/Alignment/OfflineValidation/macros/momentumBiasValidation.C b/Alignment/OfflineValidation/macros/momentumBiasValidation.C index 06b3069a5a7c6..b48833be3212c 100644 --- a/Alignment/OfflineValidation/macros/momentumBiasValidation.C +++ b/Alignment/OfflineValidation/macros/momentumBiasValidation.C @@ -40,8 +40,8 @@ namespace eop { double mean = f1->GetParameter(1); double deviation = f1->GetParameter(2); - double lowLim; - double upLim; + double lowLim{mean - (2.0 * deviation)}; + double upLim{mean + (2.0 * 
deviation)}; double newmean; double degrade = 0.05; diff --git a/Alignment/OfflineValidation/macros/momentumElectronBiasValidation.C b/Alignment/OfflineValidation/macros/momentumElectronBiasValidation.C index 52bfdab5b4eb7..ac77f589f8e11 100644 --- a/Alignment/OfflineValidation/macros/momentumElectronBiasValidation.C +++ b/Alignment/OfflineValidation/macros/momentumElectronBiasValidation.C @@ -131,7 +131,7 @@ void momentumElectronBiasValidation(TString variable, std::vector labels; // list of input labels if (!checkArguments( variable, path, alignmentWithLabel, outputType, radius, verbose, givenMin, givenMax, mode, files, labels)) - return; + exit(EXIT_FAILURE); else { std::cout << "-> Number of files: " << files.size() << std::endl; } diff --git a/Alignment/OfflineValidation/macros/trackSplitPlot.C b/Alignment/OfflineValidation/macros/trackSplitPlot.C index 1e48e0ee52a1e..39e030fe10a91 100644 --- a/Alignment/OfflineValidation/macros/trackSplitPlot.C +++ b/Alignment/OfflineValidation/macros/trackSplitPlot.C @@ -8,689 +8,660 @@ Table Of Contents 5. Place Legend ***********************************/ +using namespace std; #include "trackSplitPlot.h" -#include "Alignment/OfflineValidation/macros/TkAlStyle.cc" +#include "Alignment/OfflineValidation/interface/TkAlStyle.h" //=================== //0. 
Track Split Plot //=================== -TCanvas *trackSplitPlot(Int_t nFiles,TString *files,TString *names,TString xvar,TString yvar, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas, ostream& summaryfile) -{ - if (TkAlStyle::status() == NO_STATUS) - TkAlStyle::set(INTERNAL); - TString legendOptions = TkAlStyle::legendoptions; - legendOptions.ReplaceAll("all","meanerror,rmserror").ToLower(); - if (outliercut < 0) - outliercut = -1; - gStyle->SetMarkerSize(1.5); - setupcolors(); - stufftodelete->SetOwner(true); - cout << xvar << " " << yvar << endl; - if (xvar == "" && yvar == "") - return 0; - - PlotType type; - if (xvar == "") type = Histogram; - else if (yvar == "") type = OrgHistogram; - else if (resolution) type = Resolution; - else if (nFiles < 1) type = ScatterPlot; - else type = Profile; - if (nFiles < 1) nFiles = 1; - - const Int_t n = nFiles; - - vector p; - Int_t lengths[n]; - - stringstream sx,sy,srel,ssigma1,ssigma2,ssigmaorg; - - sx << xvar << "_org"; - TString xvariable = sx.str(); - TString xvariable2 = ""; - if (xvar == "runNumber") xvariable = "runNumber"; - if (xvar.BeginsWith("nHits")) - { - xvariable = xvar; - xvariable2 = xvar; - xvariable.Append("1_spl"); - xvariable2.Append("2_spl"); - } +TCanvas *trackSplitPlot(Int_t nFiles, + TString *files, + TString *names, + TString xvar, + TString yvar, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas, + ostream &summaryfile) { + if (TkAlStyle::status() == NO_STATUS) + TkAlStyle::set(INTERNAL); + TString legendOptions = TkAlStyle::legendoptions; + legendOptions.ReplaceAll("all", "meanerror,rmserror").ToLower(); + if (outliercut < 0) + outliercut = -1; + gStyle->SetMarkerSize(1.5); + setupcolors(); + stufftodelete->SetOwner(true); + cout << xvar << " " << yvar << endl; + if (xvar == "" && yvar == "") + return nullptr; + + PlotType type; + if (xvar == "") + type = Histogram; + else if (yvar == "") + type = OrgHistogram; + else if (resolution) + type = Resolution; + 
else if (nFiles < 1) + type = ScatterPlot; + else + type = Profile; + if (nFiles < 1) + nFiles = 1; + + const Int_t n = nFiles; + + vector p; + Int_t lengths[n]; + + stringstream sx, sy, srel, ssigma1, ssigma2, ssigmaorg; + + sx << xvar << "_org"; + TString xvariable = sx.str(); + TString xvariable2 = ""; + if (xvar == "runNumber") + xvariable = "runNumber"; + if (xvar.BeginsWith("nHits")) { + xvariable = xvar; + xvariable2 = xvar; + xvariable.Append("1_spl"); + xvariable2.Append("2_spl"); + } + + sy << "Delta_" << yvar; + TString yvariable = sy.str(); + + TString relvariable = "1"; + if (relative) { + srel << yvar << "_org"; + relvariable = srel.str(); + } + + TString sigma1variable = "", sigma2variable = ""; + if (pull) { + ssigma1 << yvar << "1Err_spl"; + ssigma2 << yvar << "2Err_spl"; + } + sigma1variable = ssigma1.str(); + sigma2variable = ssigma2.str(); + + TString sigmaorgvariable = ""; + if (pull && relative) + ssigmaorg << yvar << "Err_org"; + sigmaorgvariable = ssigmaorg.str(); + + Double_t xmin = -1, xmax = 1, ymin = -1, ymax = 1, xbins = -1, ybins; + if (type == Profile || type == ScatterPlot || type == OrgHistogram || type == Resolution) + axislimits(nFiles, files, xvar, 'x', relative, pull, xmin, xmax, xbins); + if (type == Profile || type == ScatterPlot || type == Histogram || type == Resolution) + axislimits(nFiles, files, yvar, 'y', relative, pull, ymin, ymax, ybins); + + std::vector meansrmss(n); + std::vector means(n); + std::vector rmss(n); + //a file is not "used" if it's MC data and the x variable is run number, or if the filename is blank + std::vector used(n); + + for (Int_t i = 0; i < n; i++) { + stringstream sid; + sid << "p" << i; + TString id = sid.str(); + + //for a profile or resolution, it fills a histogram, q[j], for each bin, then gets the mean and width from there. 
+ vector q; - sy << "Delta_" << yvar; - TString yvariable = sy.str(); - - TString relvariable = "1"; - if (relative) - { - srel << yvar << "_org"; - relvariable = srel.str(); + if (type == ScatterPlot) + p.push_back(new TH2F(id, "", xbins, xmin, xmax, ybins, ymin, ymax)); + if (type == Histogram) + p.push_back(new TH1F(id, "", ybins, ymin, ymax)); + if (type == OrgHistogram) + p.push_back(new TH1F(id, "", xbins, xmin, xmax)); + if (type == Resolution || type == Profile) { + p.push_back(new TH1F(id, "", xbins, xmin, xmax)); + for (Int_t j = 0; j < xbins; j++) { + stringstream sid2; + sid2 << "q" << i << j; + TString id2 = sid2.str(); + q.push_back(new TH1F(id2, "", 1000, ymin * 10, ymax * 10)); + } } - TString sigma1variable = "",sigma2variable = ""; - if (pull) - { - ssigma1 << yvar << "1Err_spl"; - ssigma2 << yvar << "2Err_spl"; + p[i]->SetLineColor(colors[i]); + if (type == Resolution || type == Profile) { + p[i]->SetMarkerStyle(styles[i] / 100); + p[i]->SetMarkerColor(colors[i]); + p[i]->SetLineStyle(styles[i] % 100); + } else { + if (styles[i] >= 100) { + p[i]->SetMarkerStyle(styles[i] / 100); + p[i]->SetMarkerColor(colors[i]); + p[i]->Sumw2(); + } + p[i]->SetLineStyle(styles[i] % 100); } - sigma1variable = ssigma1.str(); - sigma2variable = ssigma2.str(); - - TString sigmaorgvariable = ""; - if (pull && relative) - ssigmaorg << yvar << "Err_org"; - sigmaorgvariable = ssigmaorg.str(); - - Double_t xmin = -1, xmax = 1, ymin = -1, ymax = 1, xbins = -1, ybins; - if (type == Profile || type == ScatterPlot || type == OrgHistogram || type == Resolution) - axislimits(nFiles,files,xvar,'x',relative,pull,xmin,xmax,xbins); - if (type == Profile || type == ScatterPlot || type == Histogram || type == Resolution) - axislimits(nFiles,files,yvar,'y',relative,pull,ymin,ymax,ybins); - - std::vector meansrmss(n); - std::vector means(n); - std::vector rmss(n); - std::vector used(n); //a file is not "used" if it's MC data and the x variable is run number, or if the filename is 
blank - - for (Int_t i = 0; i < n; i++) - { - stringstream sid; - sid << "p" << i; - TString id = sid.str(); + stufftodelete->Add(p[i]); + p[i]->SetBit(kCanDelete, true); + + used[i] = true; + //if it's MC data (run 1), the run number is meaningless + if ((xvar == "runNumber" && findMax(files[i], "runNumber", 'x') < 2) || files[i] == "") { + used[i] = false; + p[i]->SetLineColor(kWhite); + p[i]->SetMarkerColor(kWhite); + for (unsigned int j = 0; j < q.size(); j++) + delete q[j]; + continue; + } - //for a profile or resolution, it fills a histogram, q[j], for each bin, then gets the mean and width from there. - vector q; + TFile *f = TFile::Open(files[i]); + TTree *tree = (TTree *)f->Get("cosmicValidation/splitterTree"); + if (tree == nullptr) + tree = (TTree *)f->Get("splitterTree"); + + lengths[i] = tree->GetEntries(); + + Double_t x = 0, y = 0, rel = 1, sigma1 = 1; + Double_t sigma2 = 1; //if !pull, we want to divide by sqrt(2) because we want the error from 1 track + Double_t sigmaorg = 0; + + Int_t xint = 0, xint2 = 0; + Int_t runNumber = 0; + double pt1 = 0, maxpt1 = 0; + + if (!relative && !pull && (yvar == "dz" || yvar == "dxy")) + rel = 1e-4; //it's in cm but we want it in um, so divide by 1e-4 + if (!relative && !pull && (yvar == "phi" || yvar == "theta" || yvar == "qoverpt")) + rel = 1e-3; //make the axis labels manageable + + tree->SetBranchAddress("runNumber", &runNumber); + if (type == Profile || type == ScatterPlot || type == Resolution || type == OrgHistogram) { + if (xvar == "runNumber") + tree->SetBranchAddress(xvariable, &xint); + else if (xvar.BeginsWith("nHits")) { + tree->SetBranchAddress(xvariable, &xint); + tree->SetBranchAddress(xvariable2, &xint2); + } else + tree->SetBranchAddress(xvariable, &x); + } + if (type == Profile || type == ScatterPlot || type == Resolution || type == Histogram) { + int branchexists = tree->SetBranchAddress(yvariable, &y); + if (branchexists == -5) //i.e. 
it doesn't exist + { + yvariable.ReplaceAll("Delta_", "d"); + yvariable.Append("_spl"); + tree->SetBranchAddress(yvariable, &y); + } + } + if (relative && xvar != yvar) //if xvar == yvar, setting the branch here will undo setting it to x 2 lines earlier + tree->SetBranchAddress(relvariable, &rel); //setting the value of rel is then taken care of later: rel = x + if (pull) { + tree->SetBranchAddress(sigma1variable, &sigma1); + tree->SetBranchAddress(sigma2variable, &sigma2); + } + if (relative && pull) + tree->SetBranchAddress(sigmaorgvariable, &sigmaorg); + if (xvar == "pt" || yvar == "pt" || xvar == "qoverpt" || yvar == "qoverpt") { + tree->SetBranchAddress("pt1_spl", &pt1); + } else { + maxpt1 = 999; + } - if (type == ScatterPlot) - p.push_back(new TH2F(id,"",xbins,xmin,xmax,ybins,ymin,ymax)); + Int_t notincluded = 0; //this counts the number that aren't in the right run range. + //it's subtracted from lengths[i] in order to normalize the histograms + + for (Int_t j = 0; j < lengths[i]; j++) { + tree->GetEntry(j); + if (xvar == "runNumber" || xvar.BeginsWith("nHits")) + x = xint; + if (xvar == "runNumber") + runNumber = x; + if (yvar == "phi" && y >= pi) + y -= 2 * pi; + if (yvar == "phi" && y <= -pi) + y += 2 * pi; + if ((runNumber < minrun && runNumber > 1) || + (runNumber > maxrun && maxrun > 0)) //minrun and maxrun are global variables. 
+ { + notincluded++; + continue; + } + if (relative && xvar == yvar) + rel = x; + Double_t error = 0; + if (relative && pull) + error = sqrt((sigma1 / rel) * (sigma1 / rel) + (sigma2 / rel) * (sigma2 / rel) + + (sigmaorg * y / (rel * rel)) * (sigmaorg * x / (rel * rel))); + else + error = sqrt(sigma1 * sigma1 + + sigma2 * sigma2); // = sqrt(2) if !pull; this divides by sqrt(2) to get the error in 1 track + y /= (rel * error); + + if (pt1 > maxpt1) + maxpt1 = pt1; + + if (ymin <= y && y < ymax && xmin <= x && x < xmax) { if (type == Histogram) - p.push_back(new TH1F(id,"",ybins,ymin,ymax)); - if (type == OrgHistogram) - p.push_back(new TH1F(id,"",xbins,xmin,xmax)); - if (type == Resolution || type == Profile) - { - p.push_back(new TH1F(id,"",xbins,xmin,xmax)); - for (Int_t j = 0; j < xbins; j++) - { - - stringstream sid2; - sid2 << "q" << i << j; - TString id2 = sid2.str(); - q.push_back(new TH1F(id2,"",1000,ymin*10,ymax*10)); - - } + p[i]->Fill(y); + if (type == ScatterPlot) + p[i]->Fill(x, y); + if (type == Resolution || type == Profile) { + int which = (p[i]->Fill(x, 0)) - 1; + //get which q[j] by filling p[i] with nothing. (TH1F::Fill returns the bin number) + //p[i]'s actual contents are set later. + if (which >= 0 && (unsigned)which < q.size()) + q[which]->Fill(y); } + if (type == OrgHistogram) + p[i]->Fill(x); + } + + if (xvar.BeginsWith("nHits")) { + x = xint2; + if (ymin <= y && y < ymax && xmin <= x && x < xmax) { + if (type == Histogram) + p[i]->Fill(y); + if (type == ScatterPlot) + p[i]->Fill(x, y); + if (type == Resolution || type == Profile) { + int which = (p[i]->Fill(x, 0)) - 1; + if (which >= 0) + q[which]->Fill(y); //get which q[j] by filling p[i] (with nothing), which returns the bin number + } + if (type == OrgHistogram) + p[i]->Fill(x); + } + } + + if (lengths[i] < 10 ? 
true + : (((j + 1) / (int)(pow(10, (int)(log10(lengths[i])) - 1))) * + (int)(pow(10, (int)(log10(lengths[i])) - 1)) == + j + 1 || + j + 1 == lengths[i])) + //print when j+1 is a multiple of 10^x, where 10^x has 1 less digit than lengths[i] + // and when it's finished + //For example, if lengths[i] = 123456, it will print this when j+1 = 10000, 20000, ..., 120000, 123456 + //So it will print between 10 and 100 times: 10 when lengths[i] = 10^x and 100 when lengths[i] = 10^x - 1 + { + cout << j + 1 << "/" << lengths[i] << ": "; + if (type == Profile || type == ScatterPlot || type == Resolution) + cout << x << ", " << y << endl; + if (type == OrgHistogram) + cout << x << endl; + if (type == Histogram) + cout << y << endl; + } + } + lengths[i] -= notincluded; + + if (maxpt1 < 6) { //0T + used[i] = false; + p[i]->SetLineColor(kWhite); + p[i]->SetMarkerColor(kWhite); + for (unsigned int j = 0; j < q.size(); j++) + delete q[j]; + continue; + } - p[i]->SetLineColor(colors[i]); - if (type == Resolution || type == Profile) - { - p[i]->SetMarkerStyle(styles[i] / 100); - p[i]->SetMarkerColor(colors[i]); - p[i]->SetLineStyle(styles[i] % 100); - } + meansrmss[i] = ""; + if (type == Histogram || type == OrgHistogram) { + stringstream meanrms; + meanrms.precision(3); + + double average = -1e99; + double rms = -1e99; + + TString var = (type == Histogram ? yvar : xvar); + char axis = (type == Histogram ? 'y' : 'x'); + TString varunits = ""; + if (!relative && !pull) + varunits = units(var, axis); + if (legendOptions.Contains("mean")) { + if (outliercut < 0) + average = p[i]->GetMean(); else - { - if (styles[i] >= 100) - { - p[i]->SetMarkerStyle(styles[i] / 100); - p[i]->SetMarkerColor(colors[i]); - p[i]->Sumw2(); - } - p[i]->SetLineStyle(styles[i] % 100); - } - - stufftodelete->Add(p[i]); - p[i]->SetBit(kCanDelete,true); - - used[i] = true; - if ((xvar == "runNumber" ? 
findMax(files[i],"runNumber",'x') < 2 : false) || files[i] == "") //if it's MC data (run 1), the run number is meaningless - { - used[i] = false; - p[i]->SetLineColor(kWhite); - p[i]->SetMarkerColor(kWhite); - for (unsigned int j = 0; j < q.size(); j++) - delete q[j]; - continue; - } - - TFile *f = TFile::Open(files[i]); - TTree *tree = (TTree*)f->Get("cosmicValidation/splitterTree"); - if (tree == 0) - tree = (TTree*)f->Get("splitterTree"); - - lengths[i] = tree->GetEntries(); + average = findAverage(files[i], var, axis, relative, pull); + cout << "Average = " << average; + meanrms << "#mu = " << average; + means[i] = average; + if (legendOptions.Contains("meanerror")) { + if (outliercut < 0) + rms = p[i]->GetRMS(); + else + rms = findRMS(files[i], var, axis, relative, pull); + meanrms << " #pm " << rms / TMath::Sqrt(lengths[i] * abs(outliercut)); + cout << " +/- " << rms / TMath::Sqrt(lengths[i] * abs(outliercut)); + } + if (varunits != "") { + meanrms << " " << varunits; + cout << " " << varunits; + } + cout << endl; + if (legendOptions.Contains("rms")) + meanrms << ", "; + } + if (legendOptions.Contains("rms")) { + if (rms < -1e98) { + if (outliercut < 0) + rms = p[i]->GetRMS(); + else + rms = findRMS(files[i], var, axis, relative, pull); + } + cout << "RMS = " << rms; + meanrms << "rms = " << rms; + rmss[i] = rms; + if (legendOptions.Contains("rmserror")) { + //https://root.cern.ch/root/html/src/TH1.cxx.html#7076 + meanrms << " #pm " << rms / TMath::Sqrt(2 * lengths[i] * abs(outliercut)); + cout << " +/- " << rms / TMath::Sqrt(2 * lengths[i] * abs(outliercut)); + } + if (varunits != "") { + meanrms << " " << varunits; + cout << " " << varunits; + } + cout << endl; + } + meansrmss[i] = meanrms.str(); + } - Double_t x = 0, y = 0, rel = 1, sigma1 = 1, sigma2 = 1, //if !pull, we want to divide by sqrt(2) because we want the error from 1 track - sigmaorg = 0; - Int_t xint = 0, xint2 = 0; - Int_t runNumber = 0; - double pt1 = 0, maxpt1 = 0; + if (type == Resolution) 
{ + for (Int_t j = 0; j < xbins; j++) { + p[i]->SetBinContent(j + 1, q[j]->GetRMS()); + p[i]->SetBinError(j + 1, q[j]->GetRMSError()); + delete q[j]; + } + } - if (!relative && !pull && (yvar == "dz" || yvar == "dxy")) - rel = 1e-4; //it's in cm but we want it in um, so divide by 1e-4 - if (!relative && !pull && (yvar == "phi" || yvar == "theta" || yvar == "qoverpt")) - rel = 1e-3; //make the axis labels manageable + if (type == Profile) { + for (Int_t j = 0; j < xbins; j++) { + p[i]->SetBinContent(j + 1, q[j]->GetMean()); + p[i]->SetBinError(j + 1, q[j]->GetMeanError()); + delete q[j]; + } + } - tree->SetBranchAddress("runNumber",&runNumber); - if (type == Profile || type == ScatterPlot || type == Resolution || type == OrgHistogram) - { - if (xvar == "runNumber") - tree->SetBranchAddress(xvariable,&xint); - else if (xvar.BeginsWith("nHits")) - { - tree->SetBranchAddress(xvariable,&xint); - tree->SetBranchAddress(xvariable2,&xint2); - } - else - tree->SetBranchAddress(xvariable,&x); - } - if (type == Profile || type == ScatterPlot || type == Resolution || type == Histogram) - { - int branchexists = tree->SetBranchAddress(yvariable,&y); - if (branchexists == -5) //i.e. 
it doesn't exist - { - yvariable.ReplaceAll("Delta_","d"); - yvariable.Append("_spl"); - tree->SetBranchAddress(yvariable,&y); - } - } - if (relative && xvar != yvar) //if xvar == yvar, setting the branch here will undo setting it to x 2 lines earlier - tree->SetBranchAddress(relvariable,&rel); //setting the value of rel is then taken care of later: rel = x - if (pull) - { - tree->SetBranchAddress(sigma1variable,&sigma1); - tree->SetBranchAddress(sigma2variable,&sigma2); - } - if (relative && pull) - tree->SetBranchAddress(sigmaorgvariable,&sigmaorg); - if (xvar == "pt" || yvar == "pt" || xvar == "qoverpt" || yvar == "qoverpt") { - tree->SetBranchAddress("pt1_spl", &pt1); + setAxisLabels(p[i], type, xvar, yvar, relative, pull); + } + + if (type == Histogram && !pull && any_of(begin(used), end(used), identity)) { + if (legendOptions.Contains("mean")) { + summaryfile << " mu_Delta" << yvar; + if (relative) + summaryfile << "/" << yvar; + if (pull) + summaryfile << "_pull"; + if (!pull && !relative && plainunits(yvar, 'y') != "") + summaryfile << " (" << plainunits(yvar, 'y') << ")"; + summaryfile << "\t" + << "latexname=$\\mu_{" << latexlabel(yvar, 'y', relative, resolution, pull) << "}$"; + if (!pull && !relative && plainunits(yvar, 'y') != "") + summaryfile << " (" << latexunits(yvar, 'y') << ")"; + summaryfile << "\t" + << "format={:.3g}\t" + << "latexformat=${:.3g}$"; + for (int i = 0; i < n; i++) { + if (used[i]) { + summaryfile << "\t" << means[i]; } else { - maxpt1 = 999; - } - - Int_t notincluded = 0; //this counts the number that aren't in the right run range. 
- //it's subtracted from lengths[i] in order to normalize the histograms - - for (Int_t j = 0; jGetEntry(j); - if (xvar == "runNumber" || xvar.BeginsWith("nHits")) - x = xint; - if (xvar == "runNumber") - runNumber = x; - if (yvar == "phi" && y >= pi) - y -= 2*pi; - if (yvar == "phi" && y <= -pi) - y += 2*pi; - if ((runNumber < minrun && runNumber > 1) || (runNumber > maxrun && maxrun > 0)) //minrun and maxrun are global variables. - { - notincluded++; - continue; - } - if (relative && xvar == yvar) - rel = x; - Double_t error = 0; - if (relative && pull) - error = sqrt((sigma1/rel)*(sigma1/rel) + (sigma2/rel)*(sigma2/rel) + (sigmaorg*y/(rel*rel))*(sigmaorg*x/(rel*rel))); - else - error = sqrt(sigma1 * sigma1 + sigma2 * sigma2); // = sqrt(2) if !pull; this divides by sqrt(2) to get the error in 1 track - y /= (rel * error); - - if (pt1 > maxpt1) maxpt1 = pt1; - - if (ymin <= y && y < ymax && xmin <= x && x < xmax) - { - if (type == Histogram) - p[i]->Fill(y); - if (type == ScatterPlot) - p[i]->Fill(x,y); - if (type == Resolution || type == Profile) - { - int which = (p[i]->Fill(x,0)) - 1; - //get which q[j] by filling p[i] with nothing. (TH1F::Fill returns the bin number) - //p[i]'s actual contents are set later. - if (which >= 0 && (unsigned)which < q.size()) q[which]->Fill(y); - } - if (type == OrgHistogram) - p[i]->Fill(x); - } - - if (xvar.BeginsWith("nHits")) - { - x = xint2; - if (ymin <= y && y < ymax && xmin <= x && x < xmax) - { - if (type == Histogram) - p[i]->Fill(y); - if (type == ScatterPlot) - p[i]->Fill(x,y); - if (type == Resolution || type == Profile) - { - int which = (p[i]->Fill(x,0)) - 1; - if (which >= 0) q[which]->Fill(y); //get which q[j] by filling p[i] (with nothing), which returns the bin number - } - if (type == OrgHistogram) - p[i]->Fill(x); - } - } - - if (lengths[i] < 10 ? 
true : - (((j+1)/(int)(pow(10,(int)(log10(lengths[i]))-1)))*(int)(pow(10,(int)(log10(lengths[i]))-1)) == j + 1 || j + 1 == lengths[i])) - //print when j+1 is a multiple of 10^x, where 10^x has 1 less digit than lengths[i] - // and when it's finished - //For example, if lengths[i] = 123456, it will print this when j+1 = 10000, 20000, ..., 120000, 123456 - //So it will print between 10 and 100 times: 10 when lengths[i] = 10^x and 100 when lengths[i] = 10^x - 1 - { - cout << j + 1 << "/" << lengths[i] << ": "; - if (type == Profile || type == ScatterPlot || type == Resolution) - cout << x << ", " << y << endl; - if (type == OrgHistogram) - cout << x << endl; - if (type == Histogram) - cout << y << endl; - } - } - lengths[i] -= notincluded; - - if (maxpt1 < 6) { //0T - used[i] = false; - p[i]->SetLineColor(kWhite); - p[i]->SetMarkerColor(kWhite); - for (unsigned int j = 0; j < q.size(); j++) - delete q[j]; - continue; - } - - meansrmss[i] = ""; - if (type == Histogram || type == OrgHistogram) - { - stringstream meanrms; - meanrms.precision(3); - - double average = -1e99; - double rms = -1e99; - - TString var = (type == Histogram ? yvar : xvar); - char axis = (type == Histogram ? 
'y' : 'x'); - TString varunits = ""; - if (!relative && !pull) - varunits = units(var, axis); - if (legendOptions.Contains("mean")) - { - if (outliercut < 0) - average = p[i]->GetMean(); - else - average = findAverage(files[i], var, axis, relative, pull); - cout << "Average = " << average; - meanrms << "#mu = " << average; - means[i] = average; - if (legendOptions.Contains("meanerror")) - { - if (outliercut < 0) - rms = p[i]->GetRMS(); - else - rms = findRMS(files[i], var, axis, relative, pull); - meanrms << " #pm " << rms/TMath::Sqrt(lengths[i]*abs(outliercut)); - cout << " +/- " << rms/TMath::Sqrt(lengths[i]*abs(outliercut)); - } - if (varunits != "") - { - meanrms << " " << varunits; - cout << " " << varunits; - } - cout << endl; - if (legendOptions.Contains("rms")) - meanrms << ", "; - } - if (legendOptions.Contains("rms")) - { - if (rms<-1e98) - { - if (outliercut < 0) - rms = p[i]->GetRMS(); - else - rms = findRMS(files[i], var, axis, relative, pull); - } - cout << "RMS = " << rms; - meanrms << "rms = " << rms; - rmss[i] = rms; - if (legendOptions.Contains("rmserror")) - { - //https://root.cern.ch/root/html/src/TH1.cxx.html#7076 - meanrms << " #pm " << rms/TMath::Sqrt(2*lengths[i]*abs(outliercut)); - cout << " +/- " << rms/TMath::Sqrt(2*lengths[i]*abs(outliercut)); - } - if (varunits != "") - { - meanrms << " " << varunits; - cout << " " << varunits; - } - cout << endl; - } - meansrmss[i] = meanrms.str(); - } - - if (type == Resolution) - { - for (Int_t j = 0; j < xbins; j++) - { - p[i]->SetBinContent(j+1,q[j]->GetRMS()); - p[i]->SetBinError (j+1,q[j]->GetRMSError()); - delete q[j]; - } + summaryfile << "\t" << nan(""); } - - if (type == Profile) - { - for (Int_t j = 0; j < xbins; j++) - { - p[i]->SetBinContent(j+1,q[j]->GetMean()); - p[i]->SetBinError (j+1,q[j]->GetMeanError()); - delete q[j]; - } - } - - setAxisLabels(p[i],type,xvar,yvar,relative,pull); + } + summaryfile << "\n"; } - - if (type == Histogram && !pull && any_of(begin(used), end(used), 
identity)) { - if (legendOptions.Contains("mean")) { - summaryfile << " mu_Delta" << yvar; - if (relative) summaryfile << "/" << yvar; - if (pull) summaryfile << "_pull"; - if (!pull && !relative && plainunits(yvar, 'y') != "") summaryfile << " (" << plainunits(yvar, 'y') << ")"; - summaryfile << "\t" - << "latexname=$\\mu_{" << latexlabel(yvar, 'y', relative, resolution, pull) << "}$"; - if (!pull && !relative && plainunits(yvar, 'y') != "") summaryfile << " (" << latexunits(yvar, 'y') << ")"; - summaryfile << "\t" - << "format={:.3g}\t" - << "latexformat=${:.3g}$"; - for (int i = 0; i < n; i++) { - if (used[i]) { - summaryfile << "\t" << means[i]; - } else { - summaryfile << "\t" << nan(""); - } - } - summaryfile << "\n"; - } - if (legendOptions.Contains("rms")) { - summaryfile << "sigma_Delta" << yvar; - if (relative) summaryfile << "/" << yvar; - if (pull) summaryfile << "_pull"; - if (!pull && !relative && plainunits(yvar, 'y') != "") summaryfile << " (" << plainunits(yvar, 'y') << ")"; - summaryfile << "\t" - << "latexname=$\\sigma_{" << latexlabel(yvar, 'y', relative, resolution, pull) << "}$"; - if (!pull && !relative && latexunits(yvar, 'y') != "") summaryfile << " (" << latexunits(yvar, 'y') << ")"; - summaryfile << "\t" - << "format={:.3g}\t" - << "latexformat=${:.3g}$"; - for (int i = 0; i < n; i++) { - if (used[i]) { - summaryfile << "\t" << rmss[i]; - } else { - summaryfile << "\t" << nan(""); - } - } - summaryfile << "\n"; + if (legendOptions.Contains("rms")) { + summaryfile << "sigma_Delta" << yvar; + if (relative) + summaryfile << "/" << yvar; + if (pull) + summaryfile << "_pull"; + if (!pull && !relative && plainunits(yvar, 'y') != "") + summaryfile << " (" << plainunits(yvar, 'y') << ")"; + summaryfile << "\t" + << "latexname=$\\sigma_{" << latexlabel(yvar, 'y', relative, resolution, pull) << "}$"; + if (!pull && !relative && latexunits(yvar, 'y') != "") + summaryfile << " (" << latexunits(yvar, 'y') << ")"; + summaryfile << "\t" + << 
"format={:.3g}\t" + << "latexformat=${:.3g}$"; + for (int i = 0; i < n; i++) { + if (used[i]) { + summaryfile << "\t" << rmss[i]; + } else { + summaryfile << "\t" << nan(""); } + } + summaryfile << "\n"; } + } - TH1 *firstp = 0; - for (int i = 0; i < n; i++) - { - if (used[i]) - { - firstp = p[i]; - break; - } + TH1 *firstp = nullptr; + for (int i = 0; i < n; i++) { + if (used[i]) { + firstp = p[i]; + break; } - if (firstp == 0) - { - stufftodelete->Clear(); - return 0; + } + if (firstp == nullptr) { + stufftodelete->Clear(); + return nullptr; + } + + TCanvas *c1 = TCanvas::MakeDefCanvas(); + + TH1 *maxp = firstp; + if (type == ScatterPlot) + firstp->Draw("COLZ"); + else if (type == Resolution || type == Profile) { + vector g; + TMultiGraph *list = new TMultiGraph(); + for (Int_t i = 0, ii = 0; i < n; i++, ii++) { + if (!used[i]) { + ii--; + continue; + } + g.push_back(new TGraphErrors(p[i])); + for (Int_t j = 0; j < g[ii]->GetN(); j++) { + if (g[ii]->GetY()[j] == 0 && g[ii]->GetEY()[j] == 0) { + g[ii]->RemovePoint(j); + j--; + } + } + list->Add(g[ii]); } - - TCanvas *c1 = TCanvas::MakeDefCanvas(); - - TH1 *maxp = firstp; - if (type == ScatterPlot) - firstp->Draw("COLZ"); - else if (type == Resolution || type == Profile) - { - vector g; - TMultiGraph *list = new TMultiGraph(); - for (Int_t i = 0, ii = 0; i < n; i++, ii++) - { - if (!used[i]) - { - ii--; - continue; - } - g.push_back(new TGraphErrors(p[i])); - for (Int_t j = 0; j < g[ii]->GetN(); j++) - { - if (g[ii]->GetY()[j] == 0 && g[ii]->GetEY()[j] == 0) - { - g[ii]->RemovePoint(j); - j--; - } - } - list->Add(g[ii]); - } - list->Draw("AP"); - Double_t yaxismax = list->GetYaxis()->GetXmax(); - Double_t yaxismin = list->GetYaxis()->GetXmin(); - delete list; //automatically deletes g[i] - if (yaxismin > 0) - { - yaxismax += yaxismin; - yaxismin = 0; - } - firstp->GetYaxis()->SetRangeUser(yaxismin,yaxismax); - if (xvar == "runNumber") - firstp->GetXaxis()->SetNdivisions(505); + list->Draw("AP"); + Double_t yaxismax 
= list->GetYaxis()->GetXmax(); + Double_t yaxismin = list->GetYaxis()->GetXmin(); + delete list; //automatically deletes g[i] + if (yaxismin > 0) { + yaxismax += yaxismin; + yaxismin = 0; } - else if (type == Histogram || type == OrgHistogram) - { - Bool_t allthesame = true; - for (Int_t i = 1; i < n && allthesame; i++) - { - if (lengths[i] != lengths[0]) - allthesame = false; - } - if (!allthesame && xvar != "runNumber") - for (Int_t i = 0; i < n; i++) - { - p[i]->Scale(1.0/lengths[i]); //This does NOT include events that are out of the run number range (minrun and maxrun). - //It DOES include events that are out of the histogram range. - } - maxp = (TH1F*)firstp->Clone("maxp"); - stufftodelete->Add(maxp); - maxp->SetBit(kCanDelete,true); - maxp->SetLineColor(kWhite); - for (Int_t i = 1; i <= maxp->GetNbinsX(); i++) - { - for (Int_t j = 0; j < n; j++) - { - if (!used[j]) - continue; - maxp->SetBinContent(i,TMath::Max(maxp->GetBinContent(i),p[j]->GetBinContent(i))); - } - } - maxp->SetMarkerStyle(0); - maxp->SetMinimum(0); - maxp->Draw(""); - if (xvar == "runNumber") - { - maxp->GetXaxis()->SetNdivisions(505); - maxp->Draw(""); - } + firstp->GetYaxis()->SetRangeUser(yaxismin, yaxismax); + if (xvar == "runNumber") + firstp->GetXaxis()->SetNdivisions(505); + } else if (type == Histogram || type == OrgHistogram) { + Bool_t allthesame = true; + for (Int_t i = 1; i < n && allthesame; i++) { + if (lengths[i] != lengths[0]) + allthesame = false; } - - int nEntries = 0; - for (int i = 0; i < n; i++) - if (used[i]) - nEntries++; - double width = 0.5; - if (type == Histogram || type == OrgHistogram) - width *= 2; - TLegend *legend = TkAlStyle::legend(nEntries, width); - legend->SetTextSize(0); - if (type == Histogram || type == OrgHistogram) - legend->SetNColumns(2); - stufftodelete->Add(legend); - legend->SetBit(kCanDelete,true); - - for (Int_t i = 0; i < n; i++) - { - if (!used[i]) - continue; - if (type == Resolution || type == Profile) - { - if (p[i] == firstp) - 
p[i]->Draw("P"); - else - p[i]->Draw("same P"); - legend->AddEntry(p[i],names[i],"pl"); - } - else if (type == Histogram || type == OrgHistogram) - { - if (styles[i] >= 100) - { - p[i]->Draw("same P0E"); - legend->AddEntry(p[i],names[i],"pl"); - } - else - { - p[i]->Draw("same hist"); - legend->AddEntry(p[i],names[i],"l"); - } - legend->AddEntry((TObject*)0,meansrmss[i],""); - } + if (!allthesame && xvar != "runNumber") + for (Int_t i = 0; i < n; i++) { + //This does NOT include events that are out of the run number range (minrun and maxrun). + //It DOES include events that are out of the histogram range. + p[i]->Scale(1.0 / lengths[i]); + } + maxp = (TH1F *)firstp->Clone("maxp"); + stufftodelete->Add(maxp); + maxp->SetBit(kCanDelete, true); + maxp->SetLineColor(kWhite); + for (Int_t i = 1; i <= maxp->GetNbinsX(); i++) { + for (Int_t j = 0; j < n; j++) { + if (!used[j]) + continue; + maxp->SetBinContent(i, TMath::Max(maxp->GetBinContent(i), p[j]->GetBinContent(i))); + } } - if (legend->GetListOfPrimitives()->At(0) == 0) - { - stufftodelete->Clear(); - deleteCanvas(c1); - return 0; + maxp->SetMarkerStyle(0); + maxp->SetMinimum(0); + maxp->Draw(""); + if (xvar == "runNumber") { + maxp->GetXaxis()->SetNdivisions(505); + maxp->Draw(""); } - - c1->Update(); - legend->Draw(); - - double legendfraction = legend->GetY2() - legend->GetY1(); //apparently GetY1 and GetY2 give NDC coordinates. 
This is not a mistake on my part - double padheight = gPad->GetUymax() - gPad->GetUymin(); - //legendfraction = legendheight / padheight = newlegendheight / newpadheight - //newpadheight = padheight + x - //newlegendheight = newpadheight - padheight = x so it doesn't cover anything - //==>legendfraction = x/(padheight+x) - /* ==> */ double x = padheight*legendfraction / (1-legendfraction) * 1.5; //1.5 to give extra room - maxp->GetYaxis()->SetRangeUser(gPad->GetUymin(), gPad->GetUymax() + x); - - TkAlStyle::drawStandardTitle(); - - c1->Update(); - - if (saveas != "") - saveplot(c1,saveas); - - return c1; + } + + int nEntries = 0; + for (int i = 0; i < n; i++) + if (used[i]) + nEntries++; + double width = 0.5; + if (type == Histogram || type == OrgHistogram) + width *= 2; + TLegend *legend = TkAlStyle::legend(nEntries, width); + legend->SetTextSize(0); + if (type == Histogram || type == OrgHistogram) + legend->SetNColumns(2); + stufftodelete->Add(legend); + legend->SetBit(kCanDelete, true); + + for (Int_t i = 0; i < n; i++) { + if (!used[i]) + continue; + if (type == Resolution || type == Profile) { + if (p[i] == firstp) + p[i]->Draw("P"); + else + p[i]->Draw("same P"); + legend->AddEntry(p[i], names[i], "pl"); + } else if (type == Histogram || type == OrgHistogram) { + if (styles[i] >= 100) { + p[i]->Draw("same P0E"); + legend->AddEntry(p[i], names[i], "pl"); + } else { + p[i]->Draw("same hist"); + legend->AddEntry(p[i], names[i], "l"); + } + legend->AddEntry((TObject *)nullptr, meansrmss[i], ""); + } + } + if (legend->GetListOfPrimitives()->At(0) == nullptr) { + stufftodelete->Clear(); + deleteCanvas(c1); + return nullptr; + } + + c1->Update(); + legend->Draw(); + + double legendfraction = + legend->GetY2() - + legend->GetY1(); //apparently GetY1 and GetY2 give NDC coordinates. 
This is not a mistake on my part + double padheight = gPad->GetUymax() - gPad->GetUymin(); + //legendfraction = legendheight / padheight = newlegendheight / newpadheight + //newpadheight = padheight + x + //newlegendheight = newpadheight - padheight = x so it doesn't cover anything + //==>legendfraction = x/(padheight+x) + /* ==> */ double x = padheight * legendfraction / (1 - legendfraction) * 1.5; //1.5 to give extra room + maxp->GetYaxis()->SetRangeUser(gPad->GetUymin(), gPad->GetUymax() + x); + + TkAlStyle::drawStandardTitle(); + + c1->Update(); + + if (saveas != "") + saveplot(c1, saveas); + + return c1; } - //make a 1D histogram of Delta_yvar -TCanvas *trackSplitPlot(Int_t nFiles,TString *files,TString *names,TString var, - Bool_t relative,Bool_t pull,TString saveas, ostream& summaryfile) -{ - return trackSplitPlot(nFiles,files,names,"",var,relative,false,pull,saveas,summaryfile); +TCanvas *trackSplitPlot(Int_t nFiles, + TString *files, + TString *names, + TString var, + Bool_t relative, + Bool_t pull, + TString saveas, + ostream &summaryfile) { + return trackSplitPlot(nFiles, files, names, "", var, relative, false, pull, saveas, summaryfile); } - - //For 1 file -TCanvas *trackSplitPlot(TString file,TString xvar,TString yvar,Bool_t profile, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas, ostream& summaryfile) -{ - Int_t nFiles = 0; - if (profile) //it interprets nFiles < 1 as 1 file, make a scatterplot - nFiles = 1; - TString *files = &file; - TString name = ""; - TString *names = &name; - return trackSplitPlot(nFiles,files,names,xvar,yvar,relative,resolution,pull,saveas,summaryfile); +TCanvas *trackSplitPlot(TString file, + TString xvar, + TString yvar, + Bool_t profile, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas, + ostream &summaryfile) { + Int_t nFiles = 0; + if (profile) //it interprets nFiles < 1 as 1 file, make a scatterplot + nFiles = 1; + TString *files = &file; + TString name = ""; + TString *names = 
&name; + return trackSplitPlot(nFiles, files, names, xvar, yvar, relative, resolution, pull, saveas, summaryfile); } //make a 1D histogram of Delta_yvar -TCanvas *trackSplitPlot(TString file,TString var, - Bool_t relative,Bool_t pull, - TString saveas, ostream& summaryfile) -{ - Int_t nFiles = 1; - TString *files = &file; - TString name = ""; - TString *names = &name; - return trackSplitPlot(nFiles,files,names,var,relative,pull,saveas,summaryfile); +TCanvas *trackSplitPlot(TString file, TString var, Bool_t relative, Bool_t pull, TString saveas, ostream &summaryfile) { + Int_t nFiles = 1; + TString *files = &file; + TString name = ""; + TString *names = &name; + return trackSplitPlot(nFiles, files, names, var, relative, pull, saveas, summaryfile); } -void saveplot(TCanvas *c1,TString saveas) -{ - if (saveas == "") - return; - TString saveas2 = saveas, - saveas3 = saveas; - saveas2.ReplaceAll(".pngepsroot",""); - saveas3.Remove(saveas3.Length()-11); - if (saveas2 == saveas3) - { - c1->SaveAs(saveas.ReplaceAll(".pngepsroot",".png")); - c1->SaveAs(saveas.ReplaceAll(".png",".eps")); - c1->SaveAs(saveas.ReplaceAll(".eps",".root")); - c1->SaveAs(saveas.ReplaceAll(".root",".pdf")); - } - else - { - c1->SaveAs(saveas); - } +void saveplot(TCanvas *c1, TString saveas) { + if (saveas == "") + return; + TString saveas2 = saveas, saveas3 = saveas; + saveas2.ReplaceAll(".pngepsroot", ""); + saveas3.Remove(saveas3.Length() - 11); + if (saveas2 == saveas3) { + c1->SaveAs(saveas.ReplaceAll(".pngepsroot", ".png")); + c1->SaveAs(saveas.ReplaceAll(".png", ".eps")); + c1->SaveAs(saveas.ReplaceAll(".eps", ".root")); + c1->SaveAs(saveas.ReplaceAll(".root", ".pdf")); + } else { + c1->SaveAs(saveas); + } } -void deleteCanvas(TObject *canvas) -{ - if (canvas == 0) return; - if (!canvas->InheritsFrom("TCanvas")) - { - delete canvas; - return; - } - TCanvas *c1 = (TCanvas*)canvas; - TList *list = c1->GetListOfPrimitives(); - list->SetOwner(true); - list->Clear(); - delete c1; +void 
deleteCanvas(TObject *canvas) { + if (canvas == nullptr) + return; + if (!canvas->InheritsFrom("TCanvas")) { + delete canvas; + return; + } + TCanvas *c1 = (TCanvas *)canvas; + delete c1; } -void setupcolors() -{ - if (colorsset) return; - colorsset = true; - colors.clear(); - styles.clear(); - Color_t array[15] = {1,2,3,4,6,7,8,9, - kYellow+3,kOrange+10,kPink-2,kTeal+9,kAzure-8,kViolet-6,kSpring-1}; - for (int i = 0; i < 15; i++) - { - colors.push_back(array[i]); - styles.push_back(1); //Set the default to 1 - //This is to be consistent with the other validation - } +void setupcolors() { + if (colorsset) + return; + colorsset = true; + colors.clear(); + styles.clear(); + Color_t array[15] = { + 1, 2, 3, 4, 6, 7, 8, 9, kYellow + 3, kOrange + 10, kPink - 2, kTeal + 9, kAzure - 8, kViolet - 6, kSpring - 1}; + for (int i = 0; i < 15; i++) { + colors.push_back(array[i]); + styles.push_back(1); //Set the default to 1 + //This is to be consistent with the other validation + } } //This makes a plot, of Delta_yvar vs. runNumber, zoomed in to between firstrun and lastrun. 
@@ -699,17 +670,23 @@ void setupcolors() //There might be bins with very few events => big error bars, //or just 1 event => no error bar -void runNumberZoomed(Int_t nFiles,TString *files,TString *names,TString yvar, - Bool_t relative,Bool_t resolution,Bool_t pull, - Int_t firstRun,Int_t lastRun,TString saveas) -{ - Int_t tempminrun = minrun; - Int_t tempmaxrun = maxrun; - minrun = firstRun; - maxrun = lastRun; - trackSplitPlot(nFiles,files,names,"runNumber",yvar,relative,resolution,pull,saveas); - minrun = tempminrun; - maxrun = tempmaxrun; +void runNumberZoomed(Int_t nFiles, + TString *files, + TString *names, + TString yvar, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + Int_t firstRun, + Int_t lastRun, + TString saveas) { + Int_t tempminrun = minrun; + Int_t tempmaxrun = maxrun; + minrun = firstRun; + maxrun = lastRun; + trackSplitPlot(nFiles, files, names, "runNumber", yvar, relative, resolution, pull, saveas); + minrun = tempminrun; + maxrun = tempmaxrun; } //========================== @@ -734,379 +711,526 @@ void runNumberZoomed(Int_t nFiles,TString *files,TString *names,TString yvar, //The best way to run misalignmentDependence is through makePlots. If you want to run misalignmentDependence directly, //the LAST function, all the way at the bottom of this file, is probably the most practical to use (for all three of these). - // The first function takes a canvas as its argument. This canvas needs to have been produced with trackSplitPlot using // the same values of xvar, yvar, relative, resolution, and pull or something strange could happen. 
void misalignmentDependence(TCanvas *c1old, - Int_t nFiles,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TF1 *function,Int_t parameter,TString parametername,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - if (c1old == 0) return; - c1old = (TCanvas*)c1old->Clone("c1old"); - if (misalignment == "" || yvar == "") return; - Bool_t drawfits = (parameter < 0); - if (parameter < 0) - parameter = -parameter - 1; //-1 --> 0, -2 --> 1, -3 --> 2, ... - TString yaxislabel = nPart(1,parametername); - TString parameterunits = nPart(2,parametername); - if (parameterunits != "") - yaxislabel.Append(" (").Append(parameterunits).Append(")"); - TList *list = c1old->GetListOfPrimitives(); - //const int n = list->GetEntries() - 2 - (xvar == ""); - const int n = nFiles; - - gStyle->SetOptStat(0); - gStyle->SetOptFit(0); - gStyle->SetFitFormat("5.4g"); - gStyle->SetFuncColor(2); - gStyle->SetFuncStyle(1); - gStyle->SetFuncWidth(1); - - TH1 **p = new TH1*[n]; - TF1 **f = new TF1*[n]; - bool used[n]; - for (Int_t i = 0; i < n; i++) - { - stringstream s0; - s0 << "p" << i; - TString pname = s0.str(); - p[i] = (TH1*)list->/*At(i+1+(xvar == ""))*/FindObject(pname); - used[i] = (p[i] != 0); - if (used[i]) - p[i]->SetDirectory(0); - if (xvar == "") - continue; - stringstream s; - s << function->GetName() << i; - TString newname = s.str(); - f[i] = (TF1*)function->Clone(newname); - stufftodelete->Add(f[i]); - } - - Double_t *result = new Double_t[nFiles]; - Double_t *error = new Double_t[nFiles]; + Int_t nFiles, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TF1 *function, + Int_t parameter, + TString parametername, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + if (c1old == nullptr) + return; + c1old = (TCanvas *)c1old->Clone("c1old"); + if (misalignment == "" || yvar == "") + 
return; + Bool_t drawfits = (parameter < 0); + if (parameter < 0) + parameter = -parameter - 1; //-1 --> 0, -2 --> 1, -3 --> 2, ... + TString yaxislabel = nPart(1, parametername); + TString parameterunits = nPart(2, parametername); + if (parameterunits != "") + yaxislabel.Append(" (").Append(parameterunits).Append(")"); + TList *list = c1old->GetListOfPrimitives(); + //const int n = list->GetEntries() - 2 - (xvar == ""); + const int n = nFiles; + + gStyle->SetOptStat(0); + gStyle->SetOptFit(0); + gStyle->SetFitFormat("5.4g"); + gStyle->SetFuncColor(2); + gStyle->SetFuncStyle(1); + gStyle->SetFuncWidth(1); + + TH1 **p = new TH1 *[n]; + TF1 **f = new TF1 *[n]; + bool used[n]; + for (Int_t i = 0; i < n; i++) { + stringstream s0; + s0 << "p" << i; + TString pname = s0.str(); + p[i] = (TH1 *)list->/*At(i+1+(xvar == ""))*/ FindObject(pname); + used[i] = (p[i] != nullptr); + if (used[i]) + p[i]->SetDirectory(nullptr); if (xvar == "") - { - yaxislabel = axislabel(yvar,'y',relative,resolution,pull); - for (Int_t i = 0; i < nFiles; i++) - { - if (!used[i]) continue; - if (!resolution) - { - result[i] = p[i]->GetMean(); - error[i] = p[i]->GetMeanError(); - } - else - { - result[i] = p[i]->GetRMS(); - error[i] = p[i]->GetRMSError(); - } - cout << result[i] << " +/- " << error[i] << endl; - } + continue; + stringstream s; + s << function->GetName() << i; + TString newname = s.str(); + f[i] = (TF1 *)function->Clone(newname); + stufftodelete->Add(f[i]); + } + + Double_t *result = new Double_t[nFiles]; + Double_t *error = new Double_t[nFiles]; + if (xvar == "") { + yaxislabel = axislabel(yvar, 'y', relative, resolution, pull); + for (Int_t i = 0; i < nFiles; i++) { + if (!used[i]) + continue; + if (!resolution) { + result[i] = p[i]->GetMean(); + error[i] = p[i]->GetMeanError(); + } else { + result[i] = p[i]->GetRMS(); + error[i] = p[i]->GetRMSError(); + } + cout << result[i] << " +/- " << error[i] << endl; } - else - { - for (int i = 0; i < n; i++) - { - if (!used[i]) continue; - 
f[i]->SetLineColor(colors[i]); - f[i]->SetLineStyle(styles[i]); - f[i]->SetLineWidth(1); - p[i]->SetMarkerColor(colors[i]); - p[i]->SetMarkerStyle(20+i); - p[i]->SetLineColor(colors[i]); - p[i]->SetLineStyle(styles[i]); - p[i]->Fit(f[i],"IM"); - error[i] = f[i]->GetParError (parameter); - //the fits sometimes don't work if the parameters are constrained. - //take care of the constraining here. - //for sine, make the amplitude positive and the phase between 0 and 2pi. - //unless the amplitude is the only parameter (eg sagitta theta theta) - if (function->GetName() == TString("sine") && function->GetNumberFreeParameters() >= 2) - { - if (f[i]->GetParameter(0) < 0) - { - f[i]->SetParameter(0,-f[i]->GetParameter(0)); - f[i]->SetParameter(2,f[i]->GetParameter(2)+pi); - } - while(f[i]->GetParameter(2) >= 2*pi) - f[i]->SetParameter(2,f[i]->GetParameter(2)-2*pi); - while(f[i]->GetParameter(2) < 0) - f[i]->SetParameter(2,f[i]->GetParameter(2)+2*pi); - } - result[i] = f[i]->GetParameter(parameter); - } + } else { + for (int i = 0; i < n; i++) { + if (!used[i]) + continue; + f[i]->SetLineColor(colors[i]); + f[i]->SetLineStyle(styles[i]); + f[i]->SetLineWidth(1); + p[i]->SetMarkerColor(colors[i]); + p[i]->SetMarkerStyle(20 + i); + p[i]->SetLineColor(colors[i]); + p[i]->SetLineStyle(styles[i]); + p[i]->Fit(f[i], "IM"); + error[i] = f[i]->GetParError(parameter); + //the fits sometimes don't work if the parameters are constrained. + //take care of the constraining here. + //for sine, make the amplitude positive and the phase between 0 and 2pi. 
+ //unless the amplitude is the only parameter (eg sagitta theta theta) + if (function->GetName() == TString("sine") && function->GetNumberFreeParameters() >= 2) { + if (f[i]->GetParameter(0) < 0) { + f[i]->SetParameter(0, -f[i]->GetParameter(0)); + f[i]->SetParameter(2, f[i]->GetParameter(2) + pi); + } + while (f[i]->GetParameter(2) >= 2 * pi) + f[i]->SetParameter(2, f[i]->GetParameter(2) - 2 * pi); + while (f[i]->GetParameter(2) < 0) + f[i]->SetParameter(2, f[i]->GetParameter(2) + 2 * pi); + } + result[i] = f[i]->GetParameter(parameter); } + } + TCanvas *c1 = TCanvas::MakeDefCanvas(); - TCanvas *c1 = TCanvas::MakeDefCanvas(); - - if (drawfits && xvar != "" && yvar != "") - { - TString legendtitle = "["; - legendtitle.Append(functionname); - legendtitle.Append("]"); - TLegend *legend = new TLegend(.7,.7,.9,.9,legendtitle,"br"); - stufftodelete->Add(legend); - TString drawoption = ""; - for (int i = 0; i < n; i++) - { - if (!used[i]) continue; - p[i]->Draw(drawoption); - f[i]->Draw("same"); - drawoption = "same"; - - stringstream s; - s.precision(3); - s << nPart(1,parametername) << " = " << result[i] << " #pm " << error[i]; - if (parameterunits != "") s << " " << parameterunits; - TString str = s.str(); - legend->AddEntry(p[i],names[i],"pl"); - legend->AddEntry(f[i],str,"l"); - } - c1->Update(); - Double_t x1min = .98*gPad->GetUxmin() + .02*gPad->GetUxmax(); - Double_t x2max = .02*gPad->GetUxmin() + .98*gPad->GetUxmax(); - Double_t y1min = .98*gPad->GetUymin() + .02*gPad->GetUymax(); - Double_t y2max = .02*gPad->GetUymin() + .98*gPad->GetUymax(); - Double_t width = .4*(x2max-x1min); - Double_t height = (1./20)*legend->GetListOfPrimitives()->GetEntries()*(y2max-y1min); - width *= 2; - height /= 2; - legend->SetNColumns(2); - - Double_t newy2max = placeLegend(legend,width,height,x1min,y1min,x2max,y2max); - p[0]->GetYaxis()->SetRangeUser(gPad->GetUymin(),(newy2max-.02*gPad->GetUymin())/.98); - - legend->SetFillStyle(0); - legend->Draw(); + if (drawfits && xvar != "" 
&& yvar != "") { + TString legendtitle = "["; + legendtitle.Append(functionname); + legendtitle.Append("]"); + TLegend *legend = new TLegend(.7, .7, .9, .9, legendtitle, "br"); + stufftodelete->Add(legend); + TString drawoption = ""; + for (int i = 0; i < n; i++) { + if (!used[i]) + continue; + p[i]->Draw(drawoption); + f[i]->Draw("same"); + drawoption = "same"; + + stringstream s; + s.precision(3); + s << nPart(1, parametername) << " = " << result[i] << " #pm " << error[i]; + if (parameterunits != "") + s << " " << parameterunits; + TString str = s.str(); + legend->AddEntry(p[i], names[i], "pl"); + legend->AddEntry(f[i], str, "l"); } - else - { - if (values == 0) return; - - Bool_t phasesmatter = false; - if (misalignment == "elliptical" || misalignment == "sagitta" || misalignment == "skew") - { - if (phases == 0) - { - cout << "This misalignment has a phase, but you didn't supply the phases!" << endl - << "Can't produce plots depending on the misalignment value." << endl; - return; - } - int firstnonzero = -1; - for (Int_t i = 0; i < nFiles; i++) - { - if (values[i] == 0) continue; //if the amplitude is 0 the phase is arbitrary - if (firstnonzero == -1) firstnonzero = i; - if (phases[i] != phases[firstnonzero]) - phasesmatter = true; - } - } - - if (!phasesmatter) - { - TGraphErrors *g = new TGraphErrors(nFiles,values,result,(Double_t*)0,error); - g->SetName(""); - stufftodelete->Add(g); - - TString xaxislabel = "#epsilon_{"; - xaxislabel.Append(misalignment); - xaxislabel.Append("}"); - g->GetXaxis()->SetTitle(xaxislabel); - if (xvar != "") - { - yaxislabel.Append(" ["); - yaxislabel.Append(functionname); - yaxislabel.Append("]"); - } - g->GetYaxis()->SetTitle(yaxislabel); - - g->SetMarkerColor(colors[0]); - g->SetMarkerStyle(20); - - g->Draw("AP"); - Double_t yaxismax = g->GetYaxis()->GetXmax(); - Double_t yaxismin = g->GetYaxis()->GetXmin(); - if (yaxismin > 0) - { - yaxismax += yaxismin; - yaxismin = 0; - } - g->GetYaxis()->SetRangeUser(yaxismin,yaxismax); - 
g->Draw("AP"); - } - else - { - double *xvalues = new double[nFiles]; - double *yvalues = new double[nFiles]; //these are not physically x and y (except in the case of skew) - for (int i = 0; i < nFiles; i++) - { - xvalues[i] = values[i] * cos(phases[i]); - yvalues[i] = values[i] * sin(phases[i]); - } - TGraph2DErrors *g = new TGraph2DErrors(nFiles,xvalues,yvalues,result,(Double_t*)0,(Double_t*)0,error); - g->SetName(""); - stufftodelete->Add(g); - delete[] xvalues; //A TGraph2DErrors has its own copy of xvalues and yvalues, so it's ok to delete these copies. - delete[] yvalues; - - TString xaxislabel = "#epsilon_{"; - xaxislabel.Append(misalignment); - xaxislabel.Append("}cos(#delta)"); - TString realyaxislabel = xaxislabel; - realyaxislabel.ReplaceAll("cos(#delta)","sin(#delta)"); - g->GetXaxis()->SetTitle(xaxislabel); - g->GetYaxis()->SetTitle(realyaxislabel); - TString zaxislabel = /*"fake"*/yaxislabel; //yaxislabel is defined earlier - if (xvar != "") - { - zaxislabel.Append(" ["); - zaxislabel.Append(functionname); - zaxislabel.Append("]"); - } - g->GetZaxis()->SetTitle(zaxislabel); - g->SetMarkerStyle(20); - g->Draw("pcolerr"); - } + c1->Update(); + Double_t x1min = .98 * gPad->GetUxmin() + .02 * gPad->GetUxmax(); + Double_t x2max = .02 * gPad->GetUxmin() + .98 * gPad->GetUxmax(); + Double_t y1min = .98 * gPad->GetUymin() + .02 * gPad->GetUymax(); + Double_t y2max = .02 * gPad->GetUymin() + .98 * gPad->GetUymax(); + Double_t width = .4 * (x2max - x1min); + Double_t height = (1. 
/ 20) * legend->GetListOfPrimitives()->GetEntries() * (y2max - y1min); + width *= 2; + height /= 2; + legend->SetNColumns(2); + + Double_t newy2max = placeLegend(legend, width, height, x1min, y1min, x2max, y2max); + p[0]->GetYaxis()->SetRangeUser(gPad->GetUymin(), (newy2max - .02 * gPad->GetUymin()) / .98); + + legend->SetFillStyle(0); + legend->Draw(); + } else { + if (values == nullptr) + return; + + Bool_t phasesmatter = false; + if (misalignment == "elliptical" || misalignment == "sagitta" || misalignment == "skew") { + if (phases == nullptr) { + cout << "This misalignment has a phase, but you didn't supply the phases!" << endl + << "Can't produce plots depending on the misalignment value." << endl; + return; + } + int firstnonzero = -1; + for (Int_t i = 0; i < nFiles; i++) { + if (values[i] == 0) + continue; //if the amplitude is 0 the phase is arbitrary + if (firstnonzero == -1) + firstnonzero = i; + if (phases[i] != phases[firstnonzero]) + phasesmatter = true; + } } - if (saveas != "") - { - saveplot(c1,saveas); - delete[] p; - delete[] f; - delete[] result; - delete[] error; - delete c1old; + if (!phasesmatter) { + TGraphErrors *g = new TGraphErrors(nFiles, values, result, (Double_t *)nullptr, error); + g->SetName(""); + stufftodelete->Add(g); + + TString xaxislabel = "#epsilon_{"; + xaxislabel.Append(misalignment); + xaxislabel.Append("}"); + g->GetXaxis()->SetTitle(xaxislabel); + if (xvar != "") { + yaxislabel.Append(" ["); + yaxislabel.Append(functionname); + yaxislabel.Append("]"); + } + g->GetYaxis()->SetTitle(yaxislabel); + + g->SetMarkerColor(colors[0]); + g->SetMarkerStyle(20); + + g->Draw("AP"); + Double_t yaxismax = g->GetYaxis()->GetXmax(); + Double_t yaxismin = g->GetYaxis()->GetXmin(); + if (yaxismin > 0) { + yaxismax += yaxismin; + yaxismin = 0; + } + g->GetYaxis()->SetRangeUser(yaxismin, yaxismax); + g->Draw("AP"); + } else { + double *xvalues = new double[nFiles]; + double *yvalues = new double[nFiles]; //these are not physically x and y 
(except in the case of skew) + for (int i = 0; i < nFiles; i++) { + xvalues[i] = values[i] * cos(phases[i]); + yvalues[i] = values[i] * sin(phases[i]); + } + TGraph2DErrors *g = + new TGraph2DErrors(nFiles, xvalues, yvalues, result, (Double_t *)nullptr, (Double_t *)nullptr, error); + g->SetName(""); + stufftodelete->Add(g); + delete[] xvalues; //A TGraph2DErrors has its own copy of xvalues and yvalues, so it's ok to delete these copies. + delete[] yvalues; + + TString xaxislabel = "#epsilon_{"; + xaxislabel.Append(misalignment); + xaxislabel.Append("}cos(#delta)"); + TString realyaxislabel = xaxislabel; + realyaxislabel.ReplaceAll("cos(#delta)", "sin(#delta)"); + g->GetXaxis()->SetTitle(xaxislabel); + g->GetYaxis()->SetTitle(realyaxislabel); + TString zaxislabel = /*"fake"*/ yaxislabel; //yaxislabel is defined earlier + if (xvar != "") { + zaxislabel.Append(" ["); + zaxislabel.Append(functionname); + zaxislabel.Append("]"); + } + g->GetZaxis()->SetTitle(zaxislabel); + g->SetMarkerStyle(20); + g->Draw("pcolerr"); } + } + + if (saveas != "") { + saveplot(c1, saveas); + delete[] p; + delete[] f; + delete[] result; + delete[] error; + delete c1old; + } } - //This version allows you to show multiple parameters. It runs the previous version multiple times, once for each parameter. //saveas will be modified to indicate which parameter is being used each time. 
void misalignmentDependence(TCanvas *c1old, - Int_t nFiles,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TF1 *function,Int_t nParameters,Int_t *parameters,TString *parameternames,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - for (int i = 0; i < nParameters; i++) - { - TString saveasi = saveas; - TString insert = nPart(1,parameternames[i]); - insert.Prepend("."); - saveasi.Insert(saveasi.Last('.'),insert); //insert the parameter name before the file extension - misalignmentDependence(c1old, - nFiles,names,misalignment,values,phases,xvar,yvar, - function,parameters[i],parameternames[i],functionname, - relative,resolution,pull, - saveasi); - } + Int_t nFiles, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TF1 *function, + Int_t nParameters, + Int_t *parameters, + TString *parameternames, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + for (int i = 0; i < nParameters; i++) { + TString saveasi = saveas; + TString insert = nPart(1, parameternames[i]); + insert.Prepend("."); + saveasi.Insert(saveasi.Last('.'), insert); //insert the parameter name before the file extension + misalignmentDependence(c1old, + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + function, + parameters[i], + parameternames[i], + functionname, + relative, + resolution, + pull, + saveasi); + } } - //This version does not take a canvas as its argument. It runs trackSplitPlot to produce the canvas. 
-void misalignmentDependence(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TF1 *function,Int_t parameter,TString parametername,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - misalignmentDependence(trackSplitPlot(nFiles,files,names,xvar,yvar,relative,resolution,pull,""), - nFiles,names,misalignment,values,phases,xvar,yvar, - function,parameter,parametername,functionname, - relative,resolution,pull,saveas); +void misalignmentDependence(Int_t nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TF1 *function, + Int_t parameter, + TString parametername, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + misalignmentDependence(trackSplitPlot(nFiles, files, names, xvar, yvar, relative, resolution, pull, ""), + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + function, + parameter, + parametername, + functionname, + relative, + resolution, + pull, + saveas); } -void misalignmentDependence(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TF1 *function,Int_t nParameters,Int_t *parameters,TString *parameternames,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - for (int i = 0; i < nParameters; i++) - { - TString saveasi = saveas; - TString insert = nPart(1,parameternames[i]); - insert.Prepend("."); - saveasi.Insert(saveasi.Last('.'),insert); //insert the parameter name before the file extension - misalignmentDependence(nFiles,files,names,misalignment,values,phases,xvar,yvar, - function,parameters[i],parameternames[i],functionname, - relative,resolution,pull, - saveasi); - } +void misalignmentDependence(Int_t nFiles, + TString *files, + TString *names, + TString 
misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TF1 *function, + Int_t nParameters, + Int_t *parameters, + TString *parameternames, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + for (int i = 0; i < nParameters; i++) { + TString saveasi = saveas; + TString insert = nPart(1, parameternames[i]); + insert.Prepend("."); + saveasi.Insert(saveasi.Last('.'), insert); //insert the parameter name before the file extension + misalignmentDependence(nFiles, + files, + names, + misalignment, + values, + phases, + xvar, + yvar, + function, + parameters[i], + parameternames[i], + functionname, + relative, + resolution, + pull, + saveasi); + } } - // This version allows you to use a string for the function. It creates a TF1 using this string and uses this TF1 void misalignmentDependence(TCanvas *c1old, - Int_t nFiles,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TString function,Int_t parameter,TString parametername,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - TF1 *f = new TF1("func",function); - misalignmentDependence(c1old,nFiles,names,misalignment,values,phases,xvar,yvar,f,parameter,parametername,functionname,relative,resolution,pull,saveas); - delete f; + Int_t nFiles, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TString function, + Int_t parameter, + TString parametername, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + TF1 *f = new TF1("func", function); + misalignmentDependence(c1old, + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + f, + parameter, + parametername, + functionname, + relative, + resolution, + pull, + saveas); + delete f; } void misalignmentDependence(TCanvas *c1old, - Int_t nFiles,TString *names,TString 
misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TString function,Int_t nParameters,Int_t *parameters,TString *parameternames,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - for (int i = 0; i < nParameters; i++) - { - TString saveasi = saveas; - TString insert = nPart(1,parameternames[i]); - insert.Prepend("."); - saveasi.Insert(saveasi.Last('.'),insert); //insert the parameter name before the file extension - misalignmentDependence(c1old, - nFiles,names,misalignment,values,phases,xvar,yvar, - function,parameters[i],parameternames[i],functionname, - relative,resolution,pull, - saveasi); - } + Int_t nFiles, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TString function, + Int_t nParameters, + Int_t *parameters, + TString *parameternames, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + for (int i = 0; i < nParameters; i++) { + TString saveasi = saveas; + TString insert = nPart(1, parameternames[i]); + insert.Prepend("."); + saveasi.Insert(saveasi.Last('.'), insert); //insert the parameter name before the file extension + misalignmentDependence(c1old, + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + function, + parameters[i], + parameternames[i], + functionname, + relative, + resolution, + pull, + saveasi); + } } - -void misalignmentDependence(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TString function,Int_t parameter,TString parametername,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - TF1 *f = new TF1("func",function); - misalignmentDependence(nFiles,files,names,misalignment,values,phases,xvar,yvar,f,parameter,parametername,functionname,relative,resolution,pull,saveas); - delete f; +void misalignmentDependence(Int_t 
nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TString function, + Int_t parameter, + TString parametername, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + TF1 *f = new TF1("func", function); + misalignmentDependence(nFiles, + files, + names, + misalignment, + values, + phases, + xvar, + yvar, + f, + parameter, + parametername, + functionname, + relative, + resolution, + pull, + saveas); + delete f; } -void misalignmentDependence(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, - TString function,Int_t nParameters,Int_t *parameters,TString *parameternames,TString functionname, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - for (int i = 0; i < nParameters; i++) - { - TString saveasi = saveas; - TString insert = nPart(1,parameternames[i]); - insert.Prepend("."); - saveasi.Insert(saveasi.Last('.'),insert); //insert the parameter name before the file extension - misalignmentDependence(nFiles,files,names,misalignment,values,phases,xvar,yvar, - function,parameters[i],parameternames[i],functionname, - relative,resolution,pull, - saveasi); - } +void misalignmentDependence(Int_t nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, + TString function, + Int_t nParameters, + Int_t *parameters, + TString *parameternames, + TString functionname, + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + for (int i = 0; i < nParameters; i++) { + TString saveasi = saveas; + TString insert = nPart(1, parameternames[i]); + insert.Prepend("."); + saveasi.Insert(saveasi.Last('.'), insert); //insert the parameter name before the file extension + misalignmentDependence(nFiles, + files, + names, + misalignment, + values, + phases, + xvar, 
+ yvar, + function, + parameters[i], + parameternames[i], + functionname, + relative, + resolution, + pull, + saveasi); + } } - - - //This version does not take a function as its argument. It automatically determines what function, parameter, //functionname, and parametername to use based on misalignment, xvar, yvar, relative, resolution, and pull. //However, you have to manually put into the function which plots to fit to what shapes. @@ -1119,112 +1243,126 @@ void misalignmentDependence(Int_t nFiles,TString *files,TString *names,TString m //This is the version called by makeThesePlots.C Bool_t misalignmentDependence(TCanvas *c1old, - Int_t nFiles,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, + Int_t nFiles, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, Bool_t drawfits, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - if (xvar == "") - { - if (c1old == 0 || misalignment == "" || values == 0) return false; - misalignmentDependence(c1old,nFiles,names,misalignment,values,phases,xvar,yvar,(TF1*)0,0,"","",relative,resolution,pull,saveas); - return true; + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + if (xvar == "") { + if (c1old == nullptr || misalignment == "" || values == nullptr) + return false; + misalignmentDependence(c1old, + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + (TF1 *)nullptr, + 0, + "", + "", + relative, + resolution, + pull, + saveas); + return true; + } + TF1 *f = nullptr; + TString functionname = ""; + + //if only one parameter is of interest + TString parametername = ""; + Int_t parameter = 9999; + + //if multiple parameters are of interest + Int_t nParameters = -1; + TString *parameternames = nullptr; + Int_t *parameters = nullptr; + + if (misalignment == "sagitta") { + if (xvar == "phi" && yvar == "phi" && !resolution && !pull) { + f = new TF1("sine", 
"-[0]*cos([1]*x+[2])"); + f->FixParameter(1, 1); + f->SetParameter(0, 6e-4); + nParameters = 2; + Int_t tempParameters[2] = {0, 2}; + TString tempParameterNames[2] = {"A;mrad", "B"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#Delta#phi=-Acos(#phi+B)"; } - TF1 *f = 0; - TString functionname = ""; - - //if only one parameter is of interest - TString parametername = ""; - Int_t parameter = 9999; - - //if multiple parameters are of interest - Int_t nParameters = -1; - TString *parameternames = 0; - Int_t *parameters = 0; - - if (misalignment == "sagitta") - { - if (xvar == "phi" && yvar == "phi" && !resolution && !pull) - { - f = new TF1("sine","-[0]*cos([1]*x+[2])"); - f->FixParameter(1,1); - f->SetParameter(0,6e-4); - nParameters = 2; - Int_t tempParameters[2] = {0,2}; - TString tempParameterNames[2] = {"A;mrad","B"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#Delta#phi=-Acos(#phi+B)"; - } - if (xvar == "theta" && yvar == "theta" && !resolution && pull) - { - f = new TF1("line","-[0]*(x+[1])"); - f->FixParameter(1,-pi/2); - parametername = "A"; - functionname = "#Delta#theta/#delta(#Delta#theta)=-A(#theta-#pi/2)"; - parameter = 0; - } - if (xvar == "theta" && yvar == "theta" && !resolution && !pull) - { - f = new TF1("sine","[0]*sin([1]*x+[2])"); - f->FixParameter(1,2); - f->FixParameter(2,0); - parametername = "A;mrad"; - functionname = "#Delta#theta=-Asin(2#theta)"; - parameter = 0; - } + if (xvar == "theta" && yvar == "theta" && !resolution && pull) { + f = new TF1("line", "-[0]*(x+[1])"); + f->FixParameter(1, -pi / 2); + parametername = "A"; + functionname = "#Delta#theta/#delta(#Delta#theta)=-A(#theta-#pi/2)"; + parameter = 0; } - if (misalignment == "elliptical") - { - if (xvar == "phi" && yvar == "dxy" && !resolution && !pull) - { - f = new TF1("sine","[0]*sin([1]*x-[2])"); - //f = new TF1("sine","[0]*sin([1]*x-[2]) + [3]"); - f->FixParameter(1,-2); - 
f->SetParameter(0,5e-4); - - nParameters = 2; - Int_t tempParameters[2] = {0,2}; - TString tempParameterNames[2] = {"A;#mum","B"}; - //nParameters = 3; - //Int_t tempParameters[3] = {0,2,3}; - //TString tempParameterNames[3] = {"A;#mum","B","C;#mum"}; - - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#Deltad_{xy}=-Asin(2#phi+B)"; - //functionname = "#Deltad_{xy}=-Asin(2#phi+B)+C"; - } - if (xvar == "phi" && yvar == "dxy" && !resolution && pull) - { - f = new TF1("sine","[0]*sin([1]*x-[2])"); - //f = new TF1("sine","[0]*sin([1]*x-[2]) + [3]"); + if (xvar == "theta" && yvar == "theta" && !resolution && !pull) { + f = new TF1("sine", "[0]*sin([1]*x+[2])"); + f->FixParameter(1, 2); + f->FixParameter(2, 0); + parametername = "A;mrad"; + functionname = "#Delta#theta=-Asin(2#theta)"; + parameter = 0; + } + } + if (misalignment == "elliptical") { + if (xvar == "phi" && yvar == "dxy" && !resolution && !pull) { + f = new TF1("sine", "[0]*sin([1]*x-[2])"); + //f = new TF1("sine","[0]*sin([1]*x-[2]) + [3]"); + f->FixParameter(1, -2); + f->SetParameter(0, 5e-4); + + nParameters = 2; + Int_t tempParameters[2] = {0, 2}; + TString tempParameterNames[2] = {"A;#mum", "B"}; + //nParameters = 3; + //Int_t tempParameters[3] = {0,2,3}; + //TString tempParameterNames[3] = {"A;#mum","B","C;#mum"}; + + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#Deltad_{xy}=-Asin(2#phi+B)"; + //functionname = "#Deltad_{xy}=-Asin(2#phi+B)+C"; + } + if (xvar == "phi" && yvar == "dxy" && !resolution && pull) { + f = new TF1("sine", "[0]*sin([1]*x-[2])"); + //f = new TF1("sine","[0]*sin([1]*x-[2]) + [3]"); - f->FixParameter(1,-2); + f->FixParameter(1, -2); - nParameters = 2; - Int_t tempParameters[2] = {0,2}; - TString tempParameterNames[2] = {"A","B"}; - //nParameters = 3; - //Int_t tempParameters[3] = {0,2,3}; - //TString tempParameterNames[3] = {"A","B","C"}; + nParameters = 2; + Int_t tempParameters[2] = {0, 2}; + TString 
tempParameterNames[2] = {"A", "B"}; + //nParameters = 3; + //Int_t tempParameters[3] = {0,2,3}; + //TString tempParameterNames[3] = {"A","B","C"}; - parameters = tempParameters; - parameternames = tempParameterNames; + parameters = tempParameters; + parameternames = tempParameterNames; - functionname = "#Deltad_{xy}/#delta(#Deltad_{xy})=-Asin(2#phi+B)"; - //functionname = "#Deltad_{xy}/#delta(#Deltad_{xy})=-Asin(2#phi+B)+C"; - } + functionname = "#Deltad_{xy}/#delta(#Deltad_{xy})=-Asin(2#phi+B)"; + //functionname = "#Deltad_{xy}/#delta(#Deltad_{xy})=-Asin(2#phi+B)+C"; + } - if (xvar == "theta" && yvar == "dz" && !resolution && !pull) - { - f = new TF1("line","-[0]*(x-[1])"); - f->FixParameter(1,pi/2); - parametername = "A;#mum"; - functionname = "#Deltad_{z}=-A(#theta-#pi/2)"; - parameter = 0; - } - /* + if (xvar == "theta" && yvar == "dz" && !resolution && !pull) { + f = new TF1("line", "-[0]*(x-[1])"); + f->FixParameter(1, pi / 2); + parametername = "A;#mum"; + functionname = "#Deltad_{z}=-A(#theta-#pi/2)"; + parameter = 0; + } + /* This fit doesn't work if (xvar == "theta" && yvar == "dz" && !resolution && pull) { @@ -1236,310 +1374,357 @@ Bool_t misalignmentDependence(TCanvas *c1old, parameter = 0; } */ - if (xvar == "dxy" && yvar == "phi" && !resolution && !pull) - { - f = new TF1("line","-[0]*(x-[1])"); - f->FixParameter(1,0); - parametername = "A;mrad/cm"; - functionname = "#Delta#phi=-A(d_{xy})"; - parameter = 0; - } - if (xvar == "dxy" && yvar == "phi" && !resolution && pull) - { - f = new TF1("line","-[0]*(x-[1])"); - f->FixParameter(1,0); - parametername = "A;cm^{-1}"; - functionname = "#Delta#phi/#delta(#Delta#phi)=-A(d_{xy})"; - parameter = 0; - } + if (xvar == "dxy" && yvar == "phi" && !resolution && !pull) { + f = new TF1("line", "-[0]*(x-[1])"); + f->FixParameter(1, 0); + parametername = "A;mrad/cm"; + functionname = "#Delta#phi=-A(d_{xy})"; + parameter = 0; } - if (misalignment == "skew") - { - if (xvar == "phi" && yvar == "theta" && resolution && 
!pull) - { - f = new TF1("sine","[0]*sin([1]*x+[2])+[3]"); - f->FixParameter(1,2); - nParameters = 3; - Int_t tempParameters[3] = {0,2,3}; - TString tempParameterNames[3] = {"A;mrad","B","C;mrad"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#sigma(#Delta#theta)=Asin(2#phi+B)+C"; - } - if (xvar == "phi" && yvar == "eta" && resolution && !pull) - { - f = new TF1("sine","[0]*sin([1]*x+[2])+[3]"); - f->FixParameter(1,2); - nParameters = 3; - Int_t tempParameters[3] = {0,2,3}; - TString tempParameterNames[3] = {"A;mrad","B","C;mrad"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#sigma(#Delta#eta)=Asin(2#phi+B)+C"; - } - if (xvar == "phi" && yvar == "theta" && resolution && pull) - { - f = new TF1("sine","[0]*sin([1]*x+[2])+[3]"); - f->FixParameter(1,2); - nParameters = 3; - Int_t tempParameters[3] = {0,2,3}; - TString tempParameterNames[3] = {"A","B","C"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#sigma(#Delta#theta/#delta(#Delta#theta))=Asin(2#phi+B)+C"; - } - if (xvar == "phi" && yvar == "eta" && resolution && pull) - { - f = new TF1("sine","[0]*sin([1]*x+[2])+[3]"); - f->FixParameter(1,2); - nParameters = 3; - Int_t tempParameters[3] = {0,2,3}; - TString tempParameterNames[3] = {"A","B","C"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#sigma(#Delta#eta/#delta(#Delta#eta))=Asin(2#phi+B)+C"; - } - if (xvar == "phi" && yvar == "dz" && !resolution && !pull) - { - f = new TF1("tanh","[0]*(tanh([1]*(x+[2])) )"); // - tanh(([3]-[1])*x+[2]) + 1)"); - //f = new TF1("tanh","[0]*(tanh([1]*(x+[2])) + tanh([1]*([3]-[2]-x)) - 1)"); - f->SetParameter(0,100); - f->SetParLimits(1,-20,20); - f->SetParLimits(2,0,pi); - f->FixParameter(3,pi); - nParameters = 3; - Int_t tempParameters[3] = {0,1,2}; - TString tempParameterNames[3] = {"A;#mum","B","C"}; - parameters = tempParameters; - parameternames = 
tempParameterNames; - functionname = "#Deltad_{z}=Atanh(B(#phi+C))"; - //functionname = "#Deltad_{z}=A(tanh(B(#phi+C)) + tanh(B(#pi-#phi-C)) - 1"; - } + if (xvar == "dxy" && yvar == "phi" && !resolution && pull) { + f = new TF1("line", "-[0]*(x-[1])"); + f->FixParameter(1, 0); + parametername = "A;cm^{-1}"; + functionname = "#Delta#phi/#delta(#Delta#phi)=-A(d_{xy})"; + parameter = 0; } - if (misalignment == "layerRot") - { - if (xvar == "qoverpt" && yvar == "qoverpt" && !relative && !resolution && !pull) - { - f = new TF1("sech","[0]/cosh([1]*(x+[2]))+[3]"); - //f = new TF1("gauss","[0]/exp(([1]*(x+[2]))^2)+[3]"); //sech works better than a gaussian - f->SetParameter(0,1); - f->SetParameter(1,1); - f->SetParLimits(1,0,10); - f->FixParameter(2,0); - f->FixParameter(3,0); - nParameters = 2; - Int_t tempParameters[2] = {0,1}; - TString tempParameterNames[2] = {"A;10^{-3}e/GeV","B;GeV/e"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#Delta(q/p_{T})=Asech(B(q/p_{T}))"; - } + } + if (misalignment == "skew") { + if (xvar == "phi" && yvar == "theta" && resolution && !pull) { + f = new TF1("sine", "[0]*sin([1]*x+[2])+[3]"); + f->FixParameter(1, 2); + nParameters = 3; + Int_t tempParameters[3] = {0, 2, 3}; + TString tempParameterNames[3] = {"A;mrad", "B", "C;mrad"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#sigma(#Delta#theta)=Asin(2#phi+B)+C"; } - if (misalignment == "telescope") - { - if (xvar == "theta" && yvar == "theta" && !relative && !resolution && !pull) - { - f = new TF1("gauss","[0]/exp(([1]*(x+[2]))^2)+[3]"); - f->SetParameter(0,1); - f->SetParameter(1,1); - f->SetParLimits(1,0,10); - f->FixParameter(2,-pi/2); - f->FixParameter(3,0); - nParameters = 2; - Int_t tempParameters[2] = {0,1}; - TString tempParameterNames[2] = {"A;mrad","B"}; - parameters = tempParameters; - parameternames = tempParameterNames; - functionname = "#Delta#theta=Aexp(-(B(#theta-#pi/2))^{2})"; - } + if 
(xvar == "phi" && yvar == "eta" && resolution && !pull) { + f = new TF1("sine", "[0]*sin([1]*x+[2])+[3]"); + f->FixParameter(1, 2); + nParameters = 3; + Int_t tempParameters[3] = {0, 2, 3}; + TString tempParameterNames[3] = {"A;mrad", "B", "C;mrad"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#sigma(#Delta#eta)=Asin(2#phi+B)+C"; } - if (functionname == "") return false; - if (drawfits) - { - parameter = -parameter-1; - for (int i = 0; i < nParameters; i++) - parameters[i] = -parameters[i]-1; + if (xvar == "phi" && yvar == "theta" && resolution && pull) { + f = new TF1("sine", "[0]*sin([1]*x+[2])+[3]"); + f->FixParameter(1, 2); + nParameters = 3; + Int_t tempParameters[3] = {0, 2, 3}; + TString tempParameterNames[3] = {"A", "B", "C"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#sigma(#Delta#theta/#delta(#Delta#theta))=Asin(2#phi+B)+C"; } - if (nParameters > 0) - misalignmentDependence(c1old,nFiles,names,misalignment,values,phases,xvar,yvar, - f,nParameters,parameters,parameternames,functionname,relative,resolution,pull,saveas); - else - misalignmentDependence(c1old,nFiles,names,misalignment,values,phases,xvar,yvar, - f,parameter,parametername,functionname,relative,resolution,pull,saveas); - delete f; - return true; - + if (xvar == "phi" && yvar == "eta" && resolution && pull) { + f = new TF1("sine", "[0]*sin([1]*x+[2])+[3]"); + f->FixParameter(1, 2); + nParameters = 3; + Int_t tempParameters[3] = {0, 2, 3}; + TString tempParameterNames[3] = {"A", "B", "C"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#sigma(#Delta#eta/#delta(#Delta#eta))=Asin(2#phi+B)+C"; + } + if (xvar == "phi" && yvar == "dz" && !resolution && !pull) { + f = new TF1("tanh", "[0]*(tanh([1]*(x+[2])) )"); // - tanh(([3]-[1])*x+[2]) + 1)"); + //f = new TF1("tanh","[0]*(tanh([1]*(x+[2])) + tanh([1]*([3]-[2]-x)) - 1)"); + f->SetParameter(0, 100); + f->SetParLimits(1, -20, 
20); + f->SetParLimits(2, 0, pi); + f->FixParameter(3, pi); + nParameters = 3; + Int_t tempParameters[3] = {0, 1, 2}; + TString tempParameterNames[3] = {"A;#mum", "B", "C"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#Deltad_{z}=Atanh(B(#phi+C))"; + //functionname = "#Deltad_{z}=A(tanh(B(#phi+C)) + tanh(B(#pi-#phi-C)) - 1"; + } + } + if (misalignment == "layerRot") { + if (xvar == "qoverpt" && yvar == "qoverpt" && !relative && !resolution && !pull) { + f = new TF1("sech", "[0]/cosh([1]*(x+[2]))+[3]"); + //f = new TF1("gauss","[0]/exp(([1]*(x+[2]))^2)+[3]"); //sech works better than a gaussian + f->SetParameter(0, 1); + f->SetParameter(1, 1); + f->SetParLimits(1, 0, 10); + f->FixParameter(2, 0); + f->FixParameter(3, 0); + nParameters = 2; + Int_t tempParameters[2] = {0, 1}; + TString tempParameterNames[2] = {"A;10^{-3}e/GeV", "B;GeV/e"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#Delta(q/p_{T})=Asech(B(q/p_{T}))"; + } + } + if (misalignment == "telescope") { + if (xvar == "theta" && yvar == "theta" && !relative && !resolution && !pull) { + f = new TF1("gauss", "[0]/exp(([1]*(x+[2]))^2)+[3]"); + f->SetParameter(0, 1); + f->SetParameter(1, 1); + f->SetParLimits(1, 0, 10); + f->FixParameter(2, -pi / 2); + f->FixParameter(3, 0); + nParameters = 2; + Int_t tempParameters[2] = {0, 1}; + TString tempParameterNames[2] = {"A;mrad", "B"}; + parameters = tempParameters; + parameternames = tempParameterNames; + functionname = "#Delta#theta=Aexp(-(B(#theta-#pi/2))^{2})"; + } + } + if (functionname == "") + return false; + if (drawfits) { + parameter = -parameter - 1; + for (int i = 0; i < nParameters; i++) + parameters[i] = -parameters[i] - 1; + } + if (nParameters > 0) + misalignmentDependence(c1old, + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + f, + nParameters, + parameters, + parameternames, + functionname, + relative, + resolution, + pull, + saveas); + else 
+ misalignmentDependence(c1old, + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + f, + parameter, + parametername, + functionname, + relative, + resolution, + pull, + saveas); + delete f; + return true; } - //This is the most practically useful version. It does not take a canvas, but produces it automatically and then determines what //function to fit it to. -Bool_t misalignmentDependence(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString xvar,TString yvar, +Bool_t misalignmentDependence(Int_t nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString xvar, + TString yvar, Bool_t drawfits, - Bool_t relative,Bool_t resolution,Bool_t pull, - TString saveas) -{ - return misalignmentDependence(trackSplitPlot(nFiles,files,names,xvar,yvar,relative,resolution,pull,""), - nFiles,names,misalignment,values,phases,xvar,yvar, - drawfits,relative,resolution,pull,saveas); + Bool_t relative, + Bool_t resolution, + Bool_t pull, + TString saveas) { + return misalignmentDependence(trackSplitPlot(nFiles, files, names, xvar, yvar, relative, resolution, pull, ""), + nFiles, + names, + misalignment, + values, + phases, + xvar, + yvar, + drawfits, + relative, + resolution, + pull, + saveas); } -Bool_t hasFit(TString misalignment,TString xvar,TString yvar,Bool_t relative,Bool_t resolution,Bool_t pull) -{ - return misalignmentDependence((TCanvas*)0, - 0,(TString*)0,misalignment,(Double_t*)0,(Double_t*)0,xvar,yvar, - false, - relative,resolution,pull, - TString("")); +Bool_t hasFit(TString misalignment, TString xvar, TString yvar, Bool_t relative, Bool_t resolution, Bool_t pull) { + return misalignmentDependence((TCanvas *)nullptr, + 0, + (TString *)nullptr, + misalignment, + (Double_t *)nullptr, + (Double_t *)nullptr, + xvar, + yvar, + false, + relative, + resolution, + pull, + TString("")); } //============= //2. 
Make Plots //============= -void makePlots(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString directory, - Bool_t matrix[xsize][ysize]) -{ - stufftodelete->SetOwner(true); - - for (Int_t i = 0, totaltime = 0; i < nFiles; i++) - { - TFile *f = 0; - bool exists = false; - if (files[i] == "") exists = true; - - for (int j = 1; j <= 60*24 && !exists; j++, totaltime++) //wait up to 1 day for the validation to be finished - { - f = TFile::Open(files[i]); - if (f != 0) - exists = f->IsOpen(); - delete f; - if (exists) continue; - gSystem->Sleep(60000); - cout << "It's been "; - if (j >= 60) - cout << j/60 << " hour"; - if (j >= 120) - cout << "s"; - if (j % 60 != 0 && j >= 60) - cout << " and "; - if (j % 60 != 0) - cout << j%60 << " minute"; - if (j % 60 >= 2) - cout << "s"; - cout << endl; - } - if (!exists) return; - if (i == nFiles - 1 && totaltime > nFiles) - gSystem->Sleep(60000); - } - - TString directorytomake = directory; - gSystem->mkdir(directorytomake,true); - - ofstream summaryfile(directorytomake+"/TrackSplittingValidationSummary.txt"); - for (int i = 0; i < nFiles; i++) { - summaryfile << "\t" << TString(names[i]).ReplaceAll("#", "\\"); - } - summaryfile << "\tformat={}\tlatexformat={}\n"; - - if (misalignment != "") +void makePlots(Int_t nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString directory, + Bool_t matrix[xsize][ysize]) { + stufftodelete->SetOwner(true); + + for (Int_t i = 0, totaltime = 0; i < nFiles; i++) { + TFile *f = nullptr; + bool exists = false; + if (files[i] == "") + exists = true; + + for (int j = 1; j <= 60 * 24 && !exists; j++, totaltime++) //wait up to 1 day for the validation to be finished { - directorytomake.Append("/fits"); - gSystem->mkdir(directorytomake); + f = TFile::Open(files[i]); + if (f != nullptr) + exists = f->IsOpen(); + delete f; + if (exists) + continue; + gSystem->Sleep(60000); + cout << "It's 
been "; + if (j >= 60) + cout << j / 60 << " hour"; + if (j >= 120) + cout << "s"; + if (j % 60 != 0 && j >= 60) + cout << " and "; + if (j % 60 != 0) + cout << j % 60 << " minute"; + if (j % 60 >= 2) + cout << "s"; + cout << endl; } + if (!exists) + return; + if (i == nFiles - 1 && totaltime > nFiles) + gSystem->Sleep(60000); + } + + TString directorytomake = directory; + gSystem->mkdir(directorytomake, true); + + ofstream summaryfile(directorytomake + "/TrackSplittingValidationSummary.txt"); + for (int i = 0; i < nFiles; i++) { + summaryfile << "\t" << TString(names[i]).ReplaceAll("#", "\\"); + } + summaryfile << "\tformat={}\tlatexformat={}\n"; + + if (misalignment != "") { + directorytomake.Append("/fits"); + gSystem->mkdir(directorytomake); + } + + for (Int_t x = 0; x < xsize; x++) { + for (Int_t y = 0; y < ysize; y++) { + for (Int_t pull = 0; pull == 0 || (pull == 1 && yvariables[y] != ""); pull++) { + if (false) + continue; //this line is to make it easier to do e.g. all plots involving Delta eta + //(replace false with yvariables[y] != "eta") + + if (!matrix[x][y]) + continue; + + if (xvariables[x] == "" && yvariables[y] == "") + continue; + + Int_t nPlots = + nFiles + 4; //scatterplot for each (if you uncomment it), profile, resolution, and fits for each. 
+ vector s; + + TString slashstring = ""; + if (directory.Last('/') != directory.Length() - 1) + slashstring = "/"; + + vector plotnames; + for (Int_t i = 0; i < nFiles; i++) { + plotnames.push_back(names[i]); //this is plotnames[i] + plotnames[i].ReplaceAll(" ", ""); + } + + plotnames.push_back(""); //this is plotnames[nFiles], but gets changed + if (yvariables[y] == "") + plotnames[nFiles] = "orghist"; + else if (xvariables[x] == "") + plotnames[nFiles] = "hist"; + else + plotnames[nFiles] = "profile"; + + plotnames.push_back("resolution"); //this is plotnames[nFiles+1] + + plotnames.push_back(""); //this is plotnames[nFiles+2] + plotnames.push_back(""); //this is plotnames[nFiles+3] + if (plotnames[nFiles] == "profile") { + plotnames[nFiles + 2] = ".profile"; + plotnames[nFiles + 2].Prepend(misalignment); + plotnames[nFiles + 3] = ".resolution"; + plotnames[nFiles + 3].Prepend(misalignment); + plotnames[nFiles + 2].Prepend("fits/"); + plotnames[nFiles + 3].Prepend("fits/"); + } else { + plotnames[nFiles + 2] = "profile."; + plotnames[nFiles + 2].Append(misalignment); + plotnames[nFiles + 3] = "resolution."; + plotnames[nFiles + 3].Append(misalignment); + } - for (Int_t x = 0; x < xsize; x++) - { - for (Int_t y = 0; y < ysize; y++) - { - for (Int_t pull = 0; pull == 0 || (pull == 1 && yvariables[y] != ""); pull++) - { - if (false) continue; //this line is to make it easier to do e.g. all plots involving Delta eta - //(replace false with yvariables[y] != "eta") - - if (!matrix[x][y]) continue; - - if (xvariables[x] == "" && yvariables[y] == "") continue; - - Int_t nPlots = nFiles+4; //scatterplot for each (if you uncomment it), profile, resolution, and fits for each. 
- vector s; - - TString slashstring = ""; - if (directory.Last('/') != directory.Length() - 1) slashstring = "/"; - - vector plotnames; - for (Int_t i = 0; i < nFiles; i++) - { - plotnames.push_back(names[i]); //this is plotnames[i] - plotnames[i].ReplaceAll(" ",""); - } - - plotnames.push_back(""); //this is plotnames[nFiles], but gets changed - if (yvariables[y] == "") - plotnames[nFiles] = "orghist"; - else if (xvariables[x] == "") - plotnames[nFiles] = "hist"; - else - plotnames[nFiles] = "profile"; - - plotnames.push_back("resolution"); //this is plotnames[nFiles+1] - - plotnames.push_back(""); //this is plotnames[nFiles+2] - plotnames.push_back(""); //this is plotnames[nFiles+3] - if (plotnames[nFiles] == "profile") - { - plotnames[nFiles+2] = ".profile"; - plotnames[nFiles+2].Prepend(misalignment); - plotnames[nFiles+3] = ".resolution"; - plotnames[nFiles+3].Prepend(misalignment); - plotnames[nFiles+2].Prepend("fits/"); - plotnames[nFiles+3].Prepend("fits/"); - } - else - { - plotnames[nFiles+2] = "profile."; - plotnames[nFiles+2].Append(misalignment); - plotnames[nFiles+3] = "resolution."; - plotnames[nFiles+3].Append(misalignment); - } - - TString pullstring = ""; - if (pull) pullstring = "pull."; - - TString xvarstring = xvariables[x]; - if (xvariables[x] != "runNumber" && !xvariables[x].BeginsWith("nHits") && xvariables[x] != "") xvarstring.Append("_org"); - if (xvariables[x] != "" && yvariables[y] != "") xvarstring.Append("."); - - TString yvarstring = yvariables[y]; - if (yvariables[y] != "") yvarstring.Prepend("Delta_"); - - TString relativestring = ""; - if (relativearray[y]) relativestring = ".relative"; - - for (Int_t i = 0; i < nPlots; i++) - { - stringstream ss; - ss << directory << slashstring << plotnames[i] << "." 
<< pullstring - << xvarstring << yvarstring << relativestring << ".pngepsroot"; - s.push_back(ss.str()); - if (misalignment != "") - { - TString wrongway = misalignment; - TString rightway = misalignment; - wrongway.Append (".pull"); - rightway.Prepend("pull."); - s[i].ReplaceAll(wrongway,rightway); - } - } - - Int_t i; - for (i = 0; i < nFiles; i++) - { - if (xvariables[x] == "" || yvariables[y] == "") continue; - //uncomment this section to make scatterplots - /* + TString pullstring = ""; + if (pull) + pullstring = "pull."; + + TString xvarstring = xvariables[x]; + if (xvariables[x] != "runNumber" && !xvariables[x].BeginsWith("nHits") && xvariables[x] != "") + xvarstring.Append("_org"); + if (xvariables[x] != "" && yvariables[y] != "") + xvarstring.Append("."); + + TString yvarstring = yvariables[y]; + if (yvariables[y] != "") + yvarstring.Prepend("Delta_"); + + TString relativestring = ""; + if (relativearray[y]) + relativestring = ".relative"; + + for (Int_t i = 0; i < nPlots; i++) { + stringstream ss; + ss << directory << slashstring << plotnames[i] << "." 
<< pullstring << xvarstring << yvarstring + << relativestring << ".pngepsroot"; + s.push_back(ss.str()); + if (misalignment != "") { + TString wrongway = misalignment; + TString rightway = misalignment; + wrongway.Append(".pull"); + rightway.Prepend("pull."); + s[i].ReplaceAll(wrongway, rightway); + } + } + + Int_t i; + for (i = 0; i < nFiles; i++) { + if (xvariables[x] == "" || yvariables[y] == "") + continue; + //uncomment this section to make scatterplots + /* trackSplitPlot(files[i],xvariables[x],yvariables[y],false,relativearray[y],false,(bool)pull,s[i]); stufftodelete->Clear(); for ( ; gROOT->GetListOfCanvases()->GetEntries() > 0; ) @@ -1547,110 +1732,179 @@ void makePlots(Int_t nFiles,TString *files,TString *names,TString misalignment,D for ( ; gROOT->GetListOfFiles()->GetEntries() > 0; ) delete (TFile*)gROOT->GetListOfFiles()->Last(); */ - } - - if (xvariables[x] != "" && yvariables[y] != "") - { - //make profile - TCanvas *c1 = trackSplitPlot(nFiles,files,names,xvariables[x],yvariables[y],relativearray[y],false,(bool)pull,s[i],summaryfile); - if (misalignmentDependence(c1,nFiles,names,misalignment,values,phases,xvariables[x],yvariables[y], - true,relativearray[y],false,(bool)pull,s[i+2])) - { - s[i+2].ReplaceAll(".png",".parameter.png"); - misalignmentDependence(c1,nFiles,names,misalignment,values,phases,xvariables[x],yvariables[y], - false,relativearray[y],false,(bool)pull,s[i+2]); - } - stufftodelete->Clear(); - for ( ; gROOT->GetListOfCanvases()->GetEntries() > 0; ) - deleteCanvas( gROOT->GetListOfCanvases()->Last()); - for ( ; gROOT->GetListOfFiles()->GetEntries() > 0; ) - delete (TFile*)gROOT->GetListOfFiles()->Last(); - - //make resolution plot - TCanvas *c2 = trackSplitPlot(nFiles,files,names,xvariables[x],yvariables[y],relativearray[y],true ,(bool)pull,s[i+1],summaryfile); - if (misalignmentDependence(c2,nFiles,names,misalignment,values,phases,xvariables[x],yvariables[y], - true,relativearray[y],true,(bool)pull,s[i+3])) - { - 
s[i+3].ReplaceAll(".png",".parameter.png"); - misalignmentDependence(c2,nFiles,names,misalignment,values,phases,xvariables[x],yvariables[y], - false,relativearray[y],true,(bool)pull,s[i+3]); - } - stufftodelete->Clear(); - for ( ; gROOT->GetListOfCanvases()->GetEntries() > 0; ) - deleteCanvas( gROOT->GetListOfCanvases()->Last()); - for ( ; gROOT->GetListOfFiles()->GetEntries() > 0; ) - delete (TFile*)gROOT->GetListOfFiles()->Last(); - } - else - { - //make histogram - TCanvas *c1 = trackSplitPlot(nFiles,files,names,xvariables[x],yvariables[y],relativearray[y],false,(bool)pull,s[i],summaryfile); - if (misalignmentDependence(c1,nFiles,names,misalignment,values,phases,xvariables[x],yvariables[y], - true,relativearray[y],false,(bool)pull,s[i+2])) - { - misalignmentDependence(c1,nFiles,names,misalignment,values,phases,xvariables[x],yvariables[y], - true,relativearray[y],true,(bool)pull,s[i+3]); - } - stufftodelete->Clear(); - for ( ; gROOT->GetListOfCanvases()->GetEntries() > 0; ) - deleteCanvas( gROOT->GetListOfCanvases()->Last()); - for ( ; gROOT->GetListOfFiles()->GetEntries() > 0; ) - delete (TFile*)gROOT->GetListOfFiles()->Last(); - } - } - cout << y + ysize * x + 1 << "/" << xsize*ysize << endl; } + + if (xvariables[x] != "" && yvariables[y] != "") { + //make profile + TCanvas *c1 = trackSplitPlot( + nFiles, files, names, xvariables[x], yvariables[y], relativearray[y], false, (bool)pull, s[i], summaryfile); + if (misalignmentDependence(c1, + nFiles, + names, + misalignment, + values, + phases, + xvariables[x], + yvariables[y], + true, + relativearray[y], + false, + (bool)pull, + s[i + 2])) { + s[i + 2].ReplaceAll(".png", ".parameter.png"); + misalignmentDependence(c1, + nFiles, + names, + misalignment, + values, + phases, + xvariables[x], + yvariables[y], + false, + relativearray[y], + false, + (bool)pull, + s[i + 2]); + } + stufftodelete->Clear(); + for (; gROOT->GetListOfCanvases()->GetEntries() > 0;) + deleteCanvas(gROOT->GetListOfCanvases()->Last()); + for (; 
gROOT->GetListOfFiles()->GetEntries() > 0;) + delete (TFile *)gROOT->GetListOfFiles()->Last(); + + //make resolution plot + TCanvas *c2 = trackSplitPlot(nFiles, + files, + names, + xvariables[x], + yvariables[y], + relativearray[y], + true, + (bool)pull, + s[i + 1], + summaryfile); + if (misalignmentDependence(c2, + nFiles, + names, + misalignment, + values, + phases, + xvariables[x], + yvariables[y], + true, + relativearray[y], + true, + (bool)pull, + s[i + 3])) { + s[i + 3].ReplaceAll(".png", ".parameter.png"); + misalignmentDependence(c2, + nFiles, + names, + misalignment, + values, + phases, + xvariables[x], + yvariables[y], + false, + relativearray[y], + true, + (bool)pull, + s[i + 3]); + } + stufftodelete->Clear(); + for (; gROOT->GetListOfCanvases()->GetEntries() > 0;) + deleteCanvas(gROOT->GetListOfCanvases()->Last()); + for (; gROOT->GetListOfFiles()->GetEntries() > 0;) + delete (TFile *)gROOT->GetListOfFiles()->Last(); + } else { + //make histogram + TCanvas *c1 = trackSplitPlot( + nFiles, files, names, xvariables[x], yvariables[y], relativearray[y], false, (bool)pull, s[i], summaryfile); + if (misalignmentDependence(c1, + nFiles, + names, + misalignment, + values, + phases, + xvariables[x], + yvariables[y], + true, + relativearray[y], + false, + (bool)pull, + s[i + 2])) { + misalignmentDependence(c1, + nFiles, + names, + misalignment, + values, + phases, + xvariables[x], + yvariables[y], + true, + relativearray[y], + true, + (bool)pull, + s[i + 3]); + } + stufftodelete->Clear(); + for (; gROOT->GetListOfCanvases()->GetEntries() > 0;) + deleteCanvas(gROOT->GetListOfCanvases()->Last()); + for (; gROOT->GetListOfFiles()->GetEntries() > 0;) + delete (TFile *)gROOT->GetListOfFiles()->Last(); + } + } + cout << y + ysize * x + 1 << "/" << xsize * ysize << endl; } + } } -void makePlots(Int_t nFiles,TString *files,TString *names,TString directory, Bool_t matrix[xsize][ysize]) -{ - makePlots(nFiles,files,names,"",(Double_t*)0,(Double_t*)0,directory, - matrix); 
+void makePlots(Int_t nFiles, TString *files, TString *names, TString directory, Bool_t matrix[xsize][ysize]) { + makePlots(nFiles, files, names, "", (Double_t *)nullptr, (Double_t *)nullptr, directory, matrix); } -void makePlots(TString file,TString misalignment,Double_t *values,Double_t *phases,TString directory,Bool_t matrix[xsize][ysize]) -{ - setupcolors(); - file.Remove(TString::kTrailing, ','); - int n = file.CountChar(',') + 1; - TString *files = new TString[n]; - TString *names = new TString[n]; - vector tempcolors = colors; - vector tempstyles = styles; - for (int i = 0; i < n; i++) - { - TString thisfile = nPart(i+1,file,","); - int numberofpipes = thisfile.CountChar('|'); - if (numberofpipes >= 0 && nPart(numberofpipes+1,thisfile,"|").IsDigit()) - { - if (numberofpipes >= 1 && nPart(numberofpipes,thisfile,"|").IsDigit()) - { - colors[i] = nPart(numberofpipes,thisfile,"|").Atoi(); - styles[i] = nPart(numberofpipes+1,thisfile,"|").Atoi(); - thisfile.Remove(thisfile.Length() - nPart(numberofpipes,thisfile,"|").Length() - nPart(numberofpipes+1,thisfile,"|").Length() - 2); - } - else - { - colors[i] = nPart(numberofpipes + 1,thisfile,"|").Atoi(); - thisfile.Remove(thisfile.Length() - nPart(numberofpipes+1,thisfile,"|").Length() - 2); - } - } - files[i] = nPart(1,thisfile,"=",true); - names[i] = nPart(2,thisfile,"=",false); +void makePlots(TString file, + TString misalignment, + Double_t *values, + Double_t *phases, + TString directory, + Bool_t matrix[xsize][ysize]) { + setupcolors(); + file.Remove(TString::kTrailing, ','); + unsigned int n = file.CountChar(',') + 1; + TString *files = new TString[n]; + TString *names = new TString[n]; + vector tempcolors = colors; + vector tempstyles = styles; + for (unsigned int i = 0; i < n; i++) { + TString thisfile = nPart(i + 1, file, ","); + int numberofpipes = thisfile.CountChar('|'); + if (numberofpipes >= 0 && nPart(numberofpipes + 1, thisfile, "|").IsDigit()) { + if (numberofpipes >= 1 && nPart(numberofpipes, 
thisfile, "|").IsDigit()) { + colors[i] = nPart(numberofpipes, thisfile, "|").Atoi(); + styles[i] = nPart(numberofpipes + 1, thisfile, "|").Atoi(); + thisfile.Remove(thisfile.Length() - nPart(numberofpipes, thisfile, "|").Length() - + nPart(numberofpipes + 1, thisfile, "|").Length() - 2); + } else { + colors[i] = nPart(numberofpipes + 1, thisfile, "|").Atoi(); + thisfile.Remove(thisfile.Length() - nPart(numberofpipes + 1, thisfile, "|").Length() - 2); + } } - if (n == 1 && names[0] == "") - names[0] = "scatterplot"; //With 1 file there's no legend, so this is only used in the filename of the scatterplots, if made - makePlots(n,files,names,misalignment,values,phases,directory,matrix); - delete[] files; - delete[] names; - colors = tempcolors; - styles = tempstyles; + files[i] = nPart(1, thisfile, "=", true); + names[i] = nPart(2, thisfile, "=", false); + } + if (n == 1 && names[0] == "") + names[0] = + "scatterplot"; //With 1 file there's no legend, so this is only used in the filename of the scatterplots, if made + makePlots(n, files, names, misalignment, values, phases, directory, matrix); + delete[] files; + delete[] names; + colors = tempcolors; + styles = tempstyles; } -void makePlots(TString file,TString directory,Bool_t matrix[xsize][ysize]) -{ - makePlots(file,"",(Double_t*)0,(Double_t*)0,directory,matrix); +void makePlots(TString file, TString directory, Bool_t matrix[xsize][ysize]) { + makePlots(file, "", (Double_t *)nullptr, (Double_t *)nullptr, directory, matrix); } //*************************************************************************** @@ -1663,795 +1917,731 @@ void makePlots(TString file,TString directory,Bool_t matrix[xsize][ysize]) // (including Delta_pt/pt_org) //*************************************************************************** -void makePlots(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString directory, - TString xvar,TString yvar) -{ - Bool_t matrix[xsize][ysize]; - for (int x 
= 0; x < xsize; x++) - for (int y = 0; y < ysize; y++) - { - bool xmatch = (xvar == "all" || xvar == xvariables[x]); - bool ymatch = (yvar == "all" || yvar == yvariables[y]); - if (yvar == "pt" && yvariables[y] == "pt" && relativearray[y] == true) - ymatch = false; - if (yvar == "ptrel" && yvariables[y] == "pt" && relativearray[y] == true) - ymatch = true; - matrix[x][y] = (xmatch && ymatch); - } - makePlots(nFiles,files,names,misalignment,values,phases,directory,matrix); +void makePlots(Int_t nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString directory, + TString xvar, + TString yvar) { + Bool_t matrix[xsize][ysize]; + for (int x = 0; x < xsize; x++) + for (int y = 0; y < ysize; y++) { + bool xmatch = (xvar == "all" || xvar == xvariables[x]); + bool ymatch = (yvar == "all" || yvar == yvariables[y]); + if (yvar == "pt" && yvariables[y] == "pt" && relativearray[y] == true) + ymatch = false; + if (yvar == "ptrel" && yvariables[y] == "pt" && relativearray[y] == true) + ymatch = true; + matrix[x][y] = (xmatch && ymatch); + } + makePlots(nFiles, files, names, misalignment, values, phases, directory, matrix); } -void makePlots(Int_t nFiles,TString *files,TString *names,TString directory, - TString xvar,TString yvar) -{ - makePlots(nFiles,files,names,"",(Double_t*)0,(Double_t*)0,directory, - xvar,yvar); +void makePlots(Int_t nFiles, TString *files, TString *names, TString directory, TString xvar, TString yvar) { + makePlots(nFiles, files, names, "", (Double_t *)nullptr, (Double_t *)nullptr, directory, xvar, yvar); } -void makePlots(TString file,TString misalignment,Double_t *values,Double_t *phases,TString directory, - TString xvar,TString yvar) -{ - setupcolors(); - file.Remove(TString::kTrailing, ','); - int n = file.CountChar(',') + 1; - TString *files = new TString[n]; - TString *names = new TString[n]; - vector tempcolors = colors; - vector tempstyles = styles; - for (int i = 0; i < n; i++) - { - 
TString thisfile = nPart(i+1,file,","); - int numberofpipes = thisfile.CountChar('|'); - if (numberofpipes >= 0 && nPart(numberofpipes+1,thisfile,"|").IsDigit()) - { - if (numberofpipes >= 1 && nPart(numberofpipes,thisfile,"|").IsDigit()) - { - colors[i] = nPart(numberofpipes,thisfile,"|").Atoi(); - styles[i] = nPart(numberofpipes+1,thisfile,"|").Atoi(); - thisfile.Remove(thisfile.Length() - nPart(numberofpipes,thisfile,"|").Length() - nPart(numberofpipes+1,thisfile,"|").Length() - 2); - } - else - { - colors[i] = nPart(numberofpipes + 1,thisfile,"|").Atoi(); - thisfile.Remove(thisfile.Length() - nPart(numberofpipes+1,thisfile,"|").Length() - 2); - } - } - files[i] = nPart(1,thisfile,"=",true); - names[i] = nPart(2,thisfile,"=",false); +void makePlots(TString file, + TString misalignment, + Double_t *values, + Double_t *phases, + TString directory, + TString xvar, + TString yvar) { + setupcolors(); + file.Remove(TString::kTrailing, ','); + unsigned int n = file.CountChar(',') + 1; + TString *files = new TString[n]; + TString *names = new TString[n]; + vector tempcolors = colors; + vector tempstyles = styles; + for (unsigned int i = 0; i < n; i++) { + TString thisfile = nPart(i + 1, file, ","); + int numberofpipes = thisfile.CountChar('|'); + if (numberofpipes >= 0 && nPart(numberofpipes + 1, thisfile, "|").IsDigit()) { + if (numberofpipes >= 1 && nPart(numberofpipes, thisfile, "|").IsDigit()) { + colors[i] = nPart(numberofpipes, thisfile, "|").Atoi(); + styles[i] = nPart(numberofpipes + 1, thisfile, "|").Atoi(); + thisfile.Remove(thisfile.Length() - nPart(numberofpipes, thisfile, "|").Length() - + nPart(numberofpipes + 1, thisfile, "|").Length() - 2); + } else { + colors[i] = nPart(numberofpipes + 1, thisfile, "|").Atoi(); + thisfile.Remove(thisfile.Length() - nPart(numberofpipes + 1, thisfile, "|").Length() - 2); + } } - if (n == 1 && names[0] == "") - names[0] = "scatterplot"; //With 1 file there's no legend, so this is only used in the filename of the 
scatterplots, if made - makePlots(n,files,names,misalignment,values,phases,directory,xvar,yvar); - delete[] files; - delete[] names; - colors = tempcolors; - styles = tempstyles; + files[i] = nPart(1, thisfile, "=", true); + names[i] = nPart(2, thisfile, "=", false); + } + if (n == 1 && names[0] == "") + names[0] = + "scatterplot"; //With 1 file there's no legend, so this is only used in the filename of the scatterplots, if made + makePlots(n, files, names, misalignment, values, phases, directory, xvar, yvar); + delete[] files; + delete[] names; + colors = tempcolors; + styles = tempstyles; } -void makePlots(TString file,TString directory,TString xvar,TString yvar) -{ - makePlots(file,"",(Double_t*)0,(Double_t*)0,directory,xvar,yvar); +void makePlots(TString file, TString directory, TString xvar, TString yvar) { + makePlots(file, "", (Double_t *)nullptr, (Double_t *)nullptr, directory, xvar, yvar); } //*************************** //functions to make all plots //*************************** -void makePlots(Int_t nFiles,TString *files,TString *names,TString misalignment,Double_t *values,Double_t *phases,TString directory) -{ - makePlots(nFiles,files,names,misalignment,values,phases,directory,"all","all"); +void makePlots(Int_t nFiles, + TString *files, + TString *names, + TString misalignment, + Double_t *values, + Double_t *phases, + TString directory) { + makePlots(nFiles, files, names, misalignment, values, phases, directory, "all", "all"); } -void makePlots(Int_t nFiles,TString *files,TString *names,TString directory) -{ - makePlots(nFiles,files,names,"",(Double_t*)0,(Double_t*)0,directory); +void makePlots(Int_t nFiles, TString *files, TString *names, TString directory) { + makePlots(nFiles, files, names, "", (Double_t *)nullptr, (Double_t *)nullptr, directory); } -void makePlots(TString file,TString misalignment,Double_t *values,Double_t *phases,TString directory) -{ - setupcolors(); - file.Remove(TString::kTrailing, ','); - int n = file.CountChar(',') + 1; - 
TString *files = new TString[n]; - TString *names = new TString[n]; - vector tempcolors = colors; - vector tempstyles = styles; - for (int i = 0; i < n; i++) - { - TString thisfile = nPart(i+1,file,","); - int numberofpipes = thisfile.CountChar('|'); - if (numberofpipes >= 0 && nPart(numberofpipes+1,thisfile,"|").IsDigit()) - { - if (numberofpipes >= 1 && nPart(numberofpipes,thisfile,"|").IsDigit()) - { - colors[i] = nPart(numberofpipes,thisfile,"|").Atoi(); - styles[i] = nPart(numberofpipes+1,thisfile,"|").Atoi(); - thisfile.Remove(thisfile.Length() - nPart(numberofpipes,thisfile,"|").Length() - nPart(numberofpipes+1,thisfile,"|").Length() - 2); - } - else - { - colors[i] = nPart(numberofpipes + 1,thisfile,"|").Atoi(); - thisfile.Remove(thisfile.Length() - nPart(numberofpipes+1,thisfile,"|").Length() - 2); - } - } - files[i] = nPart(1,thisfile,"=",true); - names[i] = nPart(2,thisfile,"=",false); +void makePlots(TString file, TString misalignment, Double_t *values, Double_t *phases, TString directory) { + setupcolors(); + file.Remove(TString::kTrailing, ','); + unsigned int n = file.CountChar(',') + 1; + TString *files = new TString[n]; + TString *names = new TString[n]; + vector tempcolors = colors; + vector tempstyles = styles; + for (unsigned int i = 0; i < n; i++) { + TString thisfile = nPart(i + 1, file, ","); + int numberofpipes = thisfile.CountChar('|'); + if (numberofpipes >= 0 && nPart(numberofpipes + 1, thisfile, "|").IsDigit()) { + if (numberofpipes >= 1 && nPart(numberofpipes, thisfile, "|").IsDigit()) { + colors[i] = nPart(numberofpipes, thisfile, "|").Atoi(); + styles[i] = nPart(numberofpipes + 1, thisfile, "|").Atoi(); + thisfile.Remove(thisfile.Length() - nPart(numberofpipes, thisfile, "|").Length() - + nPart(numberofpipes + 1, thisfile, "|").Length() - 2); + } else { + colors[i] = nPart(numberofpipes + 1, thisfile, "|").Atoi(); + thisfile.Remove(thisfile.Length() - nPart(numberofpipes + 1, thisfile, "|").Length() - 2); + } } - if (n == 1 && 
names[0] == "") - names[0] = "scatterplot"; //With 1 file there's no legend, so this is only used in the filename of the scatterplots, if made - makePlots(n,files,names,misalignment,values,phases,directory); - delete[] files; - delete[] names; - colors = tempcolors; - styles = tempstyles; + files[i] = nPart(1, thisfile, "=", true); + names[i] = nPart(2, thisfile, "=", false); + } + if (n == 1 && names[0] == "") + names[0] = + "scatterplot"; //With 1 file there's no legend, so this is only used in the filename of the scatterplots, if made + makePlots(n, files, names, misalignment, values, phases, directory); + delete[] files; + delete[] names; + colors = tempcolors; + styles = tempstyles; } -void makePlots(TString file,TString directory) -{ - makePlots(file,"",(Double_t*)0,(Double_t*)0,directory); +void makePlots(TString file, TString directory) { + makePlots(file, "", (Double_t *)nullptr, (Double_t *)nullptr, directory); } //============= //3. Axis Label //============= -TString fancyname(TString variable) -{ - if (variable == "pt") - return "p_{T}"; - else if (variable == "phi") - return "#phi"; - else if (variable == "eta") - return "#eta"; - else if (variable == "theta") - return "#theta"; - else if (variable == "qoverpt") - return "q/p_{T}"; - else if (variable == "runNumber") - return "run number"; - else if (variable == "dxy" || variable == "dz") - return variable.ReplaceAll("d","d_{").Append("}"); - else - return variable; +TString fancyname(TString variable) { + if (variable == "pt") + return "p_{T}"; + else if (variable == "phi") + return "#phi"; + else if (variable == "eta") + return "#eta"; + else if (variable == "theta") + return "#theta"; + else if (variable == "qoverpt") + return "q/p_{T}"; + else if (variable == "runNumber") + return "run number"; + else if (variable == "dxy" || variable == "dz") + return variable.ReplaceAll("d", "d_{").Append("}"); + else + return variable; } //this gives the units, to be put in the axis label -TString units(TString 
variable,Char_t axis) -{ - if (variable == "pt") - return "GeV"; - if (variable == "dxy" || variable == "dz") - { - if (axis == 'y') - return "#mum"; //in the tree, it's listed in centimeters, but in trackSplitPlot the value is divided by 1e4 - if (axis == 'x') - return "cm"; - } - if (variable == "qoverpt") - { - if (axis == 'y') - return "#times10^{-3}e/GeV"; //e/TeV is not particularly intuitive - if (axis == 'x') - return "e/GeV"; - } - if (axis == 'y' && (variable == "phi" || variable == "theta")) - return "mrad"; - return ""; +TString units(TString variable, Char_t axis) { + if (variable == "pt") + return "GeV"; + if (variable == "dxy" || variable == "dz") { + if (axis == 'y') + return "#mum"; //in the tree, it's listed in centimeters, but in trackSplitPlot the value is divided by 1e4 + if (axis == 'x') + return "cm"; + } + if (variable == "qoverpt") { + if (axis == 'y') + return "#times10^{-3}e/GeV"; //e/TeV is not particularly intuitive + if (axis == 'x') + return "e/GeV"; + } + if (axis == 'y' && (variable == "phi" || variable == "theta")) + return "mrad"; + return ""; } TString plainunits(TString variable, char axis) { - TString result = units(variable, axis); - result.ReplaceAll("#mu", "u"); - result.ReplaceAll("#times10^{-3}", "* 1e-3 "); - return result; + TString result = units(variable, axis); + result.ReplaceAll("#mu", "u"); + result.ReplaceAll("#times10^{-3}", "* 1e-3 "); + return result; } TString latexunits(TString variable, char axis) { - TString result = units(variable, axis); - result.ReplaceAll("#", "\\").ReplaceAll("{", "{{").ReplaceAll("}", "}}") - .ReplaceAll("\\mum", "$\\mu$m") - .ReplaceAll("\\times10^{{-3}}", "$\\times10^{{-3}}$"); - return result; + TString result = units(variable, axis); + result.ReplaceAll("#", "\\") + .ReplaceAll("{", "{{") + .ReplaceAll("}", "}}") + .ReplaceAll("\\mum", "$\\mu$m") + .ReplaceAll("\\times10^{{-3}}", "$\\times10^{{-3}}$"); + return result; } //this gives the full axis label, including units. 
It can handle any combination of relative, resolution, and pull. -TString axislabel(TString variable, Char_t axis, Bool_t relative, Bool_t resolution, Bool_t pull) -{ - if (axis == 'X' || axis == 'Y') - { - double min, max, bins; - axislimits(0,0,variable,tolower(axis),relative,pull,min,max,bins); +TString axislabel(TString variable, Char_t axis, Bool_t relative, Bool_t resolution, Bool_t pull) { + if (axis == 'X' || axis == 'Y') { + double min, max, bins; + axislimits(0, nullptr, variable, tolower(axis), relative, pull, min, max, bins); - if (variable.BeginsWith("nHits")) - return "fraction of tracks"; - if (variable == "runNumber") - return "number of tracks"; - - stringstream s; - s << "fraction of tracks / " << (max-min)/bins; - if (!pull && !relative) - { - TString varunits = units(variable, tolower(axis)); - if (varunits != "") - s << " " << varunits; - } - TString result = s.str(); - result.ReplaceAll(" #times","#times"); - return result; - } + if (variable.BeginsWith("nHits")) + return "fraction of tracks"; + if (variable == "runNumber") + return "number of tracks"; stringstream s; - if (resolution && axis == 'y') - s << "#sigma("; - if (axis == 'y') - s << "#Delta"; - s << fancyname(variable); - if (relative && axis == 'y') - { - s << " / "; - if (!pull) - s << "("; - s << fancyname(variable); - } - if (axis == 'y') - { - if (pull) - { - s << " / #delta(#Delta" << fancyname(variable); - if (relative) - s << " / " << fancyname(variable); - s << ")"; - } - else - { - if (!relative) - s << " / "; - s << "#sqrt{2}"; - if (relative) - s << ")"; - } + s << "fraction of tracks / " << (max - min) / bins; + if (!pull && !relative) { + TString varunits = units(variable, tolower(axis)); + if (varunits != "") + s << " " << varunits; } - if (resolution && axis == 'y') - s << ")"; - if (((!relative && !pull) || axis == 'x') && units(variable,axis) != "") - s << " (" << units(variable,axis) << ")"; TString result = s.str(); - 
result.ReplaceAll("#Deltaq/p_{T}","#Delta(q/p_{T})"); + result.ReplaceAll(" #times", "#times"); return result; + } + + stringstream s; + if (resolution && axis == 'y') + s << "#sigma("; + if (axis == 'y') + s << "#Delta"; + s << fancyname(variable); + if (relative && axis == 'y') { + s << " / "; + if (!pull) + s << "("; + s << fancyname(variable); + } + if (axis == 'y') { + if (pull) { + s << " / #delta(#Delta" << fancyname(variable); + if (relative) + s << " / " << fancyname(variable); + s << ")"; + } else { + if (!relative) + s << " / "; + s << "#sqrt{2}"; + if (relative) + s << ")"; + } + } + if (resolution && axis == 'y') + s << ")"; + if (((!relative && !pull) || axis == 'x') && units(variable, axis) != "") + s << " (" << units(variable, axis) << ")"; + TString result = s.str(); + result.ReplaceAll("#Deltaq/p_{T}", "#Delta(q/p_{T})"); + return result; } TString latexlabel(TString variable, Char_t axis, Bool_t relative, Bool_t resolution, Bool_t pull) { - TString result = axislabel(variable, axis, relative, resolution, pull); - result.ReplaceAll(" ("+units(variable, axis)+")", ""); - result.ReplaceAll("#", "\\").ReplaceAll("\\Delta", "\\Delta "); - return result; + TString result = axislabel(variable, axis, relative, resolution, pull); + result.ReplaceAll(" (" + units(variable, axis) + ")", ""); + result.ReplaceAll("#", "\\").ReplaceAll("\\Delta", "\\Delta "); + return result; } -void setAxisLabels(TH1 *p, PlotType type,TString xvar,TString yvar,Bool_t relative,Bool_t pull) -{ - if (type == Histogram) - p->SetXTitle(axislabel(yvar,'y',relative,false,pull)); - if (type == ScatterPlot || type == Profile || type == Resolution || type == OrgHistogram) - p->SetXTitle(axislabel(xvar,'x')); - - if (type == Histogram) - p->SetYTitle(axislabel(yvar,'Y',relative,false,pull)); - if (type == OrgHistogram) - p->SetYTitle(axislabel(xvar,'X',relative,false,pull)); - if (type == ScatterPlot || type == Profile) - p->SetYTitle(axislabel(yvar,'y',relative,false,pull)); - if (type 
== Resolution) - p->SetYTitle(axislabel(yvar,'y',relative,true,pull)); +void setAxisLabels(TH1 *p, PlotType type, TString xvar, TString yvar, Bool_t relative, Bool_t pull) { + if (type == Histogram) + p->SetXTitle(axislabel(yvar, 'y', relative, false, pull)); + if (type == ScatterPlot || type == Profile || type == Resolution || type == OrgHistogram) + p->SetXTitle(axislabel(xvar, 'x')); + + if (type == Histogram) + p->SetYTitle(axislabel(yvar, 'Y', relative, false, pull)); + if (type == OrgHistogram) + p->SetYTitle(axislabel(xvar, 'X', relative, false, pull)); + if (type == ScatterPlot || type == Profile) + p->SetYTitle(axislabel(yvar, 'y', relative, false, pull)); + if (type == Resolution) + p->SetYTitle(axislabel(yvar, 'y', relative, true, pull)); } -void setAxisLabels(TMultiGraph *p, PlotType type,TString xvar,TString yvar,Bool_t relative,Bool_t pull) -{ - if (type == Histogram) - p->GetXaxis()->SetTitle(axislabel(yvar,'y',relative,false,pull)); - if (type == ScatterPlot || type == Profile || type == Resolution || type == OrgHistogram) - p->GetXaxis()->SetTitle(axislabel(xvar,'x')); - - if (type == Histogram) - p->GetYaxis()->SetTitle(axislabel(yvar,'Y',relative,false,pull)); - if (type == OrgHistogram) - p->GetYaxis()->SetTitle(axislabel(xvar,'X',relative,false,pull)); - if (type == ScatterPlot || type == Profile) - p->GetYaxis()->SetTitle(axislabel(yvar,'y',relative,false,pull)); - if (type == Resolution) - p->GetYaxis()->SetTitle(axislabel(yvar,'y',relative,true,pull)); +void setAxisLabels(TMultiGraph *p, PlotType type, TString xvar, TString yvar, Bool_t relative, Bool_t pull) { + if (type == Histogram) + p->GetXaxis()->SetTitle(axislabel(yvar, 'y', relative, false, pull)); + if (type == ScatterPlot || type == Profile || type == Resolution || type == OrgHistogram) + p->GetXaxis()->SetTitle(axislabel(xvar, 'x')); + + if (type == Histogram) + p->GetYaxis()->SetTitle(axislabel(yvar, 'Y', relative, false, pull)); + if (type == OrgHistogram) + 
p->GetYaxis()->SetTitle(axislabel(xvar, 'X', relative, false, pull)); + if (type == ScatterPlot || type == Profile) + p->GetYaxis()->SetTitle(axislabel(yvar, 'y', relative, false, pull)); + if (type == Resolution) + p->GetYaxis()->SetTitle(axislabel(yvar, 'y', relative, true, pull)); } - -TString nPart(Int_t part,TString string,TString delimit,Bool_t removerest) -{ - if (part <= 0) return ""; - for (int i = 1; i < part; i++) //part-1 times - { - if (string.Index(delimit) < 0) return ""; - string.Replace(0,string.Index(delimit)+1,"",0); - } - if (string.Index(delimit) >= 0 && removerest) - string.Remove(string.Index(delimit)); - return string; +TString nPart(Int_t part, TString string, TString delimit, Bool_t removerest) { + if (part <= 0) + return ""; + for (int i = 1; i < part; i++) //part-1 times + { + if (string.Index(delimit) < 0) + return ""; + string.Replace(0, string.Index(delimit) + 1, "", 0); + } + if (string.Index(delimit) >= 0 && removerest) + string.Remove(string.Index(delimit)); + return string; } //============== //4. Axis Limits //============== +Double_t findStatistic( + Statistic what, Int_t nFiles, TString *files, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + Double_t x = 0, //if axis == 'x', var_org goes in x; if axis == 'y', Delta_var goes in x + rel = 1, //if relative, var_org goes in rel. x is divided by rel, so you get Delta_var/var_org + sigma1 = 1, //if pull, the error for split track 1 goes in sigma1 and the error for split track 2 goes in sigma2. + sigma2 = 1, //x is divided by sqrt(sigma1^2+sigma2^2). If !pull && axis == 'y', this divides by sqrt(2) + sigmaorg = 0; // because we want the error in one track. sigmaorg is used when relative && pull + Int_t xint = 0, + xint2 = 0; //xint is used for run number and nHits. xint2 is used for nHits because each event has 2 values. 
+ + Int_t runNumber = 0; //this is used to make sure the run number is between minrun and maxrun + + if (axis == 'x') { + sigma1 = 1 / sqrt(2); //if axis == 'x' don't divide by sqrt(2) + sigma2 = 1 / sqrt(2); + } + + Double_t totallength = 0; + vector xvect; + Double_t result = 0; + if (what == Minimum) + result = 1e100; + if (what == Maximum) + result = -1e100; + + stringstream sx, srel, ssigma1, ssigma2, ssigmaorg; + + if (axis == 'y') + sx << "Delta_"; + sx << var; + if (axis == 'x' && var != "runNumber" && !var.BeginsWith("nHits")) + sx << "_org"; + if (axis == 'x' && var.BeginsWith("nHits")) + sx << "1_spl"; + TString variable = sx.str(), variable2 = variable; + variable2.ReplaceAll("1_spl", "2_spl"); + + TString relvariable = "1"; + if (relative) { + srel << var << "_org"; + relvariable = srel.str(); + } + + if (pull) { + ssigma1 << var << "1Err_spl"; + ssigma2 << var << "2Err_spl"; + } + TString sigma1variable = ssigma1.str(); + TString sigma2variable = ssigma2.str(); + + if (pull && relative) + ssigmaorg << var << "Err_org"; + TString sigmaorgvariable = ssigmaorg.str(); + + if (!relative && !pull && (variable == "Delta_dxy" || variable == "Delta_dz")) + rel = 1e-4; //it's in cm but we want um + if (!relative && !pull && (variable == "Delta_phi" || variable == "Delta_theta" || variable == "Delta_qoverpt")) + rel = 1e-3; //make the axis labels manageable + + for (Int_t j = 0; j < nFiles; j++) { + if (((var == "runNumber" && what != Maximum) ? 
findMax(files[j], "runNumber", 'x') < 2 : false) || + files[j] == "") //if it's MC data (run 1), the run number is meaningless + continue; + TFile *f = TFile::Open(files[j]); + TTree *tree = (TTree *)f->Get("cosmicValidation/splitterTree"); + if (tree == nullptr) + tree = (TTree *)f->Get("splitterTree"); + Int_t length = tree->GetEntries(); + + tree->SetBranchAddress("runNumber", &runNumber); + if (var == "runNumber") + tree->SetBranchAddress(variable, &xint); + else if (var.BeginsWith("nHits")) { + tree->SetBranchAddress(variable, &xint); + tree->SetBranchAddress(variable2, &xint2); + } else + tree->SetBranchAddress(variable, &x); -Double_t findStatistic(Statistic what,Int_t nFiles,TString *files,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - Double_t x = 0, //if axis == 'x', var_org goes in x; if axis == 'y', Delta_var goes in x - rel = 1, //if relative, var_org goes in rel. x is divided by rel, so you get Delta_var/var_org - sigma1 = 1, //if pull, the error for split track 1 goes in sigma1 and the error for split track 2 goes in sigma2. - sigma2 = 1, //x is divided by sqrt(sigma1^2+sigma2^2). If !pull && axis == 'y', this divides by sqrt(2) - sigmaorg = 0; // because we want the error in one track. sigmaorg is used when relative && pull - Int_t xint = 0, xint2 = 0; //xint is used for run number and nHits. xint2 is used for nHits because each event has 2 values. 
- - Int_t runNumber = 0; //this is used to make sure the run number is between minrun and maxrun - - if (axis == 'x') - { - sigma1 = 1/sqrt(2); //if axis == 'x' don't divide by sqrt(2) - sigma2 = 1/sqrt(2); - } - - Double_t totallength = 0; - vector xvect; - Double_t result = 0; - if (what == Minimum) result = 1e100; - if (what == Maximum) result = -1e100; - - stringstream sx,srel,ssigma1,ssigma2,ssigmaorg; - - if (axis == 'y') - sx << "Delta_"; - sx << var; - if (axis == 'x' && var != "runNumber" && !var.BeginsWith("nHits")) - sx << "_org"; - if (axis == 'x' && var.BeginsWith("nHits")) - sx << "1_spl"; - TString variable = sx.str(), - variable2 = variable; - variable2.ReplaceAll("1_spl","2_spl"); - - TString relvariable = "1"; if (relative) - { - srel << var << "_org"; - relvariable = srel.str(); + tree->SetBranchAddress(relvariable, &rel); + if (pull) { + tree->SetBranchAddress(sigma1variable, &sigma1); + tree->SetBranchAddress(sigma2variable, &sigma2); } - - if (pull) - { - ssigma1 << var << "1Err_spl"; - ssigma2 << var << "2Err_spl"; - } - TString sigma1variable = ssigma1.str(); - TString sigma2variable = ssigma2.str(); - - if (pull && relative) - ssigmaorg << var << "Err_org"; - TString sigmaorgvariable = ssigmaorg.str(); - - if (!relative && !pull && (variable == "Delta_dxy" || variable == "Delta_dz")) - rel = 1e-4; //it's in cm but we want um - if (!relative && !pull && (variable == "Delta_phi" || variable == "Delta_theta" || variable == "Delta_qoverpt")) - rel = 1e-3; //make the axis labels manageable - - for (Int_t j = 0; j < nFiles; j++) - { - if (((var == "runNumber" && what != Maximum) ? 
findMax(files[j],"runNumber",'x') < 2 : false) || files[j] == "") //if it's MC data (run 1), the run number is meaningless - continue; - TFile *f = TFile::Open(files[j]); - TTree *tree = (TTree*)f->Get("cosmicValidation/splitterTree"); - if (tree == 0) - tree = (TTree*)f->Get("splitterTree"); - Int_t length = tree->GetEntries(); - - tree->SetBranchAddress("runNumber",&runNumber); - if (var == "runNumber") - tree->SetBranchAddress(variable,&xint); - else if (var.BeginsWith("nHits")) - { - tree->SetBranchAddress(variable,&xint); - tree->SetBranchAddress(variable2,&xint2); - } - else - tree->SetBranchAddress(variable,&x); - - if (relative) - tree->SetBranchAddress(relvariable,&rel); - if (pull) - { - tree->SetBranchAddress(sigma1variable,&sigma1); - tree->SetBranchAddress(sigma2variable,&sigma2); - } - if (relative && pull) - tree->SetBranchAddress(sigmaorgvariable,&sigmaorg); - - for (Int_t i = 0; iGetEntry(i); - if (var == "runNumber" || var.BeginsWith("nHits")) - x = xint; - if (var == "runNumber") - runNumber = x; - if (var == "phi" && x >= pi) - x -= 2*pi; - if (var == "phi" && x <= -pi) - x += 2*pi; - if ((runNumber < minrun && runNumber > 1) || (runNumber > maxrun && maxrun > 0)) continue; - - totallength++; - - Double_t error; - if (relative && pull) - error = sqrt((sigma1/rel)*(sigma1/rel) + (sigma2/rel)*(sigma2/rel) + (sigmaorg*x/(rel*rel))*(sigmaorg*x/(rel*rel))); - else - error = sqrt(sigma1 * sigma1 + sigma2 * sigma2); // = 1 if axis == 'x' && !pull - // = sqrt(2) if axis == 'y' && !pull, so that you get the error in 1 track - // when you divide by it - x /= (rel * error); - if (!std::isfinite(x)) //e.g. 
in data with no pixels, the error occasionally comes out to be NaN - continue; //Filling a histogram with NaN is irrelevant, but here it would cause the whole result to be NaN - - if (what == Minimum && x < result) - result = x; - if (what == Maximum && x > result) - result = x; - xvect.push_back(x); - if (var.BeginsWith("nHits")) - { - x = xint2; - if (what == Minimum && x < result) - result = x; - if (what == Maximum && x > result) - result = x; - xvect.push_back(x); - } - } - delete f; //automatically closes the file + if (relative && pull) + tree->SetBranchAddress(sigmaorgvariable, &sigmaorg); + + for (Int_t i = 0; i < length; i++) { + tree->GetEntry(i); + if (var == "runNumber" || var.BeginsWith("nHits")) + x = xint; + if (var == "runNumber") + runNumber = x; + if (var == "phi" && x >= pi) + x -= 2 * pi; + if (var == "phi" && x <= -pi) + x += 2 * pi; + if ((runNumber < minrun && runNumber > 1) || (runNumber > maxrun && maxrun > 0)) + continue; + + totallength++; + + Double_t error; + if (relative && pull) + error = sqrt((sigma1 / rel) * (sigma1 / rel) + (sigma2 / rel) * (sigma2 / rel) + + (sigmaorg * x / (rel * rel)) * (sigmaorg * x / (rel * rel))); + else + error = sqrt(sigma1 * sigma1 + sigma2 * sigma2); // = 1 if axis == 'x' && !pull + // = sqrt(2) if axis == 'y' && !pull, so that you get the error in 1 track + // when you divide by it + x /= (rel * error); + if (!std::isfinite(x)) //e.g. 
in data with no pixels, the error occasionally comes out to be NaN + continue; //Filling a histogram with NaN is irrelevant, but here it would cause the whole result to be NaN + + if (what == Minimum && x < result) + result = x; + if (what == Maximum && x > result) + result = x; + xvect.push_back(x); + if (var.BeginsWith("nHits")) { + x = xint2; + if (what == Minimum && x < result) + result = x; + if (what == Maximum && x > result) + result = x; + xvect.push_back(x); + } } + delete f; //automatically closes the file + } - if (what == Minimum || what == Maximum) - return result; - - sort(xvect.begin(), xvect.end()); - - for (unsigned int i = (unsigned int)(xvect.size()*(1-outliercut)/2); i <= (unsigned int)(xvect.size()*(1+outliercut)/2 + .999); i++, totallength++) - result += xvect[i]; - - result /= totallength; - - if (what == RMS) - { - double average = result; - result = 0; - for (unsigned int i = (unsigned int)(xvect.size()*(1-outliercut)/2); i <= (unsigned int)(xvect.size()*(1+outliercut)/2 + .999); i++) - result += (x - average) * (x - average); - result = sqrt(result / (totallength - 1)); - } + if (what == Minimum || what == Maximum) return result; -} -Double_t findAverage(Int_t nFiles,TString *files,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(Average,nFiles,files,var,axis,relative,pull); + sort(xvect.begin(), xvect.end()); + + for (unsigned int i = (unsigned int)(xvect.size() * (1 - outliercut) / 2); + i <= (unsigned int)(xvect.size() * (1 + outliercut) / 2 + .999); + i++, totallength++) + result += xvect[i]; + + result /= totallength; + + if (what == RMS) { + double average = result; + result = 0; + for (unsigned int i = (unsigned int)(xvect.size() * (1 - outliercut) / 2); + i <= (unsigned int)(xvect.size() * (1 + outliercut) / 2 + .999); + i++) + result += (x - average) * (x - average); + result = sqrt(result / (totallength - 1)); + } + return result; } -Double_t findMin(Int_t nFiles,TString *files,TString var,Char_t 
axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(Minimum,nFiles,files,var,axis,relative,pull); +Double_t findAverage(Int_t nFiles, TString *files, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(Average, nFiles, files, var, axis, relative, pull); } -Double_t findMax(Int_t nFiles,TString *files,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(Maximum,nFiles,files,var,axis,relative,pull); +Double_t findMin(Int_t nFiles, TString *files, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(Minimum, nFiles, files, var, axis, relative, pull); } -Double_t findRMS(Int_t nFiles,TString *files,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(RMS,nFiles,files,var,axis,relative,pull); +Double_t findMax(Int_t nFiles, TString *files, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(Maximum, nFiles, files, var, axis, relative, pull); } +Double_t findRMS(Int_t nFiles, TString *files, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(RMS, nFiles, files, var, axis, relative, pull); +} //These functions are for 1 file -Double_t findStatistic(Statistic what,TString file,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(what,1,&file,var,axis,relative,pull); +Double_t findStatistic(Statistic what, TString file, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(what, 1, &file, var, axis, relative, pull); } -Double_t findAverage(TString file,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(Average,file,var,axis,relative,pull); +Double_t findAverage(TString file, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(Average, file, var, axis, relative, pull); } -Double_t findMin(TString file,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return 
findStatistic(Minimum,file,var,axis,relative,pull); +Double_t findMin(TString file, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(Minimum, file, var, axis, relative, pull); } -Double_t findMax(TString file,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(Maximum,file,var,axis,relative,pull); +Double_t findMax(TString file, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(Maximum, file, var, axis, relative, pull); } -Double_t findRMS(TString file,TString var,Char_t axis,Bool_t relative,Bool_t pull) -{ - return findStatistic(RMS,file,var,axis,relative,pull); +Double_t findRMS(TString file, TString var, Char_t axis, Bool_t relative, Bool_t pull) { + return findStatistic(RMS, file, var, axis, relative, pull); } - - - //This puts the axis limits that should be used for trackSplitPlot in min and max. //Default axis limits are defined for pt, qoverpt, dxy, dz, theta, eta, and phi. //For run number and nHits, the minimum and maximum are used. //For any other variable, average +/- 5*rms are used. 
//To use this instead of the default values, just comment out the part that says [else] if (var == "?") {min = ?; max = ?;} -void axislimits(Int_t nFiles,TString *files,TString var,Char_t axis,Bool_t relative,Bool_t pull,Double_t &min,Double_t &max,Double_t &bins) -{ - bool pixel = subdetector.Contains("PIX"); - if (axis == 'x') - { - if (var == "pt") - { - min = 5; - max = 100; - bins = 38; - } - else if (var == "qoverpt") - { - min = -.35; - max = .35; - bins = 35; - } - else if (var == "dxy") - { - min = -100; - max = 100; - if (pixel) - { - min = -10; - max = 10; - } - bins = 20; - } - else if (var == "dz") - { - min = -250; - max = 250; - if (pixel) - { - min = -25; - max = 25; - } - bins = 25; - } - else if (var == "theta") - { - min = .5; - max = 2.5; - bins = 40; - } - else if (var == "eta") - { - min = -1.2; - max = 1.2; - bins = 40; - } - else if (var == "phi") - { - min = -3; - max = 0; - bins = 30; - } - else if (var == "runNumber" || var.BeginsWith("nHits")) - { - min = findMin(nFiles,files,var,'x') - .5; - max = findMax(nFiles,files,var,'x') + .5; - bins = max-min; - } - else - { - cout << "No x axis limits for " << var << ". 
Using average +/- 5*rms" << endl; - Double_t average = findAverage(nFiles,files,var,'x'); - Double_t rms = findRMS (nFiles,files,var,'x'); - max = TMath::Min(average + 5 * rms,findMax(nFiles,files,var,'x')); - min = TMath::Max(average - 5 * rms,findMin(nFiles,files,var,'x')); - bins = 50; - } +void axislimits(Int_t nFiles, + TString *files, + TString var, + Char_t axis, + Bool_t relative, + Bool_t pull, + Double_t &min, + Double_t &max, + Double_t &bins) { + bool pixel = subdetector.Contains("PIX"); + if (axis == 'x') { + if (var == "pt") { + min = 5; + max = 100; + bins = 38; + } else if (var == "qoverpt") { + min = -.35; + max = .35; + bins = 35; + } else if (var == "dxy") { + min = -100; + max = 100; + if (pixel) { + min = -10; + max = 10; + } + bins = 20; + } else if (var == "dz") { + min = -250; + max = 250; + if (pixel) { + min = -25; + max = 25; + } + bins = 25; + } else if (var == "theta") { + min = .5; + max = 2.5; + bins = 40; + } else if (var == "eta") { + min = -1.2; + max = 1.2; + bins = 40; + } else if (var == "phi") { + min = -3; + max = 0; + bins = 30; + } else if (var == "runNumber" || var.BeginsWith("nHits")) { + min = findMin(nFiles, files, var, 'x') - .5; + max = findMax(nFiles, files, var, 'x') + .5; + bins = max - min; + } else { + cout << "No x axis limits for " << var << ". 
Using average +/- 5*rms" << endl; + Double_t average = findAverage(nFiles, files, var, 'x'); + Double_t rms = findRMS(nFiles, files, var, 'x'); + max = TMath::Min(average + 5 * rms, findMax(nFiles, files, var, 'x')); + min = TMath::Max(average - 5 * rms, findMin(nFiles, files, var, 'x')); + bins = 50; } - if (axis == 'y') - { - if (pull) - { - min = -5; - max = 5; - bins = 40; - } - else if (var == "pt" && relative) - { - min = -.06; - max = .06; - bins = 30; - } - else if (var == "pt" && !relative) - { - min = -.8; - max = .8; - bins = 40; - } - else if (var == "qoverpt") - { - min = -2.5; - max = 2.5; - bins = 50; - } - else if (var == "dxy") - { - min = -1250; - max = 1250; - if (pixel) - { - min = -125; - max = 125; - } - bins = 50; - } - else if (var == "dz") - { - min = -2000; - max = 2000; - if (pixel) - { - min = -200; - max = 200; - } - bins = 40; - } - else if (var == "theta") - { - min = -10; - max = 10; - if (pixel) - { - min = -5; - max = 5; - } - bins = 50; - } - else if (var == "eta") - { - min = -.007; - max = .007; - if (pixel) - { - min = -.003; - max = .003; - } - bins = 30; - } - else if (var == "phi") - { - min = -2; - max = 2; - bins = 40; - } - else - { - cout << "No y axis limits for " << var << ". Using average +/- 5 * rms." 
<< endl; - Double_t average = 0 /*findAverage(nFiles,files,var,'y',relative,pull)*/; - Double_t rms = findRMS (nFiles,files,var,'y',relative,pull); - min = TMath::Max(TMath::Max(-TMath::Abs(average) - 5*rms, - findMin(nFiles,files,var,'y',relative,pull)), - -findMax(nFiles,files,var,'y',relative,pull)); - max = -min; - bins = 50; - } + } + if (axis == 'y') { + if (pull) { + min = -5; + max = 5; + bins = 40; + } else if (var == "pt" && relative) { + min = -.06; + max = .06; + bins = 30; + } else if (var == "pt" && !relative) { + min = -.8; + max = .8; + bins = 40; + } else if (var == "qoverpt") { + min = -2.5; + max = 2.5; + bins = 50; + } else if (var == "dxy") { + min = -1250; + max = 1250; + if (pixel) { + min = -125; + max = 125; + } + bins = 50; + } else if (var == "dz") { + min = -2000; + max = 2000; + if (pixel) { + min = -200; + max = 200; + } + bins = 40; + } else if (var == "theta") { + min = -10; + max = 10; + if (pixel) { + min = -5; + max = 5; + } + bins = 50; + } else if (var == "eta") { + min = -.007; + max = .007; + if (pixel) { + min = -.003; + max = .003; + } + bins = 30; + } else if (var == "phi") { + min = -2; + max = 2; + bins = 40; + } else { + cout << "No y axis limits for " << var << ". Using average +/- 5 * rms." << endl; + Double_t average = 0 /*findAverage(nFiles,files,var,'y',relative,pull)*/; + Double_t rms = findRMS(nFiles, files, var, 'y', relative, pull); + min = TMath::Max(TMath::Max(-TMath::Abs(average) - 5 * rms, findMin(nFiles, files, var, 'y', relative, pull)), + -findMax(nFiles, files, var, 'y', relative, pull)); + max = -min; + bins = 50; } + } } //=============== //5. 
Place Legend //=============== -Double_t placeLegend(TLegend *l, Double_t width, Double_t height, Double_t x1min, Double_t y1min, Double_t x2max, Double_t y2max) -{ - for (int i = legendGrid; i >= 0; i--) - { - for (int j = legendGrid; j >= 0; j--) - { - Double_t x1 = x1min * (1-(double)i/legendGrid) + (x2max - width) * (double)i/legendGrid - margin*width; - Double_t y1 = y1min * (1-(double)j/legendGrid) + (y2max - height) * (double)j/legendGrid - margin*height; - Double_t x2 = x1 + (1+2*margin) * width; - Double_t y2 = y1 + (1+2*margin) * height; - if (fitsHere(l,x1,y1,x2,y2)) - { - x1 += margin*width; - y1 += margin*height; - x2 -= margin*width; - y2 -= margin*height; - l->SetX1(x1); - l->SetY1(y1); - l->SetX2(x2); - l->SetY2(y2); - return y2max; - } - } +Double_t placeLegend( + TLegend *l, Double_t width, Double_t height, Double_t x1min, Double_t y1min, Double_t x2max, Double_t y2max) { + for (int i = legendGrid; i >= 0; i--) { + for (int j = legendGrid; j >= 0; j--) { + Double_t x1 = x1min * (1 - (double)i / legendGrid) + (x2max - width) * (double)i / legendGrid - margin * width; + Double_t y1 = y1min * (1 - (double)j / legendGrid) + (y2max - height) * (double)j / legendGrid - margin * height; + Double_t x2 = x1 + (1 + 2 * margin) * width; + Double_t y2 = y1 + (1 + 2 * margin) * height; + if (fitsHere(l, x1, y1, x2, y2)) { + x1 += margin * width; + y1 += margin * height; + x2 -= margin * width; + y2 -= margin * height; + l->SetX1(x1); + l->SetY1(y1); + l->SetX2(x2); + l->SetY2(y2); + return y2max; + } } - Double_t newy2max = y2max + increaseby * (y2max-y1min); - Double_t newheight = height * (newy2max - y1min) / (y2max - y1min); - return placeLegend(l,width,newheight,x1min,y1min,x2max,newy2max); + } + Double_t newy2max = y2max + increaseby * (y2max - y1min); + Double_t newheight = height * (newy2max - y1min) / (y2max - y1min); + return placeLegend(l, width, newheight, x1min, y1min, x2max, newy2max); } -Bool_t fitsHere(TLegend *l,Double_t x1, Double_t y1, 
Double_t x2, Double_t y2) -{ - Bool_t fits = true; - TList *list = l->GetListOfPrimitives(); - for (Int_t k = 0; list->At(k) != 0 && fits; k++) +Bool_t fitsHere(TLegend *l, Double_t x1, Double_t y1, Double_t x2, Double_t y2) { + Bool_t fits = true; + TList *list = l->GetListOfPrimitives(); + for (Int_t k = 0; list->At(k) != nullptr && fits; k++) { + TObject *obj = ((TLegendEntry *)(list->At(k)))->GetObject(); + if (obj == nullptr) + continue; + TClass *cl = obj->IsA(); + + //Histogram, drawn as a histogram + if (cl->InheritsFrom("TH1") && !cl->InheritsFrom("TH2") && !cl->InheritsFrom("TH3") && cl != TProfile::Class() && + ((TH1 *)obj)->GetMarkerColor() == kWhite) { + Int_t where = 0; + TH1 *h = (TH1 *)obj; + for (Int_t i = 1; i <= h->GetNbinsX() && fits; i++) { + if (h->GetBinLowEdge(i) + h->GetBinWidth(i) < x1) + continue; //to the left of the legend + if (h->GetBinLowEdge(i) > x2) + continue; //to the right of the legend + if (h->GetBinContent(i) > y1 && h->GetBinContent(i) < y2) + fits = false; //inside the legend + if (h->GetBinContent(i) < y1) { + if (where == 0) + where = -1; //below the legend + if (where == 1) + fits = false; //a previous bin was above it so there's a vertical line through it + } + if (h->GetBinContent(i) > y2) { + if (where == 0) + where = 1; //above the legend + if (where == -1) + fits = false; //a previous bin was below it so there's a vertical line through it + } + } + continue; + } + //Histogram, drawn with Draw("P") + else if (cl->InheritsFrom("TH1") && !cl->InheritsFrom("TH2") && !cl->InheritsFrom("TH3") && cl != TProfile::Class()) + //Probably TProfile would be the same but I haven't tested it { - TObject *obj = ((TLegendEntry*)(list->At(k)))->GetObject(); - if (obj == 0) continue; - TClass *cl = obj->IsA(); - - //Histogram, drawn as a histogram - if (cl->InheritsFrom("TH1") && !cl->InheritsFrom("TH2") && !cl->InheritsFrom("TH3") - && cl != TProfile::Class() && ((TH1*)obj)->GetMarkerColor() == kWhite) - { - Int_t where = 0; - TH1 *h 
= (TH1*)obj; - for (Int_t i = 1; i <= h->GetNbinsX() && fits; i++) - { - if (h->GetBinLowEdge(i) + h->GetBinWidth(i) < x1) continue; //to the left of the legend - if (h->GetBinLowEdge(i) > x2) continue; //to the right of the legend - if (h->GetBinContent(i) > y1 && h->GetBinContent(i) < y2) fits = false; //inside the legend - if (h->GetBinContent(i) < y1) - { - if (where == 0) where = -1; //below the legend - if (where == 1) fits = false; //a previous bin was above it so there's a vertical line through it - } - if (h->GetBinContent(i) > y2) - { - if (where == 0) where = 1; //above the legend - if (where == -1) fits = false; //a previous bin was below it so there's a vertical line through it - } - } - continue; - } - //Histogram, drawn with Draw("P") - else if (cl->InheritsFrom("TH1") && !cl->InheritsFrom("TH2") && !cl->InheritsFrom("TH3") - && cl != TProfile::Class()) - //Probably TProfile would be the same but I haven't tested it - { - TH1 *h = (TH1*)obj; - for (Int_t i = 1; i <= h->GetNbinsX() && fits; i++) - { - if (h->GetBinLowEdge(i) + h->GetBinWidth(i)/2 < x1) continue; - if (h->GetBinLowEdge(i) > x2) continue; - if (h->GetBinContent(i) > y1 && h->GetBinContent(i) < y2) fits = false; - if (h->GetBinContent(i) + h->GetBinError(i) > y2 && h->GetBinContent(i) - h->GetBinError(i) < y2) fits = false; - if (h->GetBinContent(i) + h->GetBinError(i) > y1 && h->GetBinContent(i) - h->GetBinError(i) < y1) fits = false; - } - } - else if (cl->InheritsFrom("TF1") && !cl->InheritsFrom("TF2")) - { - TF1 *f = (TF1*)obj; - Double_t max = f->GetMaximum(x1,x2); - Double_t min = f->GetMinimum(x1,x2); - if (min < y2 && max > y1) fits = false; - } - // else if (cl->InheritsFrom(...... add more objects here - else - { - cout << "Don't know how to place the legend around objects of type " << obj->ClassName() << "." << endl - << "Add this class into fitsHere() if you want it to work properly." << endl - << "The legend will still be placed around any other objects." 
<< endl; - } + TH1 *h = (TH1 *)obj; + for (Int_t i = 1; i <= h->GetNbinsX() && fits; i++) { + if (h->GetBinLowEdge(i) + h->GetBinWidth(i) / 2 < x1) + continue; + if (h->GetBinLowEdge(i) > x2) + continue; + if (h->GetBinContent(i) > y1 && h->GetBinContent(i) < y2) + fits = false; + if (h->GetBinContent(i) + h->GetBinError(i) > y2 && h->GetBinContent(i) - h->GetBinError(i) < y2) + fits = false; + if (h->GetBinContent(i) + h->GetBinError(i) > y1 && h->GetBinContent(i) - h->GetBinError(i) < y1) + fits = false; + } + } else if (cl->InheritsFrom("TF1") && !cl->InheritsFrom("TF2")) { + TF1 *f = (TF1 *)obj; + Double_t max = f->GetMaximum(x1, x2); + Double_t min = f->GetMinimum(x1, x2); + if (min < y2 && max > y1) + fits = false; + } + // else if (cl->InheritsFrom(...... add more objects here + else { + cout << "Don't know how to place the legend around objects of type " << obj->ClassName() << "." << endl + << "Add this class into fitsHere() if you want it to work properly." << endl + << "The legend will still be placed around any other objects." 
<< endl; } - return fits; + } + return fits; } diff --git a/Alignment/OfflineValidation/macros/trackSplitPlot.h b/Alignment/OfflineValidation/macros/trackSplitPlot.h index f861dcfd3b8b7..0f2d5d4b17972 100644 --- a/Alignment/OfflineValidation/macros/trackSplitPlot.h +++ b/Alignment/OfflineValidation/macros/trackSplitPlot.h @@ -34,8 +34,8 @@ enum PlotType { ScatterPlot, Profile, Histogram, OrgHistogram, Resolution }; enum Statistic { Minimum, Maximum, Average, RMS }; const Double_t pi = TMath::Pi(); -vector colors; -vector styles; +std::vector colors; +std::vector styles; bool colorsset = false; Int_t minrun = -1; Int_t maxrun = -1; @@ -67,7 +67,7 @@ Table Of Contents #include "trackSplitPlot.h" -ofstream devnull("/dev/null"); +std::ofstream devnull("/dev/null"); template T identity(T t) { return t; @@ -86,7 +86,7 @@ TCanvas *trackSplitPlot(Int_t nFiles, Bool_t resolution = false, Bool_t pull = false, TString saveas = "", - ostream &summaryfile = devnull); + std::ostream &summaryfile = devnull); TCanvas *trackSplitPlot(Int_t nFiles, TString *files, TString *names, @@ -94,7 +94,7 @@ TCanvas *trackSplitPlot(Int_t nFiles, Bool_t relative = false, Bool_t pull = false, TString saveas = "", - ostream &summaryfile = devnull); + std::ostream &summaryfile = devnull); TCanvas *trackSplitPlot(TString file, TString xvar, TString yvar, @@ -103,13 +103,13 @@ TCanvas *trackSplitPlot(TString file, Bool_t resolution = false, Bool_t pull = false, TString saveas = "", - ostream &summaryfile = devnull); + std::ostream &summaryfile = devnull); TCanvas *trackSplitPlot(TString file, TString var, Bool_t relative = false, Bool_t pull = false, TString saveas = "", - ostream &summaryfile = devnull); + std::ostream &summaryfile = devnull); void placeholder(TString saveas = "", Bool_t wide = false); void saveplot(TCanvas *c1, TString saveas); void deleteCanvas(TObject *canvas); diff --git a/Alignment/OfflineValidation/plugins/DMRChecker.cc b/Alignment/OfflineValidation/plugins/DMRChecker.cc 
index 48dac30ea14e2..0bc402c14a808 100644 --- a/Alignment/OfflineValidation/plugins/DMRChecker.cc +++ b/Alignment/OfflineValidation/plugins/DMRChecker.cc @@ -154,10 +154,16 @@ class DMRChecker : public edm::one::EDAnalyzer()), magFieldToken_(esConsumes()), topoToken_(esConsumes()), - latencyToken_(esConsumes()), - isCosmics_(pset.getParameter("isCosmics")) { + isCosmics_(pset.getParameter("isCosmics")), + doLatencyAnalysis_(pset.getParameter("doLatencyAnalysis")) { usesResource(TFileService::kSharedResource); + if (doLatencyAnalysis_) { + latencyToken_ = esConsumes(); + } else { + latencyToken_ = edm::ESGetToken(); + } + TkTag_ = pset.getParameter("TkTag"); theTrackCollectionToken_ = consumes(TkTag_); @@ -231,7 +237,8 @@ class DMRChecker : public edm::one::EDAnalyzer runInfoToken_; const edm::ESGetToken magFieldToken_; const edm::ESGetToken topoToken_; - const edm::ESGetToken latencyToken_; + // not const + edm::ESGetToken latencyToken_; const MagneticField *magneticField_; const TrackerGeometry *trackerGeometry_; @@ -463,12 +470,12 @@ class DMRChecker : public edm::one::EDAnalyzer trackCollection = event.getHandle(theTrackCollectionToken_); - if (firstEvent_) { - if (trackerGeometry_->isThere(GeomDetEnumerators::P2PXB) || - trackerGeometry_->isThere(GeomDetEnumerators::P2PXEC)) { - phase_ = SiPixelPI::phase::two; - } else if (trackerGeometry_->isThere(GeomDetEnumerators::P1PXB) || - trackerGeometry_->isThere(GeomDetEnumerators::P1PXEC)) { - phase_ = SiPixelPI::phase::one; - } else { - phase_ = SiPixelPI::phase::zero; - } - firstEvent_ = false; - } - GlobalPoint zeroPoint(0, 0, 0); if (DEBUG) edm::LogVerbatim("DMRChecker") << "event #" << ievt << " Event ID = " << event.id() @@ -589,7 +583,8 @@ class DMRChecker : public edm::one::EDAnalyzerisValid() && (subid > PixelSubdetector::PixelEndcap)) { - tmap->fill(detid_db, 1); + if (phase_ != SiPixelPI::phase::two) + tmap->fill(detid_db, 1); //LocalPoint lp = (*iHit)->localPosition(); //LocalError le = 
(*iHit)->localPositionError(); @@ -1105,20 +1100,38 @@ class DMRChecker : public edm::one::EDAnalyzersingleReadOutMode() == 1) { - mode = 1; // peak mode - } else if (apvlat->singleReadOutMode() == 0) { - mode = -1; // deco mode + // set geometry and topology + trackerGeometry_ = &setup.getData(geomToken_); + if (trackerGeometry_->isThere(GeomDetEnumerators::P2PXB) || trackerGeometry_->isThere(GeomDetEnumerators::P2PXEC)) { + phase_ = SiPixelPI::phase::two; + } else if (trackerGeometry_->isThere(GeomDetEnumerators::P1PXB) || + trackerGeometry_->isThere(GeomDetEnumerators::P1PXEC)) { + phase_ = SiPixelPI::phase::one; + } else { + phase_ = SiPixelPI::phase::zero; + } + + trackerTopology_ = &setup.getData(topoToken_); + + // if it's a phase-2 geometry there are no phase-1 conditions + if (phase_ == SiPixelPI::phase::two) { + mode = 0; + } else { + if (doLatencyAnalysis_) { + //SiStrip Latency + const SiStripLatency *apvlat = &setup.getData(latencyToken_); + if (apvlat->singleReadOutMode() == 1) { + mode = 1; // peak mode + } else if (apvlat->singleReadOutMode() == 0) { + mode = -1; // deco mode + } + } else { + mode = 0.; + } } conditionsMap_[run.run()].first = mode; conditionsMap_[run.run()].second = B_; - - // set geometry and topology - trackerGeometry_ = &setup.getData(geomToken_); - trackerTopology_ = &setup.getData(topoToken_); } //************************************************************* @@ -1378,11 +1391,8 @@ class DMRChecker : public edm::one::EDAnalyzer("h2_kappa_vs_phi", "#kappa vs. #phi;#phi_{Track};#kappa", 100, -M_PI, M_PI, 100, .0, .05)); vTrack2DHistos_.push_back(book("h2_kappa_vs_eta", "#kappa vs. #eta;#eta_{Track};#kappa", 100, -etaMax_, etaMax_, 100, .0, .05)); vTrack2DHistos_.push_back(book("h2_normchi2_vs_kappa", "#kappa vs. 
#chi^{2}/ndof;#chi^{2}/ndof;#kappa", 100, 0., 10, 100, -.03, .03)); - // clang-format on - firstEvent_ = true; - // create the full maps fullPixelmapXDMR->createTrackerBaseMap(); fullPixelmapYDMR->createTrackerBaseMap(); @@ -2102,6 +2112,7 @@ void DMRChecker::fillDescriptions(edm::ConfigurationDescriptions &descriptions) desc.add("BeamSpotTag", edm::InputTag("offlineBeamSpot")); desc.add("VerticesTag", edm::InputTag("offlinePrimaryVertices")); desc.add("isCosmics", false); + desc.add("doLatencyAnalysis", true); descriptions.addWithDefaultLabel(desc); } diff --git a/Alignment/OfflineValidation/plugins/DiMuonVertexValidation.cc b/Alignment/OfflineValidation/plugins/DiMuonVertexValidation.cc index 53b0021ee111e..5af46dc1759ee 100644 --- a/Alignment/OfflineValidation/plugins/DiMuonVertexValidation.cc +++ b/Alignment/OfflineValidation/plugins/DiMuonVertexValidation.cc @@ -79,18 +79,17 @@ class DiMuonVertexValidation : public edm::one::EDAnalyzer pTthresholds_; - float maxSVdist_; + const float maxSVdist_; // plot configurations - - edm::ParameterSet CosPhiConfiguration_; - edm::ParameterSet CosPhi3DConfiguration_; - edm::ParameterSet VtxProbConfiguration_; - edm::ParameterSet VtxDistConfiguration_; - edm::ParameterSet VtxDist3DConfiguration_; - edm::ParameterSet VtxDistSigConfiguration_; - edm::ParameterSet VtxDist3DSigConfiguration_; - edm::ParameterSet DiMuMassConfiguration_; + const edm::ParameterSet CosPhiConfiguration_; + const edm::ParameterSet CosPhi3DConfiguration_; + const edm::ParameterSet VtxProbConfiguration_; + const edm::ParameterSet VtxDistConfiguration_; + const edm::ParameterSet VtxDist3DConfiguration_; + const edm::ParameterSet VtxDistSigConfiguration_; + const edm::ParameterSet VtxDist3DSigConfiguration_; + const edm::ParameterSet DiMuMassConfiguration_; // control plots @@ -121,15 +120,20 @@ class DiMuonVertexValidation : public edm::one::EDAnalyzer ttbESToken_; - edm::EDGetTokenT tracksToken_; //used to select what tracks to read from configuration 
file - edm::EDGetTokenT vertexToken_; //used to select what vertices to read from configuration file + //used to select what tracks to read from configuration file + edm::EDGetTokenT tracksToken_; + //used to select what vertices to read from configuration file + const edm::EDGetTokenT vertexToken_; // either on or the other! - edm::EDGetTokenT muonsToken_; //used to select what tracks to read from configuration file - edm::EDGetTokenT - alcaRecoToken_; //used to select what muon tracks to read from configuration file + edm::EDGetTokenT muonsToken_; // used to select tracks to read from configuration file + edm::EDGetTokenT alcaRecoToken_; //used to select muon tracks to read from configuration file }; // @@ -160,12 +164,12 @@ DiMuonVertexValidation::DiMuonVertexValidation(const edm::ParameterSet& iConfig) VtxDist3DSigConfiguration_(iConfig.getParameter("VtxDist3DSigConfig")), DiMuMassConfiguration_(iConfig.getParameter("DiMuMassConfig")), ttbESToken_(esConsumes(edm::ESInputTag("", "TransientTrackBuilder"))), - tracksToken_(consumes(iConfig.getParameter("tracks"))), vertexToken_(consumes(iConfig.getParameter("vertices"))) { if (useReco_) { - muonsToken_ = mayConsume(iConfig.getParameter("muons")); + tracksToken_ = consumes(iConfig.getParameter("tracks")); + muonsToken_ = consumes(iConfig.getParameter("muons")); } else { - alcaRecoToken_ = mayConsume(iConfig.getParameter("muonTracks")); + alcaRecoToken_ = consumes(iConfig.getParameter("muonTracks")); } usesResource(TFileService::kSharedResource); @@ -319,6 +323,7 @@ void DiMuonVertexValidation::analyze(const edm::Event& iEvent, const edm::EventS // fill the z->mm mass plots ZMassPlots.fillPlots(track_invMass, tktk_p4); + InvMassInEtaBins.fillTH1Plots(track_invMass, tktk_p4); math::XYZPoint ZpT(ditrack.x(), ditrack.y(), 0); math::XYZPoint Zp(ditrack.x(), ditrack.y(), ditrack.z()); @@ -345,7 +350,7 @@ void DiMuonVertexValidation::analyze(const edm::Event& iEvent, const edm::EventS // fill the VtxProb plots 
VtxProbPlots.fillPlots(SVProb, tktk_p4); - math::XYZPoint MainVertex(0, 0, 0); + math::XYZPoint mainVtxPos(0, 0, 0); const reco::Vertex* theClosestVertex = nullptr; // get collection of reconstructed vertices from event edm::Handle vertexHandle = iEvent.getHandle(vertexToken_); @@ -357,35 +362,35 @@ void DiMuonVertexValidation::analyze(const edm::Event& iEvent, const edm::EventS return; } - reco::Vertex TheMainVertex; + reco::Vertex theMainVertex; if (!useClosestVertex_ || theClosestVertex == nullptr) { // if the closest vertex is not available, or explicitly not chosen - TheMainVertex = vertexHandle.product()->front(); + theMainVertex = vertexHandle.product()->front(); } else { - TheMainVertex = *theClosestVertex; + theMainVertex = *theClosestVertex; } - MainVertex.SetXYZ(TheMainVertex.position().x(), TheMainVertex.position().y(), TheMainVertex.position().z()); + mainVtxPos.SetXYZ(theMainVertex.position().x(), theMainVertex.position().y(), theMainVertex.position().z()); const math::XYZPoint myVertex( aTransientVertex.position().x(), aTransientVertex.position().y(), aTransientVertex.position().z()); const math::XYZPoint deltaVtx( - MainVertex.x() - myVertex.x(), MainVertex.y() - myVertex.y(), MainVertex.z() - myVertex.z()); + mainVtxPos.x() - myVertex.x(), mainVtxPos.y() - myVertex.y(), mainVtxPos.z() - myVertex.z()); #ifdef EDM_ML_DEBUG edm::LogVerbatim("DiMuonVertexValidation") << "mm vertex position:" << aTransientVertex.position().x() << "," << aTransientVertex.position().y() << "," << aTransientVertex.position().z(); - edm::LogVerbatim("DiMuonVertexValidation") << "main vertex position:" << TheMainVertex.position().x() << "," - << TheMainVertex.position().y() << "," << TheMainVertex.position().z(); + edm::LogVerbatim("DiMuonVertexValidation") << "main vertex position:" << theMainVertex.position().x() << "," + << theMainVertex.position().y() << "," << theMainVertex.position().z(); #endif - if (TheMainVertex.isValid()) { + if (theMainVertex.isValid()) { // Z 
Vertex distance in the xy plane VertexDistanceXY vertTool; - double distance = vertTool.distance(aTransientVertex, TheMainVertex).value(); - double dist_err = vertTool.distance(aTransientVertex, TheMainVertex).error(); + double distance = vertTool.distance(aTransientVertex, theMainVertex).value(); + double dist_err = vertTool.distance(aTransientVertex, theMainVertex).error(); hSVDist_->Fill(distance * cmToum); hSVDistSig_->Fill(distance / dist_err); @@ -399,8 +404,8 @@ void DiMuonVertexValidation::analyze(const edm::Event& iEvent, const edm::EventS // Z Vertex distance in 3D VertexDistance3D vertTool3D; - double distance3D = vertTool3D.distance(aTransientVertex, TheMainVertex).value(); - double dist3D_err = vertTool3D.distance(aTransientVertex, TheMainVertex).error(); + double distance3D = vertTool3D.distance(aTransientVertex, theMainVertex).value(); + double dist3D_err = vertTool3D.distance(aTransientVertex, theMainVertex).error(); hSVDist3D_->Fill(distance3D * cmToum); hSVDist3DSig_->Fill(distance3D / dist3D_err); @@ -437,8 +442,11 @@ void DiMuonVertexValidation::analyze(const edm::Event& iEvent, const edm::EventS // fill the cosphi plots CosPhiPlots.fillPlots(cosphi, tktk_p4); - // fill the VtxDisSig plots + // fill the cosphi3D plots CosPhi3DPlots.fillPlots(cosphi3D, tktk_p4); + + // fill the cosphi3D plots in eta bins + CosPhi3DInEtaBins.fillTH1Plots(cosphi3D, tktk_p4); } } } @@ -449,16 +457,34 @@ void DiMuonVertexValidation::beginJob() { // clang-format off TH1F::SetDefaultSumw2(kTRUE); - hSVProb_ = fs->make("VtxProb", ";ZV vertex probability;N(#mu#mu pairs)", 100, 0., 1.); + hSVProb_ = fs->make("VtxProb", ";#mu^{+}#mu^{-} vertex probability;N(#mu#mu pairs)", 100, 0., 1.); + + auto extractRangeValues = [](const edm::ParameterSet& PSetConfiguration_) -> std::pair { + double min = PSetConfiguration_.getParameter("ymin"); + double max = PSetConfiguration_.getParameter("ymax"); + return {min, max}; + }; - hSVDist_ = fs->make("VtxDist", ";PV-ZV xy distance 
[#mum];N(#mu#mu pairs)", 100, 0., 300.); - hSVDistSig_ = fs->make("VtxDistSig", ";PV-ZV xy distance signficance;N(#mu#mu pairs)", 100, 0., 5.); + // take the range from the 2D histograms + const auto& svDistRng = extractRangeValues(VtxDistConfiguration_); + hSVDist_ = fs->make("VtxDist", ";PV-#mu^{+}#mu^{-} vertex xy distance [#mum];N(#mu#mu pairs)", 100, svDistRng.first, svDistRng.second); - hSVDist3D_ = fs->make("VtxDist3D", ";PV-ZV 3D distance [#mum];N(#mu#mu pairs)", 100, 0., 300.); - hSVDist3DSig_ = fs->make("VtxDist3DSig", ";PV-ZV 3D distance signficance;N(#mu#mu pairs)", 100, 0., 5.); + // take the range from the 2D histograms + const auto& svDistSigRng = extractRangeValues(VtxDistSigConfiguration_); + hSVDistSig_ = fs->make("VtxDistSig", ";PV-#mu^{+}#mu^{-} vertex xy distance significance;N(#mu#mu pairs)", 100, svDistSigRng.first, svDistSigRng.second); - hInvMass_ = fs->make("InvMass", ";M(#mu#mu) [GeV];N(#mu#mu pairs)", 70., 50., 120.); - hTrackInvMass_ = fs->make("TkTkInvMass", ";M(tk,tk) [GeV];N(tk tk pairs)", 70., 50., 120.); + // take the range from the 2D histograms + const auto& svDist3DRng = extractRangeValues(VtxDist3DConfiguration_); + hSVDist3D_ = fs->make("VtxDist3D", ";PV-#mu^{+}#mu^{-} vertex 3D distance [#mum];N(#mu#mu pairs)", 100, svDist3DRng.first, svDist3DRng.second); + + // take the range from the 2D histograms + const auto& svDist3DSigRng = extractRangeValues(VtxDist3DSigConfiguration_); + hSVDist3DSig_ = fs->make("VtxDist3DSig", ";PV-#mu^{+}#mu^{-} vertex 3D distance significance;N(#mu#mu pairs)", 100, svDist3DSigRng.first, svDist3DSigRng.second); + + // take the range from the 2D histograms + const auto& massRng = extractRangeValues(DiMuMassConfiguration_); + hInvMass_ = fs->make("InvMass", ";M(#mu#mu) [GeV];N(#mu#mu pairs)", 70., massRng.first, massRng.second); + hTrackInvMass_ = fs->make("TkTkInvMass", ";M(tk,tk) [GeV];N(tk tk pairs)", 70., massRng.first, massRng.second); hCosPhi_ = fs->make("CosPhi", ";cos(#phi_{xy});N(#mu#mu pairs)",
50, -1., 1.); hCosPhi3D_ = fs->make("CosPhi3D", ";cos(#phi_{3D});N(#mu#mu pairs)", 50, -1., 1.); @@ -493,6 +519,14 @@ void DiMuonVertexValidation::beginJob() { TFileDirectory dirInvariantMass = fs->mkdir("InvariantMassPlots"); ZMassPlots.bookFromPSet(dirInvariantMass, DiMuMassConfiguration_); + // CosPhi3D in eta bins + TFileDirectory dirCosphi3DEta = fs->mkdir("CosPhi3DInEtaBins"); + CosPhi3DInEtaBins.bookSet(dirCosphi3DEta, hCosPhi3D_); + + // Z-> mm mass in eta bins + TFileDirectory dirResMassEta = fs->mkdir("TkTkMassInEtaBins"); + InvMassInEtaBins.bookSet(dirResMassEta, hTrackInvMass_); + // cut flow hCutFlow_ = fs->make("hCutFlow", "cut flow;cut step;events left", 6, -0.5, 5.5); diff --git a/Alignment/OfflineValidation/plugins/GeneralPurposeTrackAnalyzer.cc b/Alignment/OfflineValidation/plugins/GeneralPurposeTrackAnalyzer.cc index 080f8c39542e1..73f043c4cdd48 100644 --- a/Alignment/OfflineValidation/plugins/GeneralPurposeTrackAnalyzer.cc +++ b/Alignment/OfflineValidation/plugins/GeneralPurposeTrackAnalyzer.cc @@ -869,7 +869,7 @@ class GeneralPurposeTrackAnalyzer : public edm::one::EDAnalyzer("h_Eta", "Track pseudorapidity; track #eta;tracks", 100, -etaMax_, etaMax_); hPhi = book("h_Phi", "Track azimuth; track #phi;tracks", 100, -M_PI, M_PI); - hPhiBarrel = book("h_PhiBarrel", "hPhiBarrel (0<|#eta|<0.8);track #Phi;tracks", 100, -M_PI, M_PI); + hPhiBarrel = book("h_PhiBarrel", "hPhiBarrel (0<|#eta|<0.8);track #phi;tracks", 100, -M_PI, M_PI); hPhiOverlapPlus = book("h_PhiOverlapPlus", "hPhiOverlapPlus (0.8<#eta<1.4);track #phi;tracks", 100, -M_PI, M_PI); hPhiOverlapMinus = @@ -880,7 +880,7 @@ class GeneralPurposeTrackAnalyzer : public edm::one::EDAnalyzer("h_BSx0", "x-coordinate of reco beamspot;x^{BS}_{0};n_{events}", 100, -0.1, 0.1); h_BSy0 = book("h_BSy0", "y-coordinate of reco beamspot;y^{BS}_{0};n_{events}", 100, -0.1, 0.1); h_BSz0 = book("h_BSz0", "z-coordinate of reco beamspot;z^{BS}_{0};n_{events}", 100, -1., 1.); - h_Beamsigmaz = book("h_Beamsigmaz", 
"z-coordinate beam width;#sigma_{Z}^{beam};n_{events}", 100, 0., 1.); + h_Beamsigmaz = book("h_Beamsigmaz", "z-coordinate beam width;#sigma_{Z}^{beam};n_{events}", 100, 0., 7.); h_BeamWidthX = book("h_BeamWidthX", "x-coordinate beam width;#sigma_{X}^{beam};n_{events}", 100, 0., 0.01); h_BeamWidthY = book("h_BeamWidthY", "y-coordinate beam width;#sigma_{Y}^{beam};n_{events}", 100, 0., 0.01); h_BSdxdz = book("h_BSdxdz", "BeamSpot dxdz;beamspot dx/dz;n_{events}", 100, -0.0003, 0.0003); @@ -1126,6 +1126,12 @@ class GeneralPurposeTrackAnalyzer : public edm::one::EDAnalyzerGetXaxis()->SetBinLabel((the_r - theRuns_.front()) + 1, std::to_string(the_r).c_str()); } + static const int kappadiffindex = this->index(vTrackHistos_, "h_diff_curvature"); + vTrackHistos_[kappadiffindex]->Add(vTrackHistos_[this->index(vTrackHistos_, "h_curvature_neg")], + vTrackHistos_[this->index(vTrackHistos_, "h_curvature_pos")], + -1, + 1); + if (phase_ < SiPixelPI::phase::two) { if (phase_ == SiPixelPI::phase::zero) { pmap->save(true, 0, 0, "PixelHitMap.pdf", 600, 800); diff --git a/Alignment/OfflineValidation/plugins/PrimaryVertexValidation.cc b/Alignment/OfflineValidation/plugins/PrimaryVertexValidation.cc index 5bfa958662f29..22e4050ef707a 100644 --- a/Alignment/OfflineValidation/plugins/PrimaryVertexValidation.cc +++ b/Alignment/OfflineValidation/plugins/PrimaryVertexValidation.cc @@ -55,8 +55,9 @@ #include "Geometry/TrackerGeometryBuilder/interface/PixelTopologyMap.h" #include "Geometry/Records/interface/TrackerTopologyRcd.h" #include "RecoVertex/PrimaryVertexProducer/interface/TrackFilterForPVFinding.h" +#include "RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h" #include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ_vect.h" -#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h" +#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZT_vect.h" #include "RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h" 
#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" #include "TrackingTools/TransientTrack/interface/TransientTrack.h" @@ -120,11 +121,6 @@ PrimaryVertexValidation::PrimaryVertexValidation(const edm::ParameterSet& iConfi theTrackClusterizer_ = std::make_unique(iConfig.getParameter("TkClusParameters") .getParameter("TkGapClusParameters")); - } else if (clusteringAlgorithm == "DA") { - theTrackClusterizer_ = - std::make_unique(iConfig.getParameter("TkClusParameters") - .getParameter("TkDAClusParameters")); - // provide the vectorized version of the clusterizer, if supported by the build } else if (clusteringAlgorithm == "DA_vect") { theTrackClusterizer_ = std::make_unique(iConfig.getParameter("TkClusParameters") @@ -3700,7 +3696,7 @@ void PrimaryVertexValidation::fillDescriptions(edm::ConfigurationDescriptions& d // track filtering edm::ParameterSetDescription psd0; TrackFilterForPVFinding::fillPSetDescription(psd0); - psd0.add("numTracksThreshold", 0); // HI only + HITrackFilterForPVFinding::fillPSetDescription(psd0); // HI only desc.add("TkFilterParameters", psd0); // PV Clusterization @@ -3708,7 +3704,7 @@ void PrimaryVertexValidation::fillDescriptions(edm::ConfigurationDescriptions& d edm::ParameterSetDescription psd0; { edm::ParameterSetDescription psd1; - DAClusterizerInZ_vect::fillPSetDescription(psd1); + DAClusterizerInZT_vect::fillPSetDescription(psd1); psd0.add("TkDAClusParameters", psd1); edm::ParameterSetDescription psd2; diff --git a/Alignment/OfflineValidation/plugins/ShortenedTrackValidation.cc b/Alignment/OfflineValidation/plugins/ShortenedTrackValidation.cc new file mode 100644 index 0000000000000..d606dd6f2704d --- /dev/null +++ b/Alignment/OfflineValidation/plugins/ShortenedTrackValidation.cc @@ -0,0 +1,600 @@ +// -*- C++ -*- +// +// Package: Alignment/OfflineValidation +// Class: ShortenedTrackValidation +// +/* + *\class ShortenedTrackValidation ShortenedTrackValidation.cc 
Alignment/OfflineValidation/plugins/ShortenedTrackValidation.cc + + Description: This module is meant to monitor the track pT resolution using the amputated tracks method, by comparing the performance using different alignments. + + Implementation: The implemenation takes advantage of the existing implementation in the DQM/TrackingMonitorSource. + +*/ +// +// Original Author: Marco Musich +// Created: Fri, 05 Jan 2023 11:41:00 GMT +// +// + +// ROOT includes files +#include "TMath.h" +#include "TFile.h" +#include "TH1D.h" +#include "TH1I.h" +#include "TH2D.h" +#include "TProfile.h" +#include "TLorentzVector.h" + +// standard includes +#include + +// user includes +#include "CommonTools/UtilAlgos/interface/TFileService.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/SiStripDetId/interface/SiStripDetId.h" +#include "DataFormats/TrackerRecHit2D/interface/ProjectedSiStripRecHit2D.h" +#include "DataFormats/TrackerRecHit2D/interface/SiStripMatchedRecHit2D.h" +#include "DataFormats/TrackerRecHit2D/interface/SiStripRecHit1D.h" +#include "DataFormats/TrackerRecHit2D/interface/SiTrackerMultiRecHit.h" +#include "DataFormats/VertexReco/interface/Vertex.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "FWCore/Utilities/interface/transform.h" // for edm::vector_transform + +#define CREATE_HIST_1D(varname, nbins, first, last, fs) fs.make(#varname, #varname, nbins, first, last) + +#define CREATE_HIST_2D(varname, nbins, first, last, fs) \ + fs.make(#varname, #varname, nbins, first, last, nbins, first, last) + +const int kBPIX = PixelSubdetector::PixelBarrel; +const int kFPIX = PixelSubdetector::PixelEndcap; + +class 
ShortenedTrackValidation : public edm::one::EDAnalyzer { + class trackingMon { + public: + trackingMon() {} + ~trackingMon() = default; + + void book(const TFileDirectory &fs) { + h_chi2ndof = CREATE_HIST_1D(h_chi2ndof, 100, 0.0, 10.0, fs); + h_trkQuality = CREATE_HIST_1D(h_trkQuality, 6, -1, 5, fs); + h_trkAlgo = CREATE_HIST_1D(h_trkAlgo, reco::TrackBase::algoSize, 0.0, double(reco::TrackBase::algoSize), fs); + h_trkOriAlgo = + CREATE_HIST_1D(h_trkOriAlgo, reco::TrackBase::algoSize, 0.0, double(reco::TrackBase::algoSize), fs); + h_P = CREATE_HIST_1D(h_P, 100, 0.0, 200.0, fs); + h_Pt = CREATE_HIST_1D(h_Pt, 100, 0.0, 100.0, fs); + h_nHit = CREATE_HIST_1D(h_nHit, 50, -0.5, 49.5, fs); + h_nHit2D = CREATE_HIST_1D(h_nHit2D, 20, -0.5, 19.5, fs); + h_Charge = CREATE_HIST_1D(h_Charge, 3, -1.5, 1.5, fs); + h_QoverP = CREATE_HIST_1D(h_QoverP, 100, -1.0, 1.0, fs); + h_QoverPZoom = CREATE_HIST_1D(h_QoverPZoom, 100, -0.1, 0.1, fs); + h_Eta = CREATE_HIST_1D(h_Eta, 100, -3., 3., fs); + h_Phi = CREATE_HIST_1D(h_Phi, 100, -M_PI, M_PI, fs); + h_vx = CREATE_HIST_1D(h_vx, 100, -0.5, 0.5, fs); + h_vy = CREATE_HIST_1D(h_vy, 100, -0.5, 0.5, fs); + h_vz = CREATE_HIST_1D(h_vz, 100, -20.0, 20.0, fs); + h_d0 = CREATE_HIST_1D(h_d0, 100, -0.5, 0.5, fs); + h_dz = CREATE_HIST_1D(h_dz, 100, -20.0, 20.0, fs); + h_dxy = CREATE_HIST_1D(h_dxy, 100, -0.5, 0.5, fs); + h_nhpxb = CREATE_HIST_1D(h_nhpxb, 10, -0.5, 9.5, fs); + h_nhpxe = CREATE_HIST_1D(h_nhpxe, 10, -0.5, 9.5, fs); + h_nhTIB = CREATE_HIST_1D(h_nhTIB, 20, -0.5, 19.5, fs); + h_nhTID = CREATE_HIST_1D(h_nhTID, 20, -0.5, 19.5, fs); + h_nhTOB = CREATE_HIST_1D(h_nhTOB, 20, -0.5, 19.5, fs); + h_nhTEC = CREATE_HIST_1D(h_nhTEC, 20, -0.5, 19.5, fs); + h_dxyBS = CREATE_HIST_1D(h_dxyBS, 100, -0.05, 0.05, fs); + h_d0BS = CREATE_HIST_1D(h_d0BS, 100, -0.05, 0.05, fs); + h_dzBS = CREATE_HIST_1D(h_dzBS, 100, -20.0, 20., fs); + h_dxyPV = CREATE_HIST_1D(h_dxyPV, 100, -0.05, 0.05, fs); + h_d0PV = CREATE_HIST_1D(h_d0PV, 100, -0.05, 0.05, fs); + h_dzPV = 
CREATE_HIST_1D(h_dzPV, 100, -0.05, 0.05, fs); + + edm::LogInfo("trackingMonitoring") << "done booking"; + } + + //____________________________________________________________ + int trackQual(const reco::Track &track) { + int myquality = -99; + if (track.quality(reco::TrackBase::undefQuality)) + myquality = -1; + if (track.quality(reco::TrackBase::loose)) + myquality = 0; + if (track.quality(reco::TrackBase::tight)) + myquality = 1; + if (track.quality(reco::TrackBase::highPurity)) + myquality = 2; + if (track.quality(reco::TrackBase::goodIterative)) + myquality = 3; + + return myquality; + } + + //____________________________________________________________ + static bool isHit2D(const TrackingRecHit &hit) { + if (hit.dimension() < 2) { + return false; // some (muon...) stuff really has RecHit1D + } else { + const DetId detId(hit.geographicalId()); + if (detId.det() == DetId::Tracker) { + if (detId.subdetId() == kBPIX || detId.subdetId() == kFPIX) { + return true; // pixel is always 2D + } else { // should be SiStrip now + if (dynamic_cast(&hit)) + return false; // normal hit + else if (dynamic_cast(&hit)) + return true; // matched is 2D + else if (dynamic_cast(&hit)) + return false; // crazy hit... + else { + edm::LogError("UnknownType") << "@SUB=CalibrationTrackSelector::isHit2D" + << "Tracker hit not in pixel and neither SiStripRecHit2D nor " + << "SiStripMatchedRecHit2D nor ProjectedSiStripRecHit2D."; + return false; + } + } + } else { // not tracker?? + edm::LogWarning("DetectorMismatch") << "@SUB=CalibrationTrackSelector::isHit2D" + << "Hit not in tracker with 'official' dimension >=2."; + return true; // dimension() >= 2 so accept that... + } + } + // never reached... 
+ } + + //____________________________________________________________ + unsigned int count2DHits(const reco::Track &track) { + unsigned int nHit2D = 0; + for (auto iHit = track.recHitsBegin(); iHit != track.recHitsEnd(); ++iHit) { + if (isHit2D(**iHit)) { + ++nHit2D; + } + } + return nHit2D; + } + + //____________________________________________________________ + void fill(const reco::Track &track, const reco::BeamSpot &beamSpot, const reco::Vertex &pvtx) { + h_chi2ndof->Fill(track.normalizedChi2()); + h_trkQuality->Fill(trackQual(track)); + h_trkAlgo->Fill(static_cast(track.algo())); + h_trkOriAlgo->Fill(static_cast(track.originalAlgo())); + h_P->Fill(track.p()); + h_Pt->Fill(track.pt()); + h_nHit->Fill(track.numberOfValidHits()); + h_nHit2D->Fill(count2DHits(track)); + h_Charge->Fill(track.charge()); + h_QoverP->Fill(track.qoverp()); + h_QoverPZoom->Fill(track.qoverp()); + h_Eta->Fill(track.eta()); + h_Phi->Fill(track.phi()); + h_vx->Fill(track.vx()); + h_vy->Fill(track.vy()); + h_vz->Fill(track.vz()); + h_d0->Fill(track.d0()); + h_dz->Fill(track.dz()); + h_dxy->Fill(track.dxy()); + h_nhpxb->Fill(track.hitPattern().numberOfValidPixelBarrelHits()); + h_nhpxe->Fill(track.hitPattern().numberOfValidPixelEndcapHits()); + h_nhTIB->Fill(track.hitPattern().numberOfValidStripTIBHits()); + h_nhTID->Fill(track.hitPattern().numberOfValidStripTIDHits()); + h_nhTOB->Fill(track.hitPattern().numberOfValidStripTOBHits()); + h_nhTEC->Fill(track.hitPattern().numberOfValidStripTECHits()); + + math::XYZPoint BS(beamSpot.x0(), beamSpot.y0(), beamSpot.z0()); + h_dxyBS->Fill(track.dxy(BS)); + h_d0BS->Fill(-track.dxy(BS)); + h_dzBS->Fill(track.dz(BS)); + + math::XYZPoint PV(pvtx.x(), pvtx.y(), pvtx.z()); + h_dxyPV->Fill(track.dxy(PV)); + h_d0PV->Fill(-track.dxy(PV)); + h_dzPV->Fill(track.dz(PV)); + } + + private: + TH1D *h_chi2ndof; + TH1D *h_trkQuality; + TH1D *h_trkAlgo; + TH1D *h_trkOriAlgo; + TH1D *h_P; + TH1D *h_Pt; + TH1D *h_nHit; + TH1D *h_nHit2D; + TH1D *h_Charge; + TH1D 
*h_QoverP; + TH1D *h_QoverPZoom; + TH1D *h_Eta; + TH1D *h_Phi; + TH1D *h_vx; + TH1D *h_vy; + TH1D *h_vz; + TH1D *h_d0; + TH1D *h_dz; + TH1D *h_dxy; + TH1D *h_nhpxb; + TH1D *h_nhpxe; + TH1D *h_nhTIB; + TH1D *h_nhTID; + TH1D *h_nhTOB; + TH1D *h_nhTEC; + TH1D *h_dxyBS; + TH1D *h_d0BS; + TH1D *h_dzBS; + TH1D *h_dxyPV; + TH1D *h_d0PV; + TH1D *h_dzPV; + }; + + class trackComparator { + public: + trackComparator() {} + ~trackComparator() = default; + + //__________________________________________________ + void book(const TFileDirectory &fs) { + h2_chi2ndof = CREATE_HIST_2D(h2_chi2ndof, 100, 0.0, 10.0, fs); + h2_trkAlgo = CREATE_HIST_2D(h2_trkAlgo, reco::TrackBase::algoSize, 0.0, double(reco::TrackBase::algoSize), fs); + h2_trkOriAlgo = + CREATE_HIST_2D(h2_trkOriAlgo, reco::TrackBase::algoSize, 0.0, double(reco::TrackBase::algoSize), fs); + h2_P = CREATE_HIST_2D(h2_P, 100, 0.0, 200.0, fs); + h2_Pt = CREATE_HIST_2D(h2_Pt, 100, 0.0, 100.0, fs); + h2_nHit = CREATE_HIST_2D(h2_nHit, 50, -0.5, 49.5, fs); + h2_Charge = CREATE_HIST_2D(h2_Charge, 3, -1.5, 1.5, fs); + h2_QoverPZoom = CREATE_HIST_2D(h2_QoverPZoom, 100, -0.1, 0.1, fs); + h2_Eta = CREATE_HIST_2D(h2_Eta, 100, -3., 3., fs); + h2_Phi = CREATE_HIST_2D(h2_Phi, 100, -M_PI, M_PI, fs); + h2_vx = CREATE_HIST_2D(h2_vx, 100, -0.5, 0.5, fs); + h2_vy = CREATE_HIST_2D(h2_vy, 100, -0.5, 0.5, fs); + h2_vz = CREATE_HIST_2D(h2_vz, 100, -20.0, 20.0, fs); + h2_d0 = CREATE_HIST_2D(h2_d0, 100, -0.5, 0.5, fs); + h2_dz = CREATE_HIST_2D(h2_dz, 100, -20.0, 20.0, fs); + h2_nhpxb = CREATE_HIST_2D(h2_nhpxb, 10, -0.5, 9.5, fs); + h2_nhpxe = CREATE_HIST_2D(h2_nhpxe, 10, -0.5, 9.5, fs); + h2_nhTIB = CREATE_HIST_2D(h2_nhTIB, 20, -0.5, 19.5, fs); + h2_nhTID = CREATE_HIST_2D(h2_nhTID, 20, -0.5, 19.5, fs); + h2_nhTOB = CREATE_HIST_2D(h2_nhTOB, 20, -0.5, 19.5, fs); + h2_nhTEC = CREATE_HIST_2D(h2_nhTEC, 20, -0.5, 19.5, fs); + } + + //__________________________________________________ + void fill(const reco::Track &tk1, const reco::Track &tk2) { + 
h2_chi2ndof->Fill(tk1.normalizedChi2(), tk2.normalizedChi2()); + h2_trkAlgo->Fill(static_cast(tk1.algo()), static_cast(tk2.algo())); + h2_trkOriAlgo->Fill(static_cast(tk1.originalAlgo()), static_cast(tk2.originalAlgo())); + h2_P->Fill(tk1.p(), tk2.p()); + h2_Pt->Fill(tk1.pt(), tk2.p()); + h2_nHit->Fill(tk1.numberOfValidHits(), tk2.numberOfValidHits()); + h2_Charge->Fill(tk1.charge(), tk2.charge()); + h2_QoverPZoom->Fill(tk1.qoverp(), tk2.qoverp()); + h2_Eta->Fill(tk1.eta(), tk2.eta()); + h2_Phi->Fill(tk1.phi(), tk2.phi()); + h2_vx->Fill(tk1.vx(), tk2.vx()); + h2_vy->Fill(tk1.vy(), tk2.vy()); + h2_vz->Fill(tk1.vz(), tk2.vz()); + h2_d0->Fill(tk1.d0(), tk2.d0()); + h2_dz->Fill(tk2.dz(), tk2.dz()); + h2_nhpxb->Fill(tk1.hitPattern().numberOfValidPixelBarrelHits(), tk2.hitPattern().numberOfValidPixelBarrelHits()); + h2_nhpxe->Fill(tk1.hitPattern().numberOfValidPixelEndcapHits(), tk2.hitPattern().numberOfValidPixelEndcapHits()); + h2_nhTIB->Fill(tk1.hitPattern().numberOfValidStripTIBHits(), tk2.hitPattern().numberOfValidStripTIBHits()); + h2_nhTID->Fill(tk1.hitPattern().numberOfValidStripTIDHits(), tk2.hitPattern().numberOfValidStripTIDHits()); + h2_nhTOB->Fill(tk1.hitPattern().numberOfValidStripTOBHits(), tk2.hitPattern().numberOfValidStripTOBHits()); + h2_nhTEC->Fill(tk1.hitPattern().numberOfValidStripTECHits(), tk2.hitPattern().numberOfValidStripTECHits()); + } + + private: + TH2D *h2_chi2ndof; + TH2D *h2_trkAlgo; + TH2D *h2_trkOriAlgo; + TH2D *h2_P; + TH2D *h2_Pt; + TH2D *h2_nHit; + TH2D *h2_Charge; + TH2D *h2_QoverPZoom; + TH2D *h2_Eta; + TH2D *h2_Phi; + TH2D *h2_vx; + TH2D *h2_vy; + TH2D *h2_vz; + TH2D *h2_d0; + TH2D *h2_dz; + TH2D *h2_nhpxb; + TH2D *h2_nhpxe; + TH2D *h2_nhTIB; + TH2D *h2_nhTID; + TH2D *h2_nhTOB; + TH2D *h2_nhTEC; + }; + +public: + explicit ShortenedTrackValidation(const edm::ParameterSet &); + ~ShortenedTrackValidation() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + +private: + template + T 
*book(const TFileDirectory &dir, const Args &...args) const; + void beginJob() override; + void analyze(edm::Event const &iEvent, edm::EventSetup const &iSetup) override; + + // ----------member data --------------------------- + edm::Service fs_; + const std::string folderName_; + const std::vector hitsRemain_; + const double minTracksEta_; + const double maxTracksEta_; + const double minTracksPt_; + const double maxTracksPt_; + + const double maxDr_; + const edm::InputTag tracksTag_; + const std::vector tracksRerecoTag_; + const edm::InputTag BeamSpotTag_; + const edm::InputTag VerticesTag_; + const edm::EDGetTokenT> tracksToken_; + const std::vector>> tracksRerecoToken_; + const edm::EDGetTokenT beamspotToken_; + const edm::EDGetTokenT vertexToken_; + + // monitoring histograms + std::vector histsPtRatioAll_; + std::vector histsPtDiffAll_; + std::vector histsEtaDiffAll_; + std::vector histsPhiDiffAll_; + std::vector histsPtRatioVsDeltaRAll_; + std::vector histsDeltaPtOverPtAll_; + std::vector histsPtAll_; + std::vector histsNhitsAll_; + std::vector histsDeltaRAll_; + + trackingMon originalTrack; + std::vector comparators_; + static constexpr double muMass = 0.105658; +}; + +// ----------------------------- +// constructors and destructor +// ----------------------------- +ShortenedTrackValidation::ShortenedTrackValidation(const edm::ParameterSet &ps) + : folderName_(ps.getUntrackedParameter("folderName", "TrackRefitting")), + hitsRemain_(ps.getUntrackedParameter>("hitsRemainInput")), + minTracksEta_(ps.getUntrackedParameter("minTracksEtaInput", 0.0)), + maxTracksEta_(ps.getUntrackedParameter("maxTracksEtaInput", 2.2)), + minTracksPt_(ps.getUntrackedParameter("minTracksPtInput", 15.0)), + maxTracksPt_(ps.getUntrackedParameter("maxTracksPtInput", 99999.9)), + maxDr_(ps.getUntrackedParameter("maxDrInput", 0.01)), + tracksTag_(ps.getUntrackedParameter("tracksInputTag", edm::InputTag("generalTracks", "", "DQM"))), + 
tracksRerecoTag_(ps.getUntrackedParameter>("tracksRerecoInputTag")), + BeamSpotTag_(ps.getUntrackedParameter("BeamSpotTag", edm::InputTag("offlineBeamSpot"))), + VerticesTag_(ps.getUntrackedParameter("VerticesTag", edm::InputTag("offlinePrimaryVertices"))), + tracksToken_(consumes>(tracksTag_)), + tracksRerecoToken_(edm::vector_transform( + tracksRerecoTag_, [this](edm::InputTag const &tag) { return consumes>(tag); })), + beamspotToken_(consumes(BeamSpotTag_)), + vertexToken_(consumes(VerticesTag_)) { + usesResource(TFileService::kSharedResource); + histsPtRatioAll_.clear(); + histsPtDiffAll_.clear(); + histsEtaDiffAll_.clear(); + histsPhiDiffAll_.clear(); + histsPtRatioVsDeltaRAll_.clear(); + histsDeltaPtOverPtAll_.clear(); + histsPtAll_.clear(); + histsNhitsAll_.clear(); + histsDeltaRAll_.clear(); + comparators_.clear(); + + comparators_.reserve(hitsRemain_.size()); + for (unsigned int i = 0; i < hitsRemain_.size(); ++i) { + comparators_.push_back(new trackComparator()); + } +} + +//__________________________________________________________________________________ +template +T *ShortenedTrackValidation::book(const TFileDirectory &dir, const Args &...args) const { + T *t = dir.make(args...); + return t; +} + +//__________________________________________________________________________________ +void ShortenedTrackValidation::beginJob() { + std::string currentFolder = folderName_ + "/Resolutions"; + TFileDirectory ShortTrackResolution = fs_->mkdir(currentFolder); + currentFolder = folderName_ + "/Tracks"; + TFileDirectory TrackQuals = fs_->mkdir(currentFolder); + + for (unsigned int i = 0; i < hitsRemain_.size(); ++i) { + histsPtRatioAll_.push_back( + book(ShortTrackResolution, + fmt::sprintf("trackPtRatio_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track p_{T} / Full Track p_{T} - %s layers;p_{T}^{short}/p_{T}^{full};n. 
tracks", + hitsRemain_[i]) + .c_str(), + 101, + -0.05, + 2.05)); + + histsPtDiffAll_.push_back(book( + ShortTrackResolution, + fmt::sprintf("trackPtDiff_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track p_{T} - Full Track p_{T} - %s layers;p_{T}^{short} - p_{T}^{full} [GeV];n. tracks", + hitsRemain_[i]) + .c_str(), + 100, + -10., + 10.)); + + histsEtaDiffAll_.push_back( + book(ShortTrackResolution, + fmt::sprintf("trackEtaDiff_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track #eta - Full Track #eta - %s layers;#eta^{short} - #eta^{full};n. tracks", + hitsRemain_[i]) + .c_str(), + 100, + -0.01, + 0.01)); + + histsPhiDiffAll_.push_back( + book(ShortTrackResolution, + fmt::sprintf("trackPhiDiff_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track #phi - Full Track #phi - %s layers;#phi^{short} - #phi^{full};n. tracks", + hitsRemain_[i]) + .c_str(), + 100, + -0.01, + 0.01)); + + histsPtRatioVsDeltaRAll_.push_back( + book(ShortTrackResolution, + fmt::sprintf("trackPtRatioVsDeltaR_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track p_{T} / Full Track p_{T} - %s layers vs " + "#DeltaR;#DeltaR(short,full);p_{T}^{short}/p_{T}^{full} [GeV];n. tracks", + hitsRemain_[i]) + .c_str(), + 100, + 0., + 0.01, + 101, + -0.05, + 2.05)); + + histsDeltaPtOverPtAll_.push_back( + book(ShortTrackResolution, + fmt::sprintf("trackDeltaPtOverPt_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track p_{T} - Full Track p_{T} / Full Track p_{T} - %s layers;p_{T}^{short} - " + "p_{T}^{full} / p^{full}_{T};n. tracks", + hitsRemain_[i]) + .c_str(), + 101, + -10., + 10.)); + + histsPtAll_.push_back( + book(TrackQuals, + fmt::sprintf("trackPt_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track p_{T} - %s layers;p_{T}^{short} [GeV];n. tracks", hitsRemain_[i]).c_str(), + 101, + -0.05, + 200.5)); + + histsNhitsAll_.push_back( + book(TrackQuals, + fmt::sprintf("trackNhits_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track n. hits - %s layers; n. 
hits per track;n. tracks", hitsRemain_[i]).c_str(), + 20, + -0.5, + 19.5)); + + histsDeltaRAll_.push_back(book( + TrackQuals, + fmt::sprintf("trackDeltaR_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track / Long Track #DeltaR %s layers;#DeltaR(short,long);n. tracks", hitsRemain_[i]).c_str(), + 100, + 0., + 0.01)); + + currentFolder = fmt::sprintf("%s/Compare_%sHit", folderName_, hitsRemain_[i]); + comparators_[i]->book(fs_->mkdir(currentFolder)); + } + + currentFolder = folderName_ + "/OriginalTrack"; + TFileDirectory original = fs_->mkdir(currentFolder); + originalTrack.book(original); +} + +//__________________________________________________________________________________ +void ShortenedTrackValidation::analyze(edm::Event const &iEvent, edm::EventSetup const &iSetup) { + const auto &tracks = iEvent.getHandle(tracksToken_); + + if (!tracks.isValid()) { + edm::LogError("ShortenedTrackValidation") << "Missing input track collection " << tracksTag_.encode() << std::endl; + return; + } + + reco::BeamSpot beamSpot; + edm::Handle beamSpotHandle = iEvent.getHandle(beamspotToken_); + if (beamSpotHandle.isValid()) { + beamSpot = *beamSpotHandle; + } else { + beamSpot = reco::BeamSpot(); + } + + reco::Vertex pvtx; + edm::Handle vertexHandle = iEvent.getHandle(vertexToken_); + if (vertexHandle.isValid()) { + pvtx = (*vertexHandle).at(0); + } else { + pvtx = reco::Vertex(); + } + + // the original long track + for (const auto &track : *tracks) { + const reco::HitPattern &hp = track.hitPattern(); + if (int(int(hp.numberOfValidHits()) - int(hp.numberOfAllHits(reco::HitPattern::TRACK_HITS))) != 0) { + break; + } + + // fill the original track properties monitoring + originalTrack.fill(track, beamSpot, pvtx); + + TLorentzVector tvec; + tvec.SetPtEtaPhiM(track.pt(), track.eta(), track.phi(), muMass); + + int i = 0; // token index + // loop on the re-recoed shortened track collections + for (const auto &token : tracksRerecoToken_) { + const auto &tracks_rereco = 
iEvent.getHandle(token); + + for (const auto &track_rereco : *tracks_rereco) { + TLorentzVector trerecovec; + trerecovec.SetPtEtaPhiM(track_rereco.pt(), track_rereco.eta(), track_rereco.phi(), 0.0); + double deltaR = tvec.DeltaR(trerecovec); + + if (deltaR < maxDr_) { + if (track_rereco.pt() >= minTracksPt_ && track_rereco.pt() <= maxTracksPt_ && + std::abs(track_rereco.eta()) >= minTracksEta_ && std::abs(track_rereco.eta()) <= maxTracksEta_) { + // fill the 2D comparisons per track + comparators_[i]->fill(track, track_rereco); + + histsPtRatioAll_[i]->Fill(1.0 * track_rereco.pt() / track.pt()); + histsPtDiffAll_[i]->Fill(track_rereco.pt() - track.pt()); + histsDeltaPtOverPtAll_[i]->Fill((track_rereco.pt() - track.pt()) / track.pt()); + histsEtaDiffAll_[i]->Fill(track_rereco.eta() - track.eta()); + histsPhiDiffAll_[i]->Fill(track_rereco.phi() - track.phi()); + histsPtRatioVsDeltaRAll_[i]->Fill(deltaR, track_rereco.pt() / track.pt()); + histsPtAll_[i]->Fill(track_rereco.pt()); + histsNhitsAll_[i]->Fill(track_rereco.numberOfValidHits()); + histsDeltaRAll_[i]->Fill(deltaR); + } + } + } + ++i; + } + } +} + +//__________________________________________________________________________________ +void ShortenedTrackValidation::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + desc.addUntracked("folderName", "TrackRefitting"); + desc.addUntracked>("hitsRemainInput", {}); + desc.addUntracked("minTracksEtaInput", 0.0); + desc.addUntracked("maxTracksEtaInput", 2.2); + desc.addUntracked("minTracksPtInput", 15.0); + desc.addUntracked("maxTracksPtInput", 99999.9); + desc.addUntracked("maxDrInput", 0.01); + desc.addUntracked("tracksInputTag", edm::InputTag("generalTracks", "", "DQM")); + desc.addUntracked>("tracksRerecoInputTag", {}); + desc.addUntracked("BeamSpotTag", edm::InputTag("offlineBeamSpot")); + desc.addUntracked("VerticesTag", edm::InputTag("offlinePrimaryVertices")); + descriptions.addWithDefaultLabel(desc); +} + +// 
Define this as a plug-in +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(ShortenedTrackValidation); diff --git a/Alignment/OfflineValidation/plugins/TkAlV0sAnalyzer.cc b/Alignment/OfflineValidation/plugins/TkAlV0sAnalyzer.cc new file mode 100644 index 0000000000000..33f651e753a67 --- /dev/null +++ b/Alignment/OfflineValidation/plugins/TkAlV0sAnalyzer.cc @@ -0,0 +1,161 @@ +// -*- C++ -*- +// +// Package: Alignment/OfflineValidation +// Class: TkAlV0sAnalyzer +// +/* + *\class TkAlV0sAnalyzer TkAlV0sAnalyzer.cc Alignment/TkAlV0sAnalyzer/plugins/TkAlV0sAnalyzer.cc + + Description: [one line class summary] + + Implementation: + [Notes on implementation] +*/ +// +// Original Author: Marco Musich +// Created: Thu, 14 Dec 2023 15:10:34 GMT +// +// + +// system include files +#include + +// user include files +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" + +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" + +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/TrackReco/interface/TrackFwd.h" +#include "CommonTools/UtilAlgos/interface/TFileService.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "DataFormats/Candidate/interface/VertexCompositeCandidate.h" +#include "DataFormats/RecoCandidate/interface/RecoChargedCandidate.h" + +#include "TLorentzVector.h" + +// +// class declaration +// + +// If the analyzer does not use TFileService, please remove +// the template argument to the base class so the class inherits +// from edm::one::EDAnalyzer<> +// This will improve performance in multithreaded jobs. 
+ +using reco::TrackCollection; + +class TkAlV0sAnalyzer : public edm::one::EDAnalyzer { +public: + explicit TkAlV0sAnalyzer(const edm::ParameterSet&); + ~TkAlV0sAnalyzer() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + template + T* book(const Args&... args) const; + void beginJob() override; + void analyze(const edm::Event&, const edm::EventSetup&) override; + + // ----------member data --------------------------- + const edm::EDGetTokenT tracksToken_; //used to select what tracks to read from configuration file + const edm::EDGetTokenT vccToken_; + edm::Service fs_; + + TH1F* h_diTrackMass; + TH1F* h_V0Mass; +}; + +static constexpr double piMass2 = 0.13957018 * 0.13957018; + +// +// constructors and destructor +// +TkAlV0sAnalyzer::TkAlV0sAnalyzer(const edm::ParameterSet& iConfig) + : tracksToken_(consumes(iConfig.getUntrackedParameter("tracks"))), + vccToken_(consumes( + iConfig.getParameter("vertexCompositeCandidates"))) { + usesResource(TFileService::kSharedResource); +} + +template +T* TkAlV0sAnalyzer::book(const Args&... 
args) const { + T* t = fs_->make(args...); + return t; +} + +// +// member functions +// +// ------------ method called for each event ------------ +void TkAlV0sAnalyzer::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + using namespace edm; + + std::vector myTracks; + + edm::Handle vccHandle; + iEvent.getByToken(vccToken_, vccHandle); + + if (vccHandle->empty()) + return; + + reco::VertexCompositeCandidateCollection v0s = *vccHandle.product(); + + for (const auto& track : iEvent.get(tracksToken_)) { + myTracks.emplace_back(&track); + } + + // exclude multiple candidates + if (myTracks.size() != 2) + return; + + for (const auto& v0 : v0s) { + float mass = v0.mass(); + h_V0Mass->Fill(mass); + + for (size_t i = 0; i < v0.numberOfDaughters(); ++i) { + //LogPrint("AlignmentTrackFromVertexCompositeCandidateSelector") << "daughter: " << i << std::endl; + const reco::Candidate* daughter = v0.daughter(i); + const reco::RecoChargedCandidate* chargedDaughter = dynamic_cast(daughter); + if (chargedDaughter) { + //LogPrint("AlignmentTrackFromVertexCompositeCandidateSelector") << "charged daughter: " << i << std::endl; + const reco::TrackRef trackRef = chargedDaughter->track(); + if (trackRef.isNonnull()) { + // LogPrint("AlignmentTrackFromVertexCompositeCandidateSelector") + // << "charged daughter has non-null trackref: " << i << std::endl; + } + } + } + } + + const auto& tplus = myTracks[0]->charge() > 0 ? myTracks[0] : myTracks[1]; + const auto& tminus = myTracks[0]->charge() < 0 ? 
myTracks[0] : myTracks[1]; + + TLorentzVector p4_tplus(tplus->px(), tplus->py(), tplus->pz(), sqrt((tplus->p() * tplus->p()) + piMass2)); + TLorentzVector p4_tminus(tminus->px(), tminus->py(), tminus->pz(), sqrt((tminus->p() * tminus->p()) + piMass2)); + + const auto& V0p4 = p4_tplus + p4_tminus; + float track_invMass = V0p4.M(); + h_diTrackMass->Fill(track_invMass); +} + +void TkAlV0sAnalyzer::beginJob() { + h_diTrackMass = book("diTrackMass", "V0 mass from tracks in Event", 100, 0.400, 0.600); + h_V0Mass = book("V0kMass", "Reconstructed V0 mass in Event", 100, 0.400, 0.600); +} + +// ------------ method fills 'descriptions' with the allowed parameters for the module ------------ +void TkAlV0sAnalyzer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("vertexCompositeCandidates", edm::InputTag("generalV0Candidates:Kshort")); + desc.addUntracked("tracks", edm::InputTag("ALCARECOTkAlKShortTracks")); + descriptions.addWithDefaultLabel(desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(TkAlV0sAnalyzer); diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV.py new file mode 100644 index 0000000000000..37808e4643823 --- /dev/null +++ b/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV.py @@ -0,0 +1,96 @@ +import copy +import os + +def DiMuonV(config, validationDir): + ##List with all jobs + jobs = [] + DiMuonVType = "single" + + ##List with all wished IOVs + IOVs = [] + + ##Start with single DiMuonV jobs + if not DiMuonVType in config["validations"]["DiMuonV"]: + raise Exception("No 'single' key word in config for DiMuonV") + + for singleName in config["validations"]["DiMuonV"][DiMuonVType]: + for IOV in config["validations"]["DiMuonV"][DiMuonVType][singleName]["IOV"]: + ##Save IOV to loop later for merge jobs + if not IOV in IOVs: + IOVs.append(IOV) + + for alignment in 
config["validations"]["DiMuonV"][DiMuonVType][singleName]["alignments"]: + ##Work directory for each IOV + workDir = "{}/DiMuonV/{}/{}/{}/{}".format(validationDir, DiMuonVType, singleName, alignment, IOV) + + ##Write local config + local = {} + local["output"] = "{}/{}/DiMuonV/{}/{}/{}/{}".format(config["LFS"], config["name"], DiMuonVType, alignment, singleName, IOV) + local["alignment"] = copy.deepcopy(config["alignments"][alignment]) + local["validation"] = copy.deepcopy(config["validations"]["DiMuonV"][DiMuonVType][singleName]) + local["validation"].pop("alignments") + local["validation"]["IOV"] = IOV + if "dataset" in local["validation"]: + local["validation"]["dataset"] = local["validation"]["dataset"].format(IOV) + if "goodlumi" in local["validation"]: + local["validation"]["goodlumi"] = local["validation"]["goodlumi"].format(IOV) + + ##Write job info + job = { + "name": "DiMuonV_{}_{}_{}_{}".format(DiMuonVType, alignment, singleName, IOV), + "dir": workDir, + "exe": "cmsRun", + "cms-config": "{}/src/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV_cfg.py".format(os.environ["CMSSW_BASE"]), + "run-mode": "Condor", + "dependencies": [], + "config": local, + } + + jobs.append(job) + + ##Do merge DiMuonV if wished + if "merge" in config["validations"]["DiMuonV"]: + ##List with merge jobs, will be expanded to jobs after looping + mergeJobs = [] + DiMuonVType = "merge" + + ##Loop over all merge jobs/IOVs which are wished + for mergeName in config["validations"]["DiMuonV"][DiMuonVType]: + for IOV in IOVs: + ##Work directory for each IOV + workDir = "{}/DiMuonV/{}/{}/{}".format(validationDir, DiMuonVType, mergeName, IOV) + + ##Write job info + local = {} + + job = { + "name": "DiMuonV_{}_{}_{}".format(DiMuonVType, mergeName, IOV), + "dir": workDir, + "exe": "DiMuonVmerge", + "run-mode": "Condor", + "dependencies": [], + "config": local, + } + + for alignment in config["alignments"]: + ##Deep copy necessary things from global config + 
local.setdefault("alignments", {}) + if alignment in config["validations"]["DiMuonV"]["single"][mergeName]["alignments"]: + local["alignments"][alignment] = copy.deepcopy(config["alignments"][alignment]) + local["validation"] = copy.deepcopy(config["validations"]["DiMuonV"][DiMuonVType][mergeName]) + local["output"] = "{}/{}/DiMuonV/{}/{}/{}".format(config["LFS"], config["name"], DiMuonVType, mergeName, IOV) + + ##Loop over all single jobs + for singleJob in jobs: + ##Get single job info and append to merge job if requirements fullfilled + alignment, singleName, singleIOV = singleJob["name"].split("_")[2:] + + if int(singleIOV) == IOV and singleName in config["validations"]["DiMuonV"][DiMuonVType][mergeName]["singles"]: + local["alignments"][alignment]["file"] = singleJob["config"]["output"] + job["dependencies"].append(singleJob["name"]) + + mergeJobs.append(job) + + jobs.extend(mergeJobs) + + return jobs diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV_cfg.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV_cfg.py new file mode 100644 index 0000000000000..e51c5fa46794d --- /dev/null +++ b/Alignment/OfflineValidation/python/TkAlAllInOneTool/DiMuonV_cfg.py @@ -0,0 +1,224 @@ +from __future__ import print_function +from fnmatch import fnmatch +import FWCore.ParameterSet.Config as cms +import FWCore.Utilities.FileUtils as FileUtils +from FWCore.ParameterSet.VarParsing import VarParsing +from Alignment.OfflineValidation.TkAlAllInOneTool.defaultInputFiles_cff import filesDefaultMC_DoubleMuon_string + +import sys +import json +import os + +################################################################### +# Define process +################################################################### +process = cms.Process("DiMuonVertexValidation") + +################################################################### +# Argument parsing +################################################################### +options = VarParsing() 
+options.register("config", "", VarParsing.multiplicity.singleton, VarParsing.varType.string , "AllInOne config") + +options.parseArguments() + +################################################################### +# Read in AllInOne config in JSON format +################################################################### +if options.config == "": + config = {"validation": {}, + "alignment": {}} +else: + with open(options.config, "r") as configFile: + config = json.load(configFile) + +isMC = config["validation"].get("ismc", True) + +################################################################### +# Read filenames from given TXT file and define input source +################################################################### +readFiles = [] + +if "dataset" in config["validation"]: + with open(config["validation"]["dataset"], "r") as datafiles: + for fileName in datafiles.readlines(): + readFiles.append(fileName.replace("\n", "")) + + process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring(readFiles), + skipEvents = cms.untracked.uint32(0)) +else: + process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring(filesDefaultMC_DoubleMuon_string), + skipEvents = cms.untracked.uint32(0)) + +################################################################### +# Get good lumi section and load data or handle MC +################################################################### +if "goodlumi" in config["validation"]: + if os.path.isfile(config["validation"]["goodlumi"]): + goodLumiSecs = cms.untracked.VLuminosityBlockRange(LumiList.LumiList(filename = config["validation"]["goodlumi"]).getCMSSWString().split(',')) + + else: + print("Does not exist: {}. 
Continue without good lumi section file.") + goodLumiSecs = cms.untracked.VLuminosityBlockRange() + +else: + goodLumiSecs = cms.untracked.VLuminosityBlockRange() + +if isMC: + pass +else: + process.source.lumisToProcess = goodLumiSecs + +################################################################### +## efault set to 1 for unit tests +################################################################### +process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(config["validation"].get("maxevents", 100))) + +################################################################### +# Bookeeping +################################################################### +process.options = cms.untracked.PSet( + wantSummary = cms.untracked.bool(False), + Rethrow = cms.untracked.vstring("ProductNotFound"), + fileMode = cms.untracked.string('NOMERGE'), +) + +################################################################### +# Messages +################################################################### +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.cerr.enable = False +process.MessageLogger.TrackRefitter=dict() +process.MessageLogger.PrimaryVertexProducer=dict() +process.MessageLogger.DiMuonVertexValidation=dict() +process.MessageLogger.DiLeptonHelpCounts=dict() +process.MessageLogger.PlotsVsKinematics=dict() +process.MessageLogger.cout = cms.untracked.PSet( + enable = cms.untracked.bool(True), + threshold = cms.untracked.string("INFO"), + default = cms.untracked.PSet(limit = cms.untracked.int32(0)), + FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1), + reportEvery = cms.untracked.int32(100) + ), + DiMuonVertexValidation = cms.untracked.PSet( limit = cms.untracked.int32(-1)), + DiLeptonHelpCounts = cms.untracked.PSet( limit = cms.untracked.int32(-1)), + enableStatistics = cms.untracked.bool(True) + ) + +################################################################### +# import of standard configurations 
+################################################################### +process.load('Configuration.StandardSequences.Services_cff') +process.load('Configuration.StandardSequences.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +################################################################### +# TransientTrack from https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideTransientTracks +################################################################### +process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi") +process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi') +process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi') +process.load('TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff') + +#################################################################### +# Get the BeamSpot +#################################################################### +process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff") + +#################################################################### +# Global tag +#################################################################### +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") +from Configuration.AlCa.GlobalTag import GlobalTag +# default to remain in sync with the default input sample +process.GlobalTag = GlobalTag(process.GlobalTag, config["alignment"].get("globaltag", "auto:phase1_2022_realistic")) + +#################################################################### +# Load conditions if wished +#################################################################### +if "conditions" in config["alignment"]: + from CalibTracker.Configuration.Common.PoolDBESSource_cfi import poolDBESSource + + for condition in config["alignment"]["conditions"]: + setattr(process, 
"conditionsIn{}".format(condition), poolDBESSource.clone( + connect = cms.string(str(config["alignment"]["conditions"][condition]["connect"])), + toGet = cms.VPSet( + cms.PSet( + record = cms.string(str(condition)), + tag = cms.string(str(config["alignment"]["conditions"][condition]["tag"])) + ) + ) + ) + ) + + setattr(process, "prefer_conditionsIn{}".format(condition), cms.ESPrefer("PoolDBESSource", "conditionsIn{}".format(condition))) + +################################################################### +# refitting the muon tracks +################################################################### +process.load("RecoTracker.TrackProducer.TrackRefitters_cff") +import RecoTracker.TrackProducer.TrackRefitters_cff +process.refittedMuons = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone( + src = config["validation"].get("muonTrackcollection", "ALCARECOTkAlDiMuon"), # ALCARECOTkAlDiMuon + TrajectoryInEvent = True, + NavigationSchool = '', + TTRHBuilder = config["validation"].get("tthrbuilder", "WithAngleAndTemplate")) + +################################################################### +# refitting the vertex tracks +################################################################### +process.refittedVtxTracks = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone( + src = config["validation"].get("trackcollection", "generalTracks"), # ALCARECOTkAlDiMuonVertexTracks + TrajectoryInEvent = True, + NavigationSchool = '', + TTRHBuilder = config["validation"].get("tthrbuilder", "WithAngleAndTemplate")) + +#################################################################### +# Re-do vertices +#################################################################### +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices +process.offlinePrimaryVerticesFromRefittedTrks = offlinePrimaryVertices.clone() +process.offlinePrimaryVerticesFromRefittedTrks.TrackLabel = cms.InputTag("refittedVtxTracks") + 
+#################################################################### +# Sequence +#################################################################### +process.seqRefitting = cms.Sequence(process.offlineBeamSpot + + process.refittedMuons + + process.refittedVtxTracks + + process.offlinePrimaryVerticesFromRefittedTrks) + +#################################################################### +# Output file +#################################################################### +process.TFileService = cms.Service("TFileService", + fileName = cms.string("{}/DiMuonVertexValidation.root".format(config.get("output", os.getcwd()))), + closeFileFast = cms.untracked.bool(True)) + +#################################################################### +# Analysis module +#################################################################### +from Alignment.OfflineValidation.diMuonVertexValidation_cfi import diMuonVertexValidation +process.DiMuonVertexValidation = diMuonVertexValidation.clone(useReco = config["validation"].get("useReco",True), + vertices = 'offlinePrimaryVerticesFromRefittedTrks') + +## the two sets of parameters below are mutually exclusive, +## depending if RECO or ALCARECO is used +## the useReco flag above must be set accordingly +if (config["validation"].get("useReco",True)): + process.DiMuonVertexValidation.muons = 'muons' + process.DiMuonVertexValidation.tracks = 'refittedVtxTracks' +else: + process.DiMuonVertexValidation.muonTracks = cms.InputTag('refittedMuons') + +#################################################################### +# Path +#################################################################### +process.p = cms.Path(process.seqRefitting + + process.offlinePrimaryVerticesFromRefittedTrks + + process.DiMuonVertexValidation) + +print("# Done") diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS.py new file mode 100644 index 0000000000000..ae0d9a44a6bcb --- /dev/null 
+++ b/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS.py @@ -0,0 +1,124 @@ +import copy +import os + +def MTS(config, validationDir): + ##List with all jobs + jobs = [] + mtsType = "single" + + ##Dictionary of lists of all IOVs (can be different per each single job) + IOVs = {} + + ##Auxilliary dictionary of isData flags per each merged job + isDataMerged = {} + + ##Start with single MTS jobs + if not mtsType in config["validations"]["MTS"]: + raise Exception("No 'single' key word in config for MTS") + + for singleName in config["validations"]["MTS"][mtsType]: + aux_IOV = config["validations"]["MTS"][mtsType][singleName]["IOV"] + if not isinstance(aux_IOV, list) and aux_IOV.endswith(".txt"): + config["validations"]["MTS"][mtsType][singleName]["IOV"] = [] + with open(aux_IOV, 'r') as IOVfile: + for line in IOVfile.readlines(): + if len(line) != 0: config["validations"]["MTS"][mtsType][singleName]["IOV"].append(int(line)) + for IOV in config["validations"]["MTS"][mtsType][singleName]["IOV"]: + ##Save IOV to loop later for merge jobs + if singleName not in IOVs.keys(): + IOVs[singleName] = [] + if IOV not in IOVs[singleName]: + IOVs[singleName].append(IOV) + + for alignment in config["validations"]["MTS"][mtsType][singleName]["alignments"]: + ##Work directory for each IOV + workDir = "{}/MTS/{}/{}/{}/{}".format(validationDir, mtsType, singleName, alignment, IOV) + + ##Write local config + local = {} + local["output"] = "{}/{}/MTS/{}/{}/{}/{}".format(config["LFS"], config["name"], mtsType, alignment, singleName, IOV) + local["alignment"] = copy.deepcopy(config["alignments"][alignment]) + local["validation"] = copy.deepcopy(config["validations"]["MTS"][mtsType][singleName]) + local["validation"].pop("alignments") + local["validation"]["IOV"] = IOV + if "dataset" in local["validation"]: + local["validation"]["dataset"] = local["validation"]["dataset"].format(IOV) + if "goodlumi" in local["validation"]: + local["validation"]["goodlumi"] = 
local["validation"]["goodlumi"].format(IOV) + + ##Write job info + job = { + "name": "MTS_{}_{}_{}_{}".format(mtsType, alignment, singleName, IOV), + "dir": workDir, + "exe": "cmsRun", + "cms-config": "{}/src/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS_cfg.py".format(os.environ["CMSSW_BASE"]), + "run-mode": "Condor", + "dependencies": [], + "config": local, + } + + jobs.append(job) + + ##Do merge MTS if wished + if "merge" in config["validations"]["MTS"]: + ##List with merge jobs, will be expanded to jobs after looping + mergeJobs = [] + pvType = "merge" + + ##Loop over all merge jobs/IOVs which are wished + for mergeName in config["validations"]["MTS"][pvType]: + ##Loop over singles + for iname,singleName in enumerate(config["validations"]["MTS"][pvType][mergeName]['singles']): + for IOV in IOVs[singleName]: + + ##Work directory for each IOV + workDir = "{}/MTS/{}/{}/{}".format(validationDir, pvType, mergeName, IOV) #Different (DATA) single jobs must contain different set of IOVs + + ##Write job info + local = {} + + job = { + "name": "MTS_{}_{}_{}".format(pvType, mergeName, IOV), + "dir": workDir, + "exe": "MTSmerge", + "run-mode": "Condor", + "dependencies": [], + "config": local, + } + + ##Deep copy necessary things from global config + assure plot order + for alignment in config["alignments"]: + local.setdefault("alignments", {}) + if alignment in config["validations"]["MTS"]["single"][singleName]["alignments"]: #Cover all DATA validations + local["alignments"][alignment] = copy.deepcopy(config["alignments"][alignment]) + local["alignments"][alignment]['index'] = config["validations"]["MTS"]["single"][singleName]["alignments"].index(alignment) + local["alignments"][alignment]['isMC'] = False + local["validation"] = copy.deepcopy(config["validations"]["MTS"][pvType][mergeName]) + local["validation"]["IOV"] = IOV + if "customrighttitle" in local["validation"].keys(): + if "IOV" in local["validation"]["customrighttitle"]: + 
local["validation"]["customrighttitle"] = local["validation"]["customrighttitle"].replace("IOV",str(IOV)) + local["output"] = "{}/{}/MTS/{}/{}/{}".format(config["LFS"], config["name"], pvType, mergeName, IOV) + + ##Add global plotting options + if "style" in config.keys(): + if "MTS" in config['style'].keys(): + if pvType in config['style']['MTS'].keys(): + local["style"] = copy.deepcopy(config["style"]["MTS"][pvType]) + if "Rlabel" in local["style"] and "customrighttitle" in local["validation"].keys(): + print("WARNING: custom right label is overwritten by global settings") + + ##Loop over all single jobs + for singleJob in jobs: + ##Get single job info and append to merge job if requirements fullfilled + _alignment, _singleName, _singleIOV = singleJob["name"].split("_")[2:] + if _singleName in config["validations"]["MTS"][pvType][mergeName]["singles"]: + if (int(_singleIOV) == IOV): #matching DATA job or any MC single job + local["alignments"][_alignment]["file"] = singleJob["config"]["output"] + job["dependencies"].append(singleJob["name"]) + + mergeJobs.append(job) + + jobs.extend(mergeJobs) + + return jobs diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS_cfg.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS_cfg.py new file mode 100644 index 0000000000000..f5aaedc8414fa --- /dev/null +++ b/Alignment/OfflineValidation/python/TkAlAllInOneTool/MTS_cfg.py @@ -0,0 +1,203 @@ +import json +import yaml +import os +import FWCore.ParameterSet.Config as cms +import FWCore.PythonUtilities.LumiList as LumiList +from Alignment.OfflineValidation.TkAlAllInOneTool.defaultInputFiles_cff import filesDefaultData_Cosmics_string +from FWCore.ParameterSet.VarParsing import VarParsing +from Alignment.OfflineValidation.TkAlAllInOneTool.utils import _byteify +import pdb + +################################################################### +# Define process +################################################################### +process = 
cms.Process("splitter") + +################################################################### +# Argument parsing +################################################################### +options = VarParsing() +options.register("config", "", VarParsing.multiplicity.singleton, VarParsing.varType.string , "AllInOne config") +options.parseArguments() + +################################################################### +# Read in AllInOne config in JSON format +################################################################### +if options.config == "": + config = {"validation": {}, + "alignment": {}} +else: + with open(options.config, "r") as configFile: + if options.config.endswith(".json"): + config = json.load(configFile) + elif options.config.endswith(".yaml"): + config = yaml.safe_load(configFile) + +################################################################### +# Read filenames from given TXT file and define input source +################################################################### +readFiles = [] + +if "dataset" in config["validation"]: + with open(config["validation"]["dataset"], "r") as datafiles: + for fileName in datafiles.readlines(): + readFiles.append(fileName.replace("\n", "")) + + process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring(readFiles), + skipEvents = cms.untracked.uint32(0) + ) +else: + print(">>>>>>>>>> MTS_cfg.py: msg%-i: dataset not specified! 
Loading default file -> filesDefaultData_Cosmics_string!") + process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring(filesDefaultData_Cosmics_string), + skipEvents = cms.untracked.uint32(0) + ) + +################################################################### +# Get good lumi section and load data or handle MC +################################################################### +if "goodlumi" in config["validation"]: + if os.path.isfile(config["validation"]["goodlumi"]): + goodLumiSecs = cms.untracked.VLuminosityBlockRange(LumiList.LumiList(filename = config["validation"]["goodlumi"]).getCMSSWString().split(',')) + else: + print("Does not exist: {}. Continue without good lumi section file.") + goodLumiSecs = cms.untracked.VLuminosityBlockRange() + +else: + goodLumiSecs = cms.untracked.VLuminosityBlockRange() + +################################################################### +# Runs and events +################################################################### +runboundary = config["validation"].get("runboundary", 1) +isMultipleRuns=False +if(isinstance(runboundary, (list, tuple))): + isMultipleRuns=True + print("Multiple Runs are selected") +if(isMultipleRuns): + process.source.firstRun = cms.untracked.uint32(runboundary[0]) +else: + process.source.firstRun = cms.untracked.uint32(runboundary) + +################################################################### +# Default set to 1 for unit tests +################################################################### +process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(config["validation"].get("maxevents", 1))) + +################################################################### +# Bookkeeping +################################################################### +process.options = cms.untracked.PSet( + wantSummary = cms.untracked.bool(False), + Rethrow = cms.untracked.vstring("ProductNotFound"), # make this exception fatal + fileMode = cms.untracked.string('NOMERGE'), # 
no ordering needed, but calls endRun/beginRun etc. at file boundaries +) + +################################################################### +# Messages +################################################################### +process.load("FWCore.MessageLogger.MessageLogger_cfi") +process.MessageLogger.cerr.FwkReport.reportEvery = 1000 +process.MessageLogger.cout.enableStatistics = cms.untracked.bool(True) + +################################################################### +# Basic modules +################################################################### +process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff") +process.load("Configuration.Geometry.GeometryDB_cff") +process.load('Configuration.StandardSequences.Services_cff') +process.load("Configuration.StandardSequences.MagneticField_cff") + +#################################################################### +# Load and Configure Track refitter +#################################################################### +import Alignment.CommonAlignment.tools.trackselectionRefitting as trackselRefit +process.seqTrackselRefit = trackselRefit.getSequence( + process, + config["validation"].get("trackcollection", "ALCARECOTkAlCosmicsCTF0T"), + isPVValidation = False, + TTRHBuilder = config["validation"].get("tthrbuilder", "WithAngleAndTemplate"), + usePixelQualityFlag=config["validation"].get("usePixelQualityFlag", True), + openMassWindow = False, + cosmicsDecoMode = True, + cosmicsZeroTesla=config["validation"].get("cosmicsZeroTesla", False), + momentumConstraint = None, + cosmicTrackSplitting = True, + use_d0cut = False +) + +#################################################################### +# Global tag +#################################################################### +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, config["alignment"].get("globaltag", 
"124X_dataRun3_Prompt_v10")) + +#################################################################### +# Load conditions if wished +#################################################################### +if "conditions" in config["alignment"]: + from CalibTracker.Configuration.Common.PoolDBESSource_cfi import poolDBESSource + + for condition in config["alignment"]["conditions"]: + setattr( + process, + "conditionsIn{}".format(condition), + poolDBESSource.clone( + # FIXME%START + connect = cms.string("sqlite_file:" + str(config["alignment"]["conditions"][condition]["connect"]) if "alignments_MP.db" in str(config["alignment"]["conditions"][condition]["connect"]) else str(config["alignment"]["conditions"][condition]["connect"])), + #FIXME%END + toGet = cms.VPSet( + cms.PSet( + record = cms.string(str(condition)), + tag = cms.string(str(config["alignment"]["conditions"][condition]["tag"])) + ) + ) + ) + ) + + setattr( + process, + "prefer_conditionsIn{}".format(condition), + cms.ESPrefer("PoolDBESSource", "conditionsIn{}".format(condition)) + ) + +#################################################################### +# Configure the Analyzer module +#################################################################### + +process.FittingSmootherRKP5.EstimateCut = -1 +process.AlignmentTrackSelector.minHitsPerSubDet.inPIXEL = 2 +# Use compressions settings of TFile +# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSet tings +# settings = 100 * algorithm + level +# level is from 1 (small) to 9 (large compression) +# algo: 1 (ZLIB), 2 (LMZA) +# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance +compressionSettings = 207 +process.cosmicValidation = cms.EDAnalyzer( + "CosmicSplitterValidation", + compressionSettings = cms.untracked.int32(compressionSettings), + ifSplitMuons = cms.bool(False), + checkIfGolden = cms.bool(False), + splitTracks = 
cms.InputTag("FinalTrackRefitter","","splitter"), + splitGlobalMuons = cms.InputTag("muons","","splitter"), + originalTracks = cms.InputTag("FirstTrackRefitter","","splitter"), + originalGlobalMuons = cms.InputTag("muons","","Rec") +) + +#################################################################### +# Output file +#################################################################### +process.TFileService = cms.Service("TFileService", + fileName = cms.string("{}/MTSValidation_{}_{}.root".format(config.get("output", os.getcwd()), config["alignment"].get("name", ""), config["validation"].get("IOV", 1.))), + closeFileFast = cms.untracked.bool(True), + ) + +#################################################################### +# Path +#################################################################### +process.p = cms.Path(process.seqTrackselRefit*process.cosmicValidation) + +print("Done") diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/PV_cfg.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/PV_cfg.py index 071ba09157af5..33ca243a73a24 100644 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/PV_cfg.py +++ b/Alignment/OfflineValidation/python/TkAlAllInOneTool/PV_cfg.py @@ -187,7 +187,7 @@ ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/alignment.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/alignment.py deleted file mode 100644 index 8520c1024a123..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/alignment.py +++ /dev/null @@ -1,299 +0,0 @@ -from __future__ import absolute_import -import collections 
-import os -import re - -from . import configTemplates -from .helperFunctions import parsecolor, parsestyle, replaceByMap, clean_name, getTagsMap -from .TkAlExceptions import AllInOneError - -class Alignment(object): - condShorts = { - "TrackerAlignmentErrorExtendedRcd": { - "zeroAPE_phase0": { - "connectString":("frontier://FrontierProd" - "/CMS_CONDITIONS"), - "tagName": "TrackerIdealGeometryErrorsExtended210_mc", - "labelName": "" - }, - "zeroAPE_phase1": { - "connectString":("frontier://FrontierProd" - "/CMS_CONDITIONS"), - "tagName": "TrackerAlignmentErrorsExtended_Upgrade2017_design_v0", - "labelName": "" - }, - }, - "TrackerSurfaceDeformationRcd": { - "zeroDeformations": { - "connectString":("frontier://FrontierProd" - "/CMS_CONDITIONS"), - "tagName": "TrackerSurfaceDeformations_zero", - "labelName": "" - }, - }, - } - def __init__(self, name, config, runGeomComp = "1"): - section = "alignment:%s"%name - if not config.has_section( section ): - raise AllInOneError("section %s not found. Please define the " - "alignment!"%section) - config.checkInput(section, - knownSimpleOptions = ['globaltag', 'style', 'color', 'title', 'mp', 'mp_alignments', 'mp_deformations', 'mp_APEs', 'hp', 'hp_alignments', 'hp_deformations', 'sm', 'sm_alignments', 'sm_deformations'], - knownKeywords = ['condition']) - self.name = clean_name(name) - if config.exists(section,"title"): - self.title = config.get(section,"title") - else: - self.title = self.name - if (int(runGeomComp) != 1): - self.name += "_run" + runGeomComp - self.title += " run " + runGeomComp - if "|" in self.title or "," in self.title or '"' in self.title: - msg = "The characters '|', '\"', and ',' cannot be used in the alignment title!" 
- raise AllInOneError(msg) - self.runGeomComp = runGeomComp - self.globaltag = config.get( section, "globaltag" ) - self.conditions = self.__getConditions( config, section ) - - self.color = config.get(section,"color") - self.style = config.get(section,"style") - - self.color = str(parsecolor(self.color)) - self.style = str(parsestyle(self.style)) - - def __shorthandExists(self, theRcdName, theShorthand): - """Method which checks, if `theShorthand` is a valid shorthand for the - given `theRcdName`. - - Arguments: - - `theRcdName`: String which specifies the database record. - - `theShorthand`: String which specifies the shorthand to check. - """ - - if (theRcdName in self.condShorts) and \ - (theShorthand in self.condShorts[theRcdName]): - return True - else: - return False - - def __getConditions( self, theConfig, theSection ): - conditions = [] - for option in theConfig.options( theSection ): - if option in ("mp", "mp_alignments", "mp_deformations", "mp_APEs", "hp", "hp_alignments", "hp_deformations", "sm", "sm_alignments", "sm_deformations"): - matches = [re.match(_, option) for _ in ("^(..)$", "^(..)_alignments$", "^(..)_deformations$", "^(..)_APEs$")] - assert sum(bool(_) for _ in matches) == 1, option - condPars = theConfig.get(theSection, option).split(",") - condPars = [_.strip() for _ in condPars] - if matches[0]: - alignments = True - deformations = True - APEs = {"hp": False, "mp": True}[option] - elif matches[1]: - alignments = True - deformations = False - APEs = False - option = matches[1].group(1) - elif matches[2]: - alignments = False - deformations = True - APEs = False - option = matches[2].group(1) - elif matches[3]: - alignments = False - deformations = False - APEs = True - option = matches[3].group(1) - else: - assert False - - if option == "mp": - if len(condPars) == 1: - number, = condPars - jobm = None - elif len(condPars) == 2: - number, jobm = condPars - else: - raise AllInOneError("Up to 2 arguments accepted for {} (job number, and 
optionally jobm index)".format(option)) - - folder = "/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN/MP/MPproduction/{}{}/".format(option, number) - if not os.path.exists(folder): - raise AllInOneError(folder+" does not exist.") - folder = os.path.join(folder, "jobData") - jobmfolders = set() - if jobm is None: - for filename in os.listdir(folder): - if re.match("jobm([0-9]*)", filename) and os.path.isdir(os.path.join(folder, filename)): - jobmfolders.add(filename) - if len(jobmfolders) == 0: - raise AllInOneError("No jobm or jobm(number) folder in {}".format(folder)) - elif len(jobmfolders) == 1: - folder = os.path.join(folder, jobmfolders.pop()) - else: - raise AllInOneError( - "Multiple jobm or jobm(number) folders in {}\n".format(folder) - + ", ".join(jobmfolders) + "\n" - + "Please specify 0 for jobm, or a number for one of the others." - ) - elif jobm == "0": - folder = os.path.join(folder, "jobm") - if os.path.exists(folder + "0"): - raise AllInOneError("Not set up to handle a folder named jobm0") - else: - folder = os.path.join(folder, "jobm{}".format(jobm)) - - dbfile = os.path.join(folder, "alignments_MP.db") - if not os.path.exists(dbfile): - raise AllInOneError("No file {}. 
Maybe your alignment folder is corrupted, or maybe you specified the wrong jobm?".format(dbfile)) - - elif option in ("hp", "sm"): - if len(condPars) == 1: - number, = condPars - iteration = None - elif len(condPars) == 2: - number, iteration = condPars - else: - raise AllInOneError("Up to 2 arguments accepted for {} (job number, and optionally iteration)".format(option)) - folder = "/afs/cern.ch/cms/CAF/CMSALCA/ALCA_TRACKERALIGN2/HipPy/alignments/{}{}".format(option, number) - if not os.path.exists(folder): - raise AllInOneError(folder+" does not exist.") - if iteration is None: - for filename in os.listdir(folder): - match = re.match("alignments_iter([0-9]*).db", filename) - if match: - if iteration is None or int(match.group(1)) > iteration: - iteration = int(match.group(1)) - if iteration is None: - raise AllInOneError("No alignments in {}".format(folder)) - dbfile = os.path.join(folder, "alignments_iter{}.db".format(iteration)) - if not os.path.exists(dbfile): - raise AllInOneError("No file {}.".format(dbfile)) - - if "Deformations" not in getTagsMap(dbfile).keys(): - deformations = False #so that hp = XXXX works whether or not deformations were aligned - if not alignments: #then it's specified with hp_deformations, which is a mistake - raise AllInOneError("{}{} has no deformations".format(option, number)) - - else: - assert False, option - - if alignments: - conditions.append({"rcdName": "TrackerAlignmentRcd", - "connectString": "sqlite_file:"+dbfile, - "tagName": "Alignments", - "labelName": ""}) - if deformations: - conditions.append({"rcdName": "TrackerSurfaceDeformationRcd", - "connectString": "sqlite_file:"+dbfile, - "tagName": "Deformations", - "labelName": ""}) - if APEs: - conditions.append({"rcdName": "TrackerAlignmentErrorExtendedRcd", - "connectString": "sqlite_file:"+dbfile, - "tagName": "AlignmentErrorsExtended", - "labelName": ""}) - - elif option.startswith( "condition " ): - rcdName = option.split( "condition " )[1] - condPars = theConfig.get( 
theSection, option ).split( "," ) - if len(condPars) == 1: - if len(condPars[0])==0: - msg = ("In section [%s]: '%s' is used with too few " - "arguments. A connect_string and a tag are " - "required!"%(theSection, option)) - raise AllInOneError(msg) - elif self.__shorthandExists(rcdName, condPars[0]): - shorthand = condPars[0] - condPars = [ - self.condShorts[rcdName][shorthand]["connectString"], - self.condShorts[rcdName][shorthand]["tagName"], - self.condShorts[rcdName][shorthand]["labelName"]] - elif rcdName == "TrackerAlignmentErrorExtendedRcd" and condPars[0] == "zeroAPE": - raise AllInOneError("Please specify either zeroAPE_phase0 or zeroAPE_phase1") - #can probably make zeroAPE an alias of zeroAPE_phase1 at some point, - #but not sure if now is the time - else: - msg = ("In section [%s]: '%s' is used with '%s', " - "which is an unknown shorthand for '%s'. Either " - "provide at least a connect_string and a tag or " - "use a known shorthand.\n" - %(theSection, option, condPars[0], rcdName)) - if rcdName in self.condShorts: - msg += "Known shorthands for '%s':\n"%(rcdName) - theShorts = self.condShorts[rcdName] - knownShorts = [("\t"+key+": " - +theShorts[key]["connectString"]+"," - +theShorts[key]["tagName"]+"," - +theShorts[key]["labelName"]) \ - for key in theShorts] - msg+="\n".join(knownShorts) - else: - msg += ("There are no known shorthands for '%s'." - %(rcdName)) - raise AllInOneError(msg) - if len( condPars ) == 2: - condPars.append( "" ) - if len(condPars) > 3: - msg = ("In section [%s]: '%s' is used with too many " - "arguments. A maximum of 3 arguments is allowed." 
- %(theSection, option)) - raise AllInOneError(msg) - conditions.append({"rcdName": rcdName.strip(), - "connectString": condPars[0].strip(), - "tagName": condPars[1].strip(), - "labelName": condPars[2].strip()}) - - rcdnames = collections.Counter(condition["rcdName"] for condition in conditions) - if rcdnames and max(rcdnames.values()) >= 2: - raise AllInOneError("Some conditions are specified multiple times (possibly through mp or hp options)!\n" - + ", ".join(rcdname for rcdname, count in rcdnames.items() if count >= 2)) - - for condition in conditions: - self.__testDbExist(condition["connectString"], condition["tagName"]) - - return conditions - - def __testDbExist(self, dbpath, tagname): - if dbpath.startswith("sqlite_file:"): - if not os.path.exists( dbpath.split("sqlite_file:")[1] ): - raise AllInOneError("could not find file: '%s'"%dbpath.split("sqlite_file:")[1]) - elif tagname not in getTagsMap(dbpath).values(): - raise AllInOneError("{} does not exist in {}".format(tagname, dbpath)) - - def restrictTo( self, restriction ): - result = [] - if not restriction == None: - for mode in self.mode: - if mode in restriction: - result.append( mode ) - self.mode = result - - def getRepMap( self ): - result = { - "name": self.name, - "title": self.title, - "color": self.color, - "style": self.style, - "runGeomComp": self.runGeomComp, - "GlobalTag": self.globaltag - } - return result - - def getConditions(self): - """This function creates the configuration snippet to override - global tag conditions. - """ - if len( self.conditions ): - loadCond = ("\nimport CalibTracker.Configuration." 
- "Common.PoolDBESSource_cfi\n") - for cond in self.conditions: - if not cond["labelName"] == "": - temp = configTemplates.conditionsTemplate.replace( - "tag = cms.string('.oO[tagName]Oo.')", - ("tag = cms.string('.oO[tagName]Oo.')," - "\nlabel = cms.untracked.string('.oO[labelName]Oo.')")) - else: - temp = configTemplates.conditionsTemplate - loadCond += replaceByMap( temp, cond ) - else: - loadCond = "" - return loadCond diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/alternateValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/alternateValidationTemplates.py deleted file mode 100644 index fd0763e6909e5..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/alternateValidationTemplates.py +++ /dev/null @@ -1,5 +0,0 @@ -###################################################################### -###################################################################### -otherTemplate = """ -schum schum -""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/configTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/configTemplates.py deleted file mode 100644 index 31c75cd743cb5..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/configTemplates.py +++ /dev/null @@ -1,400 +0,0 @@ -from __future__ import absolute_import -from .alternateValidationTemplates import * -from .offlineValidationTemplates import * -from .primaryVertexValidationTemplates import * -from .primaryVertexResolutionTemplates import * -from .geometryComparisonTemplates import * -from .monteCarloValidationTemplates import * -from .trackSplittingValidationTemplates import * -from .zMuMuValidationTemplates import * -from .TkAlExceptions import AllInOneError -from .overlapValidationTemplates import * - -###################################################################### -###################################################################### -### ### -### General Templates ### -### ### 
-###################################################################### -###################################################################### - -###################################################################### -###################################################################### -loadGlobalTagTemplate=""" -#Global tag -process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") -from Configuration.AlCa.GlobalTag import GlobalTag -process.GlobalTag = GlobalTag(process.GlobalTag,".oO[GlobalTag]Oo.") -""" - - -###################################################################### -###################################################################### -conditionsTemplate=""" -process.conditionsIn.oO[rcdName]Oo. = CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone( - connect = cms.string('.oO[connectString]Oo.'), - toGet = cms.VPSet(cms.PSet(record = cms.string('.oO[rcdName]Oo.'), - tag = cms.string('.oO[tagName]Oo.') - ) - ) - ) -process.prefer_conditionsIn.oO[rcdName]Oo. = cms.ESPrefer("PoolDBESSource", "conditionsIn.oO[rcdName]Oo.") -""" - - -###################################################################### -###################################################################### -#batch job execution -scriptTemplate="""#!/bin/bash -#init -#ulimit -v 3072000 -#export STAGE_SVCCLASS=cmscafuser -#save path to the condor batch working directory (/pool/condor) - -export CONDORWORKDIR=`pwd -P` -echo CONDOR working directory is $CONDORWORKDIR -source /afs/cern.ch/cms/caf/setup.sh -export X509_USER_PROXY=.oO[scriptsdir]Oo./.user_proxy -cd .oO[CMSSW_BASE]Oo./src -export SCRAM_ARCH=.oO[SCRAM_ARCH]Oo. -eval `scramv1 ru -sh` -#mkdir -p .oO[datadir]Oo. &>! /dev/null - -#remove possible result file from previous runs -previous_results=$(ls /eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo.) 
-for file in ${previous_results} -do - if [ ${file} = /eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./.oO[outputFile]Oo. ] - then - xrdcp -f root://eoscms//eos/cms${file} root://eoscms//eos/cms${file}.bak - fi -done - -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - mkdir -p .oO[workdir]Oo. - rm -f .oO[workdir]Oo./* - cd .oO[workdir]Oo. -else - mkdir -p $CONDORWORKDIR/TkAllInOneTool - cd $CONDORWORKDIR/TkAllInOneTool -fi - -# rm -f .oO[workdir]Oo./* -# cd .oO[workdir]Oo. - -#run -pwd -df -h . -which cmsRun -.oO[CommandLine]Oo. -echo "----" -echo "List of files in $(pwd):" -ls -ltr -echo "----" -echo "" - - -#retrieve -mkdir -p .oO[logdir]Oo. >&! /dev/null -gzip -f LOGFILE_*_.oO[name]Oo..log -find . -maxdepth 1 -name "LOGFILE*.oO[alignmentName]Oo.*" -print | xargs -I {} bash -c "cp {} .oO[logdir]Oo." - -#copy root files to eos -mkdir -p /eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo. -if [ .oO[parallelJobs]Oo. -eq 1 ] -then - root_files=$(ls --color=never -d *.oO[alignmentName]Oo.*.root) -else - root_files=$(ls --color=never -d *.oO[alignmentName]Oo._.oO[nIndex]Oo.*.root) -fi -echo ${root_files} - -for file in ${root_files} -do - xrdcp -f ${file} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo. - echo ${file} -done - -#cleanup -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - rm -rf .oO[workdir]Oo. -fi -echo "done." -""" - - -###################################################################### -###################################################################### -cfgTemplate=""" -import FWCore.ParameterSet.Config as cms - -process = cms.Process(".oO[ProcessName]Oo.") - -.oO[datasetDefinition]Oo. -.oO[Bookkeeping]Oo. -.oO[LoadBasicModules]Oo. -.oO[TrackSelectionRefitting]Oo. -.oO[LoadGlobalTagTemplate]Oo. -.oO[condLoad]Oo. -.oO[ValidationConfig]Oo. -.oO[FileOutputTemplate]Oo. - -.oO[DefinePath]Oo. 
- -print("Done") -""" - - -###################################################################### -###################################################################### -Bookkeeping = """ -process.options = cms.untracked.PSet( - wantSummary = cms.untracked.bool(False), - Rethrow = cms.untracked.vstring("ProductNotFound"), # make this exception fatal - fileMode = cms.untracked.string('NOMERGE') # no ordering needed, but calls endRun/beginRun etc. at file boundaries -) - -process.load("FWCore.MessageLogger.MessageLogger_cfi") -process.MessageLogger.cerr.FwkReport.reportEvery = 1000 -process.MessageLogger.cout.enableStatistics = cms.untracked.bool(True) -""" - - -###################################################################### -###################################################################### -CommonTrackSelectionRefitting = """ -import Alignment.CommonAlignment.tools.trackselectionRefitting as trackselRefit -process.seqTrackselRefit = trackselRefit.getSequence(process, '.oO[trackcollection]Oo.', - isPVValidation=.oO[ispvvalidation]Oo., - TTRHBuilder='.oO[ttrhbuilder]Oo.', - usePixelQualityFlag=.oO[usepixelqualityflag]Oo., - openMassWindow=.oO[openmasswindow]Oo., - cosmicsDecoMode=.oO[cosmicsdecomode]Oo., - cosmicsZeroTesla=.oO[cosmics0T]Oo., - momentumConstraint=.oO[momentumconstraint]Oo., - cosmicTrackSplitting=.oO[istracksplitting]Oo., - use_d0cut=.oO[use_d0cut]Oo., - ) - -.oO[trackhitfiltercommands]Oo. -""" - - -###################################################################### -###################################################################### -SingleTrackRefitter = """ -process.load("RecoTracker.TrackProducer.TrackRefitters_cff") -process.TrackRefitter.src = ".oO[TrackCollection]Oo." -process.TrackRefitter.TTRHBuilder = ".oO[ttrhbuilder]Oo." 
-process.TrackRefitter.NavigationSchool = "" -""" - - -###################################################################### -###################################################################### -LoadBasicModules = """ -process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff") -process.load("Configuration.Geometry.GeometryDB_cff") -process.load('Configuration.StandardSequences.Services_cff') -process.load("Configuration.StandardSequences..oO[magneticField]Oo._cff") -""" - - -###################################################################### -###################################################################### -FileOutputTemplate = """ -process.TFileService = cms.Service("TFileService", - fileName = cms.string('.oO[outputFile]Oo.') -) -""" - - -###################################################################### -###################################################################### -DefinePath_CommonSelectionRefitting = """ -process.p = cms.Path( -process.seqTrackselRefit*.oO[ValidationSequence]Oo.) -""" - -###################################################################### -###################################################################### -mergeTemplate="""#!/bin/bash -CWD=`pwd -P` -cd .oO[CMSSW_BASE]Oo./src -export SCRAM_ARCH=.oO[SCRAM_ARCH]Oo. -eval `scramv1 ru -sh` - - -.oO[createResultsDirectory]Oo. - -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - mkdir -p .oO[workdir]Oo. - cd .oO[workdir]Oo. -else - cd $CWD -fi -echo "Working directory: $(pwd -P)" - -############################################################################### -# download root files from eos -root_files=$(ls /eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo. \ - | grep ".root$" | grep -v "result.root$") -#for file in ${root_files} -#do -# xrdcp -f root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./${file} . -# echo ${file} -#done - - -#run -.oO[DownloadData]Oo. 
-.oO[CompareAlignments]Oo. - -.oO[RunValidationPlots]Oo. - -# clean-up -# ls -l *.root -rm -f *.root - -#zip stdout and stderr from the farm jobs -cd .oO[logdir]Oo. -find . -name "*.stderr" -exec gzip -f {} \; -find . -name "*.stdout" -exec gzip -f {} \; -""" - - - -###################################################################### -###################################################################### -mergeParallelOfflineTemplate="""#!/bin/bash -CWD=`pwd -P` -cd .oO[CMSSW_BASE]Oo./src -export SCRAM_ARCH=.oO[SCRAM_ARCH]Oo. -eval `scramv1 ru -sh` - -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - mkdir -p .oO[workdir]Oo. - cd .oO[workdir]Oo. -else - cd $CWD -fi -echo "Working directory: $(pwd -P)" - -############################################################################### -# download root files from eos -root_files=$(ls /eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo. \ - | grep ".root$" | grep -v "result.root$") -#for file in ${root_files} -#do -# xrdcp -f root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./${file} . -# echo ${file} -#done - - -#run -.oO[DownloadData]Oo. -""" - -###################################################################### -###################################################################### -createResultsDirectoryTemplate=""" -#create results-directory and copy used configuration there -mkdir -p .oO[datadir]Oo. -cp .oO[logdir]Oo./usedConfiguration.ini .oO[datadir]Oo. -""" - - -###################################################################### -###################################################################### -mergeParallelResults=""" - -.oO[beforeMerge]Oo. -.oO[doMerge]Oo. - -# create log file -ls -al .oO[mergeParallelFilePrefixes]Oo. > .oO[datadir]Oo./log_rootfilelist.txt - -# Remove parallel job files -.oO[rmUnmerged]Oo. 
-""" - - -###################################################################### -###################################################################### -compareAlignmentsExecution=""" -#merge for .oO[validationId]Oo. if it does not exist or is not up-to-date -echo -e "\n\nComparing validations" -mkdir -p /eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./ -cp .oO[Alignment/OfflineValidation]Oo./scripts/compareFileAges.C . -root -x -q -b -l "compareFileAges.C(\\\"root://eoscms.cern.ch//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./.oO[validationId]Oo._result.root\\\", \\\".oO[compareStringsPlain]Oo.\\\")" -comparisonNeeded=${?} - -if [[ ${comparisonNeeded} -eq 1 ]] -then - cp .oO[compareAlignmentsPath]Oo. . - root -x -q -b -l '.oO[compareAlignmentsName]Oo.++(\".oO[compareStrings]Oo.\", ".oO[legendheader]Oo.", ".oO[customtitle]Oo.", ".oO[customrighttitle]Oo.", .oO[bigtext]Oo.)' - mv result.root .oO[validationId]Oo._result.root - xrdcp -f .oO[validationId]Oo._result.root root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo. -else - echo ".oO[validationId]Oo._result.root is up-to-date, no need to compare again." - xrdcp -f root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./.oO[validationId]Oo._result.root . -fi -""" - - -###################################################################### -###################################################################### -crabCfgTemplate=""" -[CRAB] -jobtype = cmssw -scheduler = caf -use_server = 0 - -[CMSSW] -datasetpath = .oO[dataset]Oo. -pset = .oO[cfgFile]Oo. -total_number_of_.oO[McOrData]Oo. -number_of_jobs = .oO[numberOfJobs]Oo. -output_file = .oO[outputFile]Oo. -runselection = .oO[runRange]Oo. -lumi_mask = .oO[JSON]Oo. - -[USER] -return_data = 0 -copy_data = 1 -storage_element = T2_CH_CERN -user_remote_dir = .oO[eosdir]Oo. -ui_working_dir = .oO[crabWorkingDir]Oo. -# script_exe = .oO[script]Oo. 
-# .oO[email]Oo. - -[CAF] -queue = .oO[queue]Oo. -""" - - - - -###################################################################### -###################################################################### -### ### -### Alternate Templates ### -### ### -###################################################################### -###################################################################### - - -def alternateTemplate( templateName, alternateTemplateName ): - - if not templateName in globals().keys(): - msg = "unknown template to replace %s"%templateName - raise AllInOneError(msg) - if not alternateTemplateName in globals().keys(): - msg = "unknown template to replace %s"%alternateTemplateName - raise AllInOneError(msg) - globals()[ templateName ] = globals()[ alternateTemplateName ] - # = eval("configTemplates.%s"%"alternateTemplate") diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/crabWrapper.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/crabWrapper.py deleted file mode 100644 index 4500f61a4cbdf..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/crabWrapper.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -import sys -import os -import subprocess -from .TkAlExceptions import AllInOneError - -# script which needs to be sourced for use of crab -crabSourceScript = '/afs/cern.ch/cms/ccs/wm/scripts/Crab/crab.sh' - -# source the environment variables needed for crab -sourceStr = ( 'cd $CMSSW_BASE/src;' - 'source /afs/cern.ch/cms/LCG/LCG-2/UI/cms_ui_env.sh;' - 'eval `scramv1 runtime -sh`;' - 'source ' + crabSourceScript + ' && env' ) -sourceCmd = ['bash', '-c', sourceStr ] -sourceProc = subprocess.Popen(sourceCmd, stdout = subprocess.PIPE) -for line in sourceProc.stdout: - (key, _, value) = line.partition("=") - os.environ[key] = value.replace("\n","") -sourceProc.communicate() - -# source variables from crab wrapper script -crabFile = 
open('/'.join([os.environ["CRABPYTHON"],'crab'])) -theLines = crabFile.readlines() -theLine = [] -for line in theLines: - if ( line[0] == '#' ) or \ - ( line == ' python $CRABPYTHON/crab.py $*\n' ): - continue - theLine.append( line ) -tempFilePath = "tempCrab" -tempFile = open( tempFilePath, "w" ) -tempFile.write( ''.join(theLine) ) -tempFile.close() -crabStr = ('source tempCrab && env' ) -crabCmd = ['bash', '-c', crabStr ] -crabProc = subprocess.Popen(crabCmd, stdout = subprocess.PIPE) -for line in crabProc.stdout: - (key, _, value) = line.partition("=") - os.environ[key] = value.replace("\n","") -crabProc.communicate() -os.remove( tempFilePath ) - -# add sourced paths to search path of python -sys.path.extend( os.environ["PYTHONPATH"].split( ':' ) ) - -import crab -import crab_exceptions - -class CrabWrapper(object): - def run( self, options ): - theCrab = crab.Crab() - try: - theCrab.initialize_( options ) - theCrab.run() - except crab_exceptions.CrabException as e: - raise AllInOneError( str( e ) ) - del theCrab - - -if __name__ == "__main__": - theCrab = CrabWrapper() - theCrabOptions = {"-create":"", - "-cfg":"TkAlOfflineValidation.shiftPlots.crab.cfg"} - theCrab.run( theCrabOptions ) - - theCrabOptions = {"-submit":""} - theCrab.run( theCrabOptions ) - - theCrabOptions = {"-status":""} - theCrab.run( theCrabOptions ) - - theCrabOptions = {"-getoutput":""} - try: - theCrab.run( theCrabOptions ) - except AllInOneError as e: - print("crab: ", e) diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/dataset.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/dataset.py deleted file mode 100644 index 7c3fd08b497b3..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/dataset.py +++ /dev/null @@ -1,941 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -# idea stolen from: -# http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/ -# PhysicsTools/PatAlgos/python/tools/cmsswVersionTools.py -from 
builtins import range -import bisect -import datetime -import json -import os -import re -import sys - -import Utilities.General.cmssw_das_client as das_client -from FWCore.PythonUtilities.LumiList import LumiList - -from .helperFunctions import cache -from .TkAlExceptions import AllInOneError - -class Dataset(object): - def __init__( self, datasetName, dasLimit = 0, tryPredefinedFirst = True, - cmssw = os.environ["CMSSW_BASE"], cmsswrelease = os.environ["CMSSW_RELEASE_BASE"], - magneticfield = None, dasinstance = None): - self.__name = datasetName - self.__origName = datasetName - self.__dasLimit = dasLimit - self.__dasinstance = dasinstance - self.__cmssw = cmssw - self.__cmsswrelease = cmsswrelease - self.__firstusedrun = None - self.__lastusedrun = None - self.__parentDataset = None - - # check, if dataset name matches CMS dataset naming scheme - if re.match( r'/.+/.+/.+', self.__name ): - self.__official = True - fileName = "Dataset" + self.__name.replace("/","_") + "_cff.py" - else: - self.__official = False - fileName = self.__name + "_cff.py" - - searchPath1 = os.path.join( self.__cmssw, "python", - "Alignment", "OfflineValidation", - fileName ) - searchPath2 = os.path.join( self.__cmssw, "src", - "Alignment", "OfflineValidation", - "python", fileName ) - searchPath3 = os.path.join( self.__cmsswrelease, - "python", "Alignment", - "OfflineValidation", fileName ) - if self.__official and not tryPredefinedFirst: - self.__predefined = False - elif os.path.exists( searchPath1 ): - self.__predefined = True - self.__filename = searchPath1 - elif os.path.exists( searchPath2 ): - msg = ("The predefined dataset '%s' does exist in '%s', but " - "you need to run 'scram b' first." - %( self.__name, searchPath2 )) - if self.__official: - print(msg) - print("Getting the data from DAS again. 
To go faster next time, run scram b.") - else: - raise AllInOneError( msg ) - elif os.path.exists( searchPath3 ): - self.__predefined = True - self.__filename = searchPath3 - elif self.__official: - self.__predefined = False - else: - msg = ("The predefined dataset '%s' does not exist. Please " - "create it first or check for typos."%( self.__name )) - raise AllInOneError( msg ) - - if self.__predefined and self.__official: - self.__name = "Dataset" + self.__name.replace("/","_") - - if magneticfield is not None: - try: - magneticfield = float(magneticfield) - except ValueError: - raise AllInOneError("Bad magneticfield {} which can't be converted to float".format(magneticfield)) - self.__inputMagneticField = magneticfield - - self.__dataType = self.__getDataType() - self.__magneticField = self.__getMagneticField() - - - def __chunks( self, theList, n ): - """ Yield successive n-sized chunks from theList. - """ - for i in range( 0, len( theList ), n ): - yield theList[i:i+n] - - __source_template= ("%(header)s" - "%(importCms)s" - "import FWCore.PythonUtilities.LumiList as LumiList\n\n" - "%(goodLumiSecStr)s" - "readFiles = cms.untracked.vstring()\n" - "secFiles = cms.untracked.vstring()\n" - "%(process)ssource = cms.Source(\"PoolSource\",\n" - "%(lumiStr)s" - "%(tab)s secondaryFileNames =" - "secFiles,\n" - "%(tab)s fileNames = readFiles\n" - ")\n" - "%(files)s\n" - "%(lumiSecExtend)s\n" - "%(process)smaxEvents = cms.untracked.PSet( " - "input = cms.untracked.int32(int(%(nEvents)s)) )\n" - "%(skipEventsString)s\n") - - __dummy_source_template = ("readFiles = cms.untracked.vstring()\n" - "secFiles = cms.untracked.vstring()\n" - "%(process)ssource = cms.Source(\"PoolSource\",\n" - "%(tab)s secondaryFileNames =" - "secFiles,\n" - "%(tab)s fileNames = readFiles\n" - ")\n" - "readFiles.extend(['dummy_File.root'])\n" - "%(process)smaxEvents = cms.untracked.PSet( " - "input = cms.untracked.int32(int(%(nEvents)s)) )\n" - "%(skipEventsString)s\n") - - def 
__lumiSelectionSnippet( self, jsonPath = None, firstRun = None, lastRun = None ): - lumiSecExtend = "" - if firstRun or lastRun or jsonPath: - if not jsonPath: - selectedRunList = self.__getRunList() - if firstRun: - selectedRunList = [ run for run in selectedRunList \ - if self.__findInJson(run, "run_number") >= firstRun ] - if lastRun: - selectedRunList = [ run for run in selectedRunList \ - if self.__findInJson(run, "run_number") <= lastRun ] - lumiList = [ str( self.__findInJson(run, "run_number") ) + ":1-" \ - + str( self.__findInJson(run, "run_number") ) + ":max" \ - for run in selectedRunList ] - splitLumiList = list( self.__chunks( lumiList, 255 ) ) - else: - theLumiList = None - try: - theLumiList = LumiList ( filename = jsonPath ) - except ValueError: - pass - - if theLumiList is not None: - allRuns = theLumiList.getRuns() - runsToRemove = [] - for run in allRuns: - if firstRun and int( run ) < firstRun: - runsToRemove.append( run ) - if lastRun and int( run ) > lastRun: - runsToRemove.append( run ) - theLumiList.removeRuns( runsToRemove ) - splitLumiList = list( self.__chunks( - theLumiList.getCMSSWString().split(','), 255 ) ) - if not (splitLumiList and splitLumiList[0] and splitLumiList[0][0]): - splitLumiList = None - else: - with open(jsonPath) as f: - jsoncontents = f.read() - if "process.source.lumisToProcess" in jsoncontents: - msg = "%s is not a json file, but it seems to be a CMSSW lumi selection cff snippet. Trying to use it" % jsonPath - if firstRun or lastRun: - msg += ("\n (after applying firstRun and/or lastRun)") - msg += ".\nPlease note that, depending on the format of this file, it may not work as expected." - msg += "\nCheck your config file to make sure that it worked properly." 
- print(msg) - - runlist = self.__getRunList() - if firstRun or lastRun: - self.__firstusedrun = -1 - self.__lastusedrun = -1 - jsoncontents = re.sub(r"\d+:(\d+|max)(-\d+:(\d+|max))?", self.getForceRunRangeFunction(firstRun, lastRun), jsoncontents) - jsoncontents = (jsoncontents.replace("'',\n","").replace("''\n","") - .replace('"",\n','').replace('""\n','')) - self.__firstusedrun = max(self.__firstusedrun, int(self.__findInJson(runlist[0],"run_number"))) - self.__lastusedrun = min(self.__lastusedrun, int(self.__findInJson(runlist[-1],"run_number"))) - if self.__lastusedrun < self.__firstusedrun: - jsoncontents = None - else: - self.__firstusedrun = int(self.__findInJson(runlist[0],"run_number")) - self.__lastusedrun = int(self.__findInJson(runlist[-1],"run_number")) - lumiSecExtend = jsoncontents - splitLumiList = None - else: - raise AllInOneError("%s is not a valid json file!" % jsonPath) - - if splitLumiList and splitLumiList[0] and splitLumiList[0][0]: - lumiSecStr = [ "',\n'".join( lumis ) \ - for lumis in splitLumiList ] - lumiSecStr = [ "lumiSecs.extend( [\n'" + lumis + "'\n] )" \ - for lumis in lumiSecStr ] - lumiSecExtend = "\n".join( lumiSecStr ) - runlist = self.__getRunList() - self.__firstusedrun = max(int(splitLumiList[0][0].split(":")[0]), int(self.__findInJson(runlist[0],"run_number"))) - self.__lastusedrun = min(int(splitLumiList[-1][-1].split(":")[0]), int(self.__findInJson(runlist[-1],"run_number"))) - elif lumiSecExtend: - pass - else: - msg = "You are trying to run a validation without any runs! 
Check that:" - if firstRun or lastRun: - msg += "\n - firstRun/begin and lastRun/end are correct for this dataset, and there are runs in between containing data" - if jsonPath: - msg += "\n - your JSON file is correct for this dataset, and the runs contain data" - if (firstRun or lastRun) and jsonPath: - msg += "\n - firstRun/begin and lastRun/end are consistent with your JSON file" - raise AllInOneError(msg) - - else: - if self.__inputMagneticField is not None: - pass #never need self.__firstusedrun or self.__lastusedrun - else: - runlist = self.__getRunList() - self.__firstusedrun = int(self.__findInJson(self.__getRunList()[0],"run_number")) - self.__lastusedrun = int(self.__findInJson(self.__getRunList()[-1],"run_number")) - - return lumiSecExtend - - def __fileListSnippet(self, crab=False, parent=False, firstRun=None, lastRun=None, forcerunselection=False): - if crab: - files = "" - else: - splitFileList = list( self.__chunks( self.fileList(firstRun=firstRun, lastRun=lastRun, forcerunselection=forcerunselection), 255 ) ) - if not splitFileList: - raise AllInOneError("No files found for dataset {}. 
Check the spelling, or maybe specify another das instance?".format(self.__name)) - fileStr = [ "',\n'".join( files ) for files in splitFileList ] - fileStr = [ "readFiles.extend( [\n'" + files + "'\n] )" \ - for files in fileStr ] - files = "\n".join( fileStr ) - - if parent: - splitParentFileList = list( self.__chunks( self.fileList(parent=True, firstRun=firstRun, lastRun=lastRun, forcerunselection=forcerunselection), 255 ) ) - parentFileStr = [ "',\n'".join( parentFiles ) for parentFiles in splitParentFileList ] - parentFileStr = [ "secFiles.extend( [\n'" + parentFiles + "'\n] )" \ - for parentFiles in parentFileStr ] - parentFiles = "\n".join( parentFileStr ) - files += "\n\n" + parentFiles - - return files - - def __createSnippet( self, jsonPath = None, begin = None, end = None, - firstRun = None, lastRun = None, repMap = None, - crab = False, parent = False ): - - if firstRun: - firstRun = int( firstRun ) - if lastRun: - lastRun = int( lastRun ) - if ( begin and firstRun ) or ( end and lastRun ): - msg = ( "The Usage of " - + "'begin' & 'firstRun' " * int( bool( begin and - firstRun ) ) - + "and " * int( bool( ( begin and firstRun ) and - ( end and lastRun ) ) ) - + "'end' & 'lastRun' " * int( bool( end and lastRun ) ) - + "is ambigous." 
) - raise AllInOneError( msg ) - if begin or end: - ( firstRun, lastRun ) = self.convertTimeToRun( - begin = begin, end = end, firstRun = firstRun, - lastRun = lastRun ) - if ( firstRun and lastRun ) and ( firstRun > lastRun ): - msg = ( "The lower time/runrange limit ('begin'/'firstRun') " - "chosen is greater than the upper time/runrange limit " - "('end'/'lastRun').") - raise AllInOneError( msg ) - - lumiSecExtend = self.__lumiSelectionSnippet(jsonPath=jsonPath, firstRun=firstRun, lastRun=lastRun) - lumiStr = goodLumiSecStr = "" - if lumiSecExtend: - goodLumiSecStr = "lumiSecs = cms.untracked.VLuminosityBlockRange()\n" - lumiStr = " lumisToProcess = lumiSecs,\n" - - files = self.__fileListSnippet(crab=crab, parent=parent, firstRun=firstRun, lastRun=lastRun, forcerunselection=False) - - theMap = repMap - theMap["files"] = files - theMap["json"] = jsonPath - theMap["lumiStr"] = lumiStr - theMap["goodLumiSecStr"] = goodLumiSecStr%( theMap ) - theMap["lumiSecExtend"] = lumiSecExtend - if crab: - dataset_snippet = self.__dummy_source_template%( theMap ) - else: - dataset_snippet = self.__source_template%( theMap ) - return dataset_snippet - - def __find_lt( self, a, x ): - 'Find rightmost value less than x' - i = bisect.bisect_left( a, x ) - if i: - return i-1 - raise ValueError - - def __find_ge( self, a, x): - 'Find leftmost item greater than or equal to x' - i = bisect.bisect_left( a, x ) - if i != len( a ): - return i - raise ValueError - - def __findInJson(self, jsondict, strings): - if isinstance(strings, str): - strings = [ strings ] - - if len(strings) == 0: - return jsondict - if isinstance(jsondict,dict): - if strings[0] in jsondict: - try: - return self.__findInJson(jsondict[strings[0]], strings[1:]) - except KeyError: - pass - else: - for a in jsondict: - if strings[0] in a: - try: - return self.__findInJson(a[strings[0]], strings[1:]) - except (TypeError, KeyError): #TypeError because a could be a string and contain strings[0] - pass - #if it's not found 
- raise KeyError("Can't find " + strings[0]) - - def forcerunrange(self, firstRun, lastRun, s): - """s must be in the format run1:lum1-run2:lum2""" - s = s.group() - run1 = s.split("-")[0].split(":")[0] - lum1 = s.split("-")[0].split(":")[1] - try: - run2 = s.split("-")[1].split(":")[0] - lum2 = s.split("-")[1].split(":")[1] - except IndexError: - run2 = run1 - lum2 = lum1 - if int(run2) < firstRun or int(run1) > lastRun: - return "" - if int(run1) < firstRun or firstRun < 0: - run1 = firstRun - lum1 = 1 - if int(run2) > lastRun: - run2 = lastRun - lum2 = "max" - if int(run1) < self.__firstusedrun or self.__firstusedrun < 0: - self.__firstusedrun = int(run1) - if int(run2) > self.__lastusedrun: - self.__lastusedrun = int(run2) - return "%s:%s-%s:%s" % (run1, lum1, run2, lum2) - - def getForceRunRangeFunction(self, firstRun, lastRun): - def forcerunrangefunction(s): - return self.forcerunrange(firstRun, lastRun, s) - return forcerunrangefunction - - def __getData( self, dasQuery, dasLimit = 0 ): - dasData = das_client.get_data(dasQuery, dasLimit) - if isinstance(dasData, str): - jsondict = json.loads( dasData ) - else: - jsondict = dasData - # Check, if the DAS query fails - try: - error = self.__findInJson(jsondict,["data","error"]) - except KeyError: - error = None - if error or self.__findInJson(jsondict,"status") != 'ok' or "data" not in jsondict: - try: - jsonstr = self.__findInJson(jsondict,"reason") - except KeyError: - jsonstr = str(jsondict) - if len(jsonstr) > 10000: - jsonfile = "das_query_output_%i.txt" - i = 0 - while os.path.lexists(jsonfile % i): - i += 1 - jsonfile = jsonfile % i - theFile = open( jsonfile, "w" ) - theFile.write( jsonstr ) - theFile.close() - msg = "The DAS query returned an error. The output is very long, and has been stored in:\n" + jsonfile - else: - msg = "The DAS query returned a error. Here is the output\n" + jsonstr - msg += "\nIt's possible that this was a server error. 
If so, it may work if you try again later" - raise AllInOneError(msg) - return self.__findInJson(jsondict,"data") - - def __getDataType( self ): - if self.__predefined: - with open(self.__filename) as f: - datatype = None - for line in f.readlines(): - if line.startswith("#data type: "): - if datatype is not None: - raise AllInOneError(self.__filename + " has multiple 'data type' lines.") - datatype = line.replace("#data type: ", "").replace("\n","") - return datatype - return "unknown" - - dasQuery_type = ( 'dataset dataset=%s instance=%s detail=true | grep dataset.datatype,' - 'dataset.name'%( self.__name, self.__dasinstance ) ) - data = self.__getData( dasQuery_type ) - - try: - return self.__findInJson(data, ["dataset", "datatype"]) - except KeyError: - print ("Cannot find the datatype of the dataset '%s'\n" - "It may not be possible to automatically find the magnetic field,\n" - "and you will not be able run in CRAB mode" - %( self.name() )) - return "unknown" - - def __getParentDataset( self ): - dasQuery = "parent dataset=" + self.__name + " instance="+self.__dasinstance - data = self.__getData( dasQuery ) - try: - return self.__findInJson(data, ["parent", "name"]) - except KeyError: - raise AllInOneError("Cannot find the parent of the dataset '" + self.__name + "'\n" - "Here is the DAS output:\n" + str(jsondict) + - "\nIt's possible that this was a server error. 
If so, it may work if you try again later") - - def __getMagneticField( self ): - Bfieldlocation = os.path.join( self.__cmssw, "python", "Configuration", "StandardSequences" ) - if not os.path.isdir(Bfieldlocation): - Bfieldlocation = os.path.join( self.__cmsswrelease, "python", "Configuration", "StandardSequences" ) - Bfieldlist = [ f.replace("_cff.py",'') \ - for f in os.listdir(Bfieldlocation) \ - if f.startswith("MagneticField_") and f.endswith("_cff.py") ] - Bfieldlist.sort( key = lambda Bfield: -len(Bfield) ) #Put it in order of decreasing length, so that searching in the name gives the longer match - - if self.__inputMagneticField is not None: - if self.__inputMagneticField == 3.8: - return "MagneticField" - elif self.__inputMagneticField == 0: - return "MagneticField_0T" - else: - raise ValueError("Unknown input magnetic field {}".format(self.__inputMagneticField)) - - if self.__predefined: - with open(self.__filename) as f: - datatype = None - Bfield = None - for line in f.readlines(): - if line.startswith("#data type: "): - if datatype is not None: - raise AllInOneError(self.__filename + " has multiple 'data type' lines.") - datatype = line.replace("#data type: ", "").replace("\n","") - datatype = datatype.split("#")[0].strip() - if line.startswith("#magnetic field: "): - if Bfield is not None: - raise AllInOneError(self.__filename + " has multiple 'magnetic field' lines.") - Bfield = line.replace("#magnetic field: ", "").replace("\n","") - Bfield = Bfield.split("#")[0].strip() - if Bfield is not None: - Bfield = Bfield.split(",")[0] - if Bfield in Bfieldlist or Bfield == "unknown": - return Bfield - else: - print("Your dataset has magnetic field '%s', which does not exist in your CMSSW version!" 
% Bfield) - print("Using Bfield='unknown' - this will revert to the default") - return "unknown" - elif datatype == "data": - return "MagneticField" #this should be in the "#magnetic field" line, but for safety in case it got messed up - else: - return "unknown" - - if self.__dataType == "data": - return "MagneticField" - - #try to find the magnetic field from DAS - #it seems to be there for the newer (7X) MC samples, except cosmics - dasQuery_B = ('dataset dataset=%s instance=%s'%(self.__name, self.__dasinstance)) - data = self.__getData( dasQuery_B ) - - try: - Bfield = self.__findInJson(data, ["dataset", "mcm", "sequences", "magField"]) - if Bfield in Bfieldlist: - return Bfield - elif Bfield == "38T" or Bfield == "38T_PostLS1": - return "MagneticField" - elif "MagneticField_" + Bfield in Bfieldlist: - return "MagneticField_" + Bfield - elif Bfield == "": - pass - else: - print("Your dataset has magnetic field '%s', which does not exist in your CMSSW version!" % Bfield) - print("Using Bfield='unknown' - this will revert to the default magnetic field") - return "unknown" - except KeyError: - pass - - for possibleB in Bfieldlist: - if (possibleB != "MagneticField" - and possibleB.replace("MagneticField_","") in self.__name.replace("TkAlCosmics0T", "")): - #final attempt - try to identify the dataset from the name - #all cosmics dataset names contain "TkAlCosmics0T" - if possibleB == "MagneticField_38T" or possibleB == "MagneticField_38T_PostLS1": - return "MagneticField" - return possibleB - - return "unknown" - - def __getMagneticFieldForRun( self, run = -1, tolerance = 0.5 ): - """For MC, this returns the same as the previous function. - For data, it gets the magnetic field from the runs. 
This is important for - deciding which template to use for offlinevalidation - """ - if self.__dataType == "mc" and self.__magneticField == "MagneticField": - return 3.8 #For 3.8T MC the default MagneticField is used - if self.__inputMagneticField is not None: - return self.__inputMagneticField - if "T" in self.__magneticField: - Bfield = self.__magneticField.split("T")[0].replace("MagneticField_","") - try: - return float(Bfield) / 10.0 #e.g. 38T and 38T_PostLS1 both return 3.8 - except ValueError: - pass - if self.__predefined: - with open(self.__filename) as f: - Bfield = None - for line in f.readlines(): - if line.startswith("#magnetic field: ") and "," in line: - if Bfield is not None: - raise AllInOneError(self.__filename + " has multiple 'magnetic field' lines.") - return float(line.replace("#magnetic field: ", "").split(",")[1].split("#")[0].strip()) - - if run > 0: - dasQuery = ('run=%s instance=%s detail=true'%(run, self.__dasinstance)) #for data - data = self.__getData(dasQuery) - try: - return self.__findInJson(data, ["run","bfield"]) - except KeyError: - return "unknown Can't get the magnetic field for run %s from DAS" % run - - #run < 0 - find B field for the first and last runs, and make sure they're compatible - # (to within tolerance) - #NOT FOOLPROOF! The magnetic field might go up and then down, or vice versa - if self.__firstusedrun is None or self.__lastusedrun is None: - return "unknown Can't get the exact magnetic field for the dataset until data has been retrieved from DAS." 
- firstrunB = self.__getMagneticFieldForRun(self.__firstusedrun) - lastrunB = self.__getMagneticFieldForRun(self.__lastusedrun) - try: - if abs(firstrunB - lastrunB) <= tolerance: - return .5*(firstrunB + lastrunB) - print(firstrunB, lastrunB, tolerance) - return ("unknown The beginning and end of your run range for %s\n" - "have different magnetic fields (%s, %s)!\n" - "Try limiting the run range using firstRun, lastRun, begin, end, or JSON,\n" - "or increasing the tolerance (in dataset.py) from %s.") % (self.__name, firstrunB, lastrunB, tolerance) - except TypeError: - try: - if "unknown" in firstrunB: - return firstrunB - else: - return lastrunB - except TypeError: - return lastrunB - - @cache - def __getFileInfoList( self, dasLimit, parent = False ): - if self.__predefined: - if parent: - extendstring = "secFiles.extend" - else: - extendstring = "readFiles.extend" - with open(self.__fileName) as f: - files = [] - copy = False - for line in f.readlines(): - if "]" in line: - copy = False - if copy: - files.append({name: line.translate(None, "', " + '"')}) - if extendstring in line and "[" in line and "]" not in line: - copy = True - return files - - if parent: - searchdataset = self.parentDataset() - else: - searchdataset = self.__name - dasQuery_files = ( 'file dataset=%s instance=%s detail=true | grep file.name, file.nevents, ' - 'file.creation_time, ' - 'file.modification_time'%( searchdataset, self.__dasinstance ) ) - print("Requesting file information for '%s' from DAS..."%( searchdataset ), end=' ') - sys.stdout.flush() - data = self.__getData( dasQuery_files, dasLimit ) - print("Done.") - data = [ self.__findInJson(entry,"file") for entry in data ] - if len( data ) == 0: - msg = ("No files are available for the dataset '%s'. This can be " - "due to a typo or due to a DAS problem. 
Please check the " - "spelling of the dataset and/or retry to run " - "'validateAlignments.py'."%( self.name() )) - raise AllInOneError( msg ) - fileInformationList = [] - for file in data: - fileName = 'unknown' - try: - fileName = self.__findInJson(file, "name") - fileCreationTime = self.__findInJson(file, "creation_time") - fileNEvents = self.__findInJson(file, "nevents") - except KeyError: - print(("DAS query gives bad output for file '%s'. Skipping it.\n" - "It may work if you try again later.") % fileName) - fileNEvents = 0 - # select only non-empty files - if fileNEvents == 0: - continue - fileDict = { "name": fileName, - "creation_time": fileCreationTime, - "nevents": fileNEvents - } - fileInformationList.append( fileDict ) - fileInformationList.sort( key=lambda info: self.__findInJson(info,"name") ) - return fileInformationList - - @cache - def __getRunList( self ): - dasQuery_runs = ( 'run dataset=%s instance=%s | grep run.run_number,' - 'run.creation_time'%( self.__name, self.__dasinstance ) ) - print("Requesting run information for '%s' from DAS..."%( self.__name ), end=' ') - sys.stdout.flush() - data = self.__getData( dasQuery_runs ) - print("Done.") - data = [ self.__findInJson(entry,"run") for entry in data ] - data.sort( key = lambda run: self.__findInJson(run, "run_number") ) - return data - - def __datetime(self, stringForDas): - if len(stringForDas) != 8: - raise AllInOneError(stringForDas + " is not a valid date string.\n" - + "DAS accepts dates in the form 'yyyymmdd'") - year = stringForDas[:4] - month = stringForDas[4:6] - day = stringForDas[6:8] - return datetime.date(int(year), int(month), int(day)) - - def __dateString(self, date): - return str(date.year) + str(date.month).zfill(2) + str(date.day).zfill(2) - - def convertTimeToRun( self, begin = None, end = None, - firstRun = None, lastRun = None, - shortTuple = True ): - if ( begin and firstRun ) or ( end and lastRun ): - msg = ( "The Usage of " - + "'begin' & 'firstRun' " * int( bool( 
begin and - firstRun ) ) - + "and " * int( bool( ( begin and firstRun ) and - ( end and lastRun ) ) ) - + "'end' & 'lastRun' " * int( bool( end and lastRun ) ) - + "is ambigous." ) - raise AllInOneError( msg ) - - if begin or end: - runList = [ self.__findInJson(run, "run_number") for run in self.__getRunList() ] - - if begin: - lastdate = begin - for delta in [ 1, 5, 10, 20, 30 ]: #try searching for about 2 months after begin - firstdate = lastdate - lastdate = self.__dateString(self.__datetime(firstdate) + datetime.timedelta(delta)) - dasQuery_begin = "run date between[%s,%s] instance=%s" % (firstdate, lastdate, self.__dasinstance) - begindata = self.__getData(dasQuery_begin) - if len(begindata) > 0: - begindata.sort(key = lambda run: self.__findInJson(run, ["run", "run_number"])) - try: - runIndex = self.__find_ge( runList, self.__findInJson(begindata[0], ["run", "run_number"])) - except ValueError: - msg = ( "Your 'begin' is after the creation time of the last " - "run in the dataset\n'%s'"%( self.__name ) ) - raise AllInOneError( msg ) - firstRun = runList[runIndex] - begin = None - break - - if begin: - raise AllInOneError("No runs within a reasonable time interval after your 'begin'." 
- "Try using a 'begin' that has runs soon after it (within 2 months at most)") - - if end: - firstdate = end - for delta in [ 1, 5, 10, 20, 30 ]: #try searching for about 2 months before end - lastdate = firstdate - firstdate = self.__dateString(self.__datetime(lastdate) - datetime.timedelta(delta)) - dasQuery_end = "run date between[%s,%s] instance=%s" % (firstdate, lastdate, self.__dasinstance) - enddata = self.__getData(dasQuery_end) - if len(enddata) > 0: - enddata.sort(key = lambda run: self.__findInJson(run, ["run", "run_number"])) - try: - runIndex = self.__find_lt( runList, self.__findInJson(enddata[-1], ["run", "run_number"])) - except ValueError: - msg = ( "Your 'end' is before the creation time of the first " - "run in the dataset\n'%s'"%( self.__name ) ) - raise AllInOneError( msg ) - lastRun = runList[runIndex] - end = None - break - - if end: - raise AllInOneError("No runs within a reasonable time interval before your 'end'." - "Try using an 'end' that has runs soon before it (within 2 months at most)") - - if shortTuple: - return firstRun, lastRun - else: - return begin, end, firstRun, lastRun - - def dataType( self ): - if not self.__dataType: - self.__dataType = self.__getDataType() - return self.__dataType - - def magneticField( self ): - if not self.__magneticField: - self.__magneticField = self.__getMagneticField() - return self.__magneticField - - def magneticFieldForRun( self, run = -1 ): - return self.__getMagneticFieldForRun(run) - - def parentDataset( self ): - if not self.__parentDataset: - self.__parentDataset = self.__getParentDataset() - return self.__parentDataset - - def datasetSnippet( self, jsonPath = None, begin = None, end = None, - firstRun = None, lastRun = None, crab = False, parent = False ): - if not firstRun: firstRun = None - if not lastRun: lastRun = None - if not begin: begin = None - if not end: end = None - if self.__predefined and (jsonPath or begin or end or firstRun or lastRun): - msg = ( "The parameters 'JSON', 
'begin', 'end', 'firstRun', and 'lastRun' " - "only work for official datasets, not predefined _cff.py files" ) - raise AllInOneError( msg ) - if self.__predefined and parent: - with open(self.__filename) as f: - if "secFiles.extend" not in f.read(): - msg = ("The predefined dataset '%s' does not contain secondary files, " - "which your validation requires!") % self.__name - if self.__official: - self.__name = self.__origName - self.__predefined = False - print(msg) - print ("Retreiving the files from DAS. You will be asked if you want " - "to overwrite the old dataset.\n" - "It will still be compatible with validations that don't need secondary files.") - else: - raise AllInOneError(msg) - - if self.__predefined: - snippet = ("process.load(\"Alignment.OfflineValidation.%s_cff\")\n" - "process.maxEvents = cms.untracked.PSet(\n" - " input = cms.untracked.int32(int(.oO[nEvents]Oo. / .oO[parallelJobs]Oo.))\n" - ")\n" - "process.source.skipEvents=cms.untracked.uint32(int(.oO[nIndex]Oo.*.oO[nEvents]Oo./.oO[parallelJobs]Oo.))" - %(self.__name)) - if not parent: - with open(self.__filename) as f: - if "secFiles.extend" in f.read(): - snippet += "\nprocess.source.secondaryFileNames = cms.untracked.vstring()" - return snippet - theMap = { "process": "process.", - "tab": " " * len( "process." ), - "nEvents": ".oO[nEvents]Oo. 
/ .oO[parallelJobs]Oo.", - "skipEventsString": "process.source.skipEvents=cms.untracked.uint32(int(.oO[nIndex]Oo.*.oO[nEvents]Oo./.oO[parallelJobs]Oo.))\n", - "importCms": "", - "header": "" - } - datasetSnippet = self.__createSnippet( jsonPath = jsonPath, - begin = begin, - end = end, - firstRun = firstRun, - lastRun = lastRun, - repMap = theMap, - crab = crab, - parent = parent ) - if jsonPath == "" and begin == "" and end == "" and firstRun == "" and lastRun == "": - try: - self.dump_cff(parent = parent) - except AllInOneError as e: - print("Can't store the dataset as a cff:") - print(e) - print("This may be inconvenient in the future, but will not cause a problem for this validation.") - return datasetSnippet - - @cache - def dump_cff( self, outName = None, jsonPath = None, begin = None, - end = None, firstRun = None, lastRun = None, parent = False ): - if outName == None: - outName = "Dataset" + self.__name.replace("/", "_") - packageName = os.path.join( "Alignment", "OfflineValidation" ) - if not os.path.exists( os.path.join( - self.__cmssw, "src", packageName ) ): - msg = ("You try to store the predefined dataset'%s'.\n" - "For that you need to check out the package '%s' to your " - "private relase area in\n"%( outName, packageName ) - + self.__cmssw ) - raise AllInOneError( msg ) - theMap = { "process": "", - "tab": "", - "nEvents": str( -1 ), - "skipEventsString": "", - "importCms": "import FWCore.ParameterSet.Config as cms\n", - "header": "#Do not delete or (unless you know what you're doing) change these comments\n" - "#%(name)s\n" - "#data type: %(dataType)s\n" - "#magnetic field: .oO[magneticField]Oo.\n" #put in magnetic field later - %{"name": self.__name, #need to create the snippet before getting the magnetic field - "dataType": self.__dataType} #so that we know the first and last runs - } - dataset_cff = self.__createSnippet( jsonPath = jsonPath, - begin = begin, - end = end, - firstRun = firstRun, - lastRun = lastRun, - repMap = theMap, - parent = 
parent) - magneticField = self.__magneticField - if magneticField == "MagneticField": - magneticField = "%s, %s #%s" % (magneticField, - str(self.__getMagneticFieldForRun()).replace("\n"," ").split("#")[0].strip(), - "Use MagneticField_cff.py; the number is for determining which track selection to use." - ) - dataset_cff = dataset_cff.replace(".oO[magneticField]Oo.",magneticField) - filePath = os.path.join( self.__cmssw, "src", packageName, - "python", outName + "_cff.py" ) - if os.path.exists( filePath ): - existMsg = "The predefined dataset '%s' already exists.\n"%( outName ) - askString = "Do you want to overwrite it? [y/n]\n" - inputQuery = existMsg + askString - while True: - userInput = raw_input( inputQuery ).lower() - if userInput == "y": - break - elif userInput == "n": - return - else: - inputQuery = askString - print ( "The predefined dataset '%s' will be stored in the file\n" - %( outName ) - + filePath + - "\nFor future use you have to do 'scram b'." ) - print() - theFile = open( filePath, "w" ) - theFile.write( dataset_cff ) - theFile.close() - return - - def createdatasetfile_hippy(self, filename, filesperjob, firstrun, lastrun): - with open(filename, "w") as f: - for job in self.__chunks(self.fileList(firstRun=firstrun, lastRun=lastrun, forcerunselection=True), filesperjob): - f.write(",".join("'{}'".format(file) for file in job)+"\n") - - @staticmethod - def getrunnumberfromfilename(filename): - parts = filename.split("/") - result = error = None - if parts[0] != "" or parts[1] != "store": - error = "does not start with /store" - elif parts[2] in ["mc", "relval"]: - result = 1 - elif not parts[-1].endswith(".root"): - error = "does not end with something.root" - elif len(parts) != 12: - error = "should be exactly 11 slashes counting the first one" - else: - runnumberparts = parts[-5:-2] - if not all(len(part)==3 for part in runnumberparts): - error = "the 3 directories {} do not have length 3 each".format("/".join(runnumberparts)) - try: - result = 
int("".join(runnumberparts)) - except ValueError: - error = "the 3 directories {} do not form an integer".format("/".join(runnumberparts)) - - if error: - error = "could not figure out which run number this file is from:\n{}\n{}".format(filename, error) - raise AllInOneError(error) - - return result - - @cache - def fileList(self, parent=False, firstRun=None, lastRun=None, forcerunselection=False): - fileList = [ self.__findInJson(fileInfo,"name") - for fileInfo in self.fileInfoList(parent) ] - - if firstRun or lastRun: - if not firstRun: firstRun = -1 - if not lastRun: lastRun = float('infinity') - unknownfilenames, reasons = [], set() - for filename in fileList[:]: - try: - if not firstRun <= self.getrunnumberfromfilename(filename) <= lastRun: - fileList.remove(filename) - except AllInOneError as e: - if forcerunselection: raise - unknownfilenames.append(e.message.split("\n")[1]) - reasons .add (e.message.split("\n")[2]) - if reasons: - if len(unknownfilenames) == len(fileList): - print("Could not figure out the run numbers of any of the filenames for the following reason(s):") - else: - print("Could not figure out the run numbers of the following filenames:") - for filename in unknownfilenames: - print(" "+filename) - print("for the following reason(s):") - for reason in reasons: - print(" "+reason) - print("Using the files anyway. 
The runs will be filtered at the CMSSW level.") - return fileList - - def fileInfoList( self, parent = False ): - return self.__getFileInfoList( self.__dasLimit, parent ) - - def name( self ): - return self.__name - - def predefined( self ): - return self.__predefined - - @cache - def runList( self ): - return self.__getRunList() - - -if __name__ == '__main__': - print("Start testing...") - datasetName = '/MinimumBias/Run2012D-TkAlMinBias-v1/ALCARECO' - jsonFile = ( '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/' - 'Collisions12/8TeV/Prompt/' - 'Cert_190456-207898_8TeV_PromptReco_Collisions12_JSON.txt' ) - dataset = Dataset( datasetName ) - print(dataset.datasetSnippet( jsonPath = jsonFile, - firstRun = "207800", - end = "20121128")) - dataset.dump_cff( outName = "Dataset_Test_TkAlMinBias_Run2012D", - jsonPath = jsonFile, - firstRun = "207800", - end = "20121128" ) diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/defaultInputFiles_cff.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/defaultInputFiles_cff.py index d0bc9b7de5e5b..69a5e48f18e19 100644 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/defaultInputFiles_cff.py +++ b/Alignment/OfflineValidation/python/TkAlAllInOneTool/defaultInputFiles_cff.py @@ -18,6 +18,8 @@ '/store/relval/CMSSW_12_5_3/RelValMinBias_14TeV/ALCARECO/TkAlMinBias-125X_mcRun4_realistic_v5_2026D88PU-v1/2590000/27b7ab93-1d2b-4f4a-a98e-68386c314b5e.root', ) +filesDefaultMC_DoubleMuonPUPhase_string = '/store/mc/Phase2Fall22DRMiniAOD/DYJetsToMuMu_M-50_TuneCP5_14TeV-madgraphMLM-pythia8/ALCARECO/TkAlZMuMu-PU200ALCA_TkAlPU200_125X_mcRun4_realistic_v5-v1/60000/9382696c-70fd-4b37-8a0f-24bd02aeda5f.root' + filesDefaultMC_MinBiasPUPhase2RECO = cms.untracked.vstring( '/store/relval/CMSSW_12_5_3/RelValMinBias_14TeV/GEN-SIM-RECO/125X_mcRun4_realistic_v5_2026D88PU-v1/2590000/22e22ae6-a353-4f2e-815e-cc5efee37af9.root', ) @@ -56,3 +58,5 @@ filesDefaultData_MinBias2018B = cms.untracked.vstring( 
'/store/express/Run2018B/StreamExpress/ALCARECO/TkAlMinBias-Express-v1/000/317/212/00000/00F0EFA7-8D64-E811-A594-FA163EFC96CC.root' ) + +filesDefaultData_Cosmics_string = "/store/data/Run2022G/Cosmics/ALCARECO/TkAlCosmics0T-PromptReco-v1/000/362/440/00000/47f31eaa-1c00-4f39-902b-a09fa19c27f2.root" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py deleted file mode 100644 index 1049eb68d0270..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/genericValidation.py +++ /dev/null @@ -1,794 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -from builtins import range -from abc import ABCMeta, abstractmethod, abstractproperty -import os -import re -import json -from . import globalDictionaries -from . import configTemplates -from .dataset import Dataset -from .helperFunctions import replaceByMap, addIndex, getCommandOutput2, boolfromstring, pythonboolstring -from .TkAlExceptions import AllInOneError - -class ValidationMetaClass(ABCMeta): - sets = ["mandatories", "optionals", "needpackages"] - dicts = ["defaults"] - def __new__(cls, clsname, bases, dct): - for setname in cls.sets: - if setname not in dct: dct[setname] = set() - dct[setname] = set.union(dct[setname], *(getattr(base, setname) for base in bases if hasattr(base, setname))) - - for dictname in cls.dicts: - if dictname not in dct: dct[dictname] = {} - for base in bases: - if not hasattr(base, dictname): continue - newdict = getattr(base, dictname) - for key in set(newdict) & set(dct[dictname]): - if newdict[key] != dct[dictname][key]: - raise ValueError("Inconsistent values of defaults[{}]: {}, {}".format(key, newdict[key], dct[dictname][key])) - dct[dictname].update(newdict) - - for setname in cls.sets: #e.g. 
removemandatories, used in preexistingvalidation - #use with caution - if "remove"+setname not in dct: dct["remove"+setname] = set() - dct["remove"+setname] = set.union(dct["remove"+setname], *(getattr(base, "remove"+setname) for base in bases if hasattr(base, "remove"+setname))) - - dct[setname] -= dct["remove"+setname] - - return super(ValidationMetaClass, cls).__new__(cls, clsname, bases, dct) - -class GenericValidation(object, metaclass=ValidationMetaClass): - defaultReferenceName = "DEFAULT" - mandatories = set() - defaults = { - "cmssw": os.environ['CMSSW_BASE'], - "parallelJobs": "1", - "jobid": "", - "needsproxy": "false", - } - needpackages = {"Alignment/OfflineValidation"} - optionals = {"jobmode"} - - def __init__(self, valName, alignment, config): - import random - self.name = valName - self.alignmentToValidate = alignment - self.general = config.getGeneral() - self.randomWorkdirPart = "%0i"%random.randint(1,10e9) - self.configFiles = [] - self.config = config - self.jobid = "" - - theUpdate = config.getResultingSection(self.valType+":"+self.name, - defaultDict = self.defaults, - demandPars = self.mandatories) - self.general.update(theUpdate) - self.jobmode = self.general["jobmode"] - self.NJobs = int(self.general["parallelJobs"]) - self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") - - # limit maximum number of parallel jobs to 40 - # (each output file is approximately 20MB) - maximumNumberJobs = 40 - if self.NJobs > maximumNumberJobs: - msg = ("Maximum allowed number of parallel jobs " - +str(maximumNumberJobs)+" exceeded!!!") - raise AllInOneError(msg) - if self.NJobs > 1 and not isinstance(self, ParallelValidation): - raise AllInOneError("Parallel jobs not implemented for {}!\n" - "Please set parallelJobs = 1.".format(type(self).__name__)) - - self.jobid = self.general["jobid"] - if self.jobid: - try: #make sure it's actually a valid jobid - output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) - if "is not found" 
in output: raise RuntimeError - except RuntimeError: - raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) - - self.cmssw = self.general["cmssw"] - badcharacters = r"\'" - for character in badcharacters: - if character in self.cmssw: - raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" - "path name. If you really have it in such a ridiculously named location,\n" - "try making a symbolic link somewhere with a decent name.") - try: - os.listdir(self.cmssw) - except OSError: - raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') - - if self.cmssw == os.environ["CMSSW_BASE"]: - self.scramarch = os.environ["SCRAM_ARCH"] - self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] - else: - command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" - ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') - commandoutput = getCommandOutput2(command).split('\n') - self.cmssw = commandoutput[0] - self.scramarch = commandoutput[1] - self.cmsswreleasebase = commandoutput[2] - - self.packages = {} - for package in self.needpackages: - for placetolook in self.cmssw, self.cmsswreleasebase: - pkgpath = os.path.join(placetolook, "src", package) - if os.path.exists(pkgpath): - self.packages[package] = pkgpath - break - else: - raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) - - self.AutoAlternates = True - if config.has_option("alternateTemplates","AutoAlternates"): - try: - self.AutoAlternates = json.loads(config.get("alternateTemplates","AutoAlternates").lower()) - except ValueError: - raise AllInOneError("AutoAlternates needs to be true or false, not %s" % config.get("alternateTemplates","AutoAlternates")) - - knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals - ignoreOpts = [] - config.checkInput(self.valType+":"+self.name, - knownSimpleOptions = knownOpts, - ignoreOptions = 
ignoreOpts) - - def getRepMap(self, alignment = None): - from .plottingOptions import PlottingOptions - if alignment == None: - alignment = self.alignmentToValidate - try: - result = PlottingOptions(self.config, self.valType) - except KeyError: - result = {} - result.update(alignment.getRepMap()) - result.update(self.general) - result.update({ - "workdir": os.path.join(self.general["workdir"], - self.randomWorkdirPart), - "datadir": self.general["datadir"], - "logdir": self.general["logdir"], - "CommandLineTemplate": ("#run configfile and post-proccess it\n" - "cmsRun %(cfgFile)s\n" - "%(postProcess)s "), - "CMSSW_BASE": self.cmssw, - "SCRAM_ARCH": self.scramarch, - "CMSSW_RELEASE_BASE": self.cmsswreleasebase, - "alignmentName": alignment.name, - "condLoad": alignment.getConditions(), - "LoadGlobalTagTemplate": configTemplates.loadGlobalTagTemplate, - }) - result.update(self.packages) - return result - - @abstractproperty - def filesToCompare(self): - pass - - def getCompareStrings( self, requestId = None, plain = False ): - result = {} - repMap = self.getRepMap().copy() - for validationId in self.filesToCompare: - repMap["file"] = self.filesToCompare[ validationId ] - if repMap["file"].startswith( "/castor/" ): - repMap["file"] = "rfio:%(file)s"%repMap - elif repMap["file"].startswith( "/store/" ): - repMap["file"] = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap - if plain: - result[validationId]=repMap["file"] - else: - result[validationId]= "%(file)s=%(title)s|%(color)s|%(style)s"%repMap - if requestId == None: - return result - else: - if not "." in requestId: - requestId += ".%s"%self.defaultReferenceName - if not requestId.split(".")[-1] in result: - msg = ("could not find %s in reference Objects!" 
- %requestId.split(".")[-1]) - raise AllInOneError(msg) - return result[ requestId.split(".")[-1] ] - - def createFiles(self, fileContents, path, repMap = None, repMaps = None): - """repMap: single map for all files - repMaps: a dict, with the filenames as the keys""" - if repMap is not None and repMaps is not None: - raise AllInOneError("createFiles can only take repMap or repMaps (or neither), not both") - result = [] - for fileName in fileContents: - filePath = os.path.join(path, fileName) - result.append(filePath) - - for (i, filePathi) in enumerate(addIndex(filePath, self.NJobs)): - theFile = open( filePathi, "w" ) - fileContentsi = fileContents[ fileName ] - if repMaps is not None: - repMap = repMaps[fileName] - if repMap is not None: - repMap.update({"nIndex": str(i)}) - fileContentsi = replaceByMap(fileContentsi, repMap) - theFile.write( fileContentsi ) - theFile.close() - - return result - - def createConfiguration(self, fileContents, path, schedule = None, repMap = None, repMaps = None): - self.configFiles = self.createFiles(fileContents, - path, repMap = repMap, repMaps = repMaps) - if not schedule == None: - schedule = [os.path.join( path, cfgName) for cfgName in schedule] - for cfgName in schedule: - if not cfgName in self.configFiles: - msg = ("scheduled %s missing in generated configfiles: %s" - %(cfgName, self.configFiles)) - raise AllInOneError(msg) - for cfgName in self.configFiles: - if not cfgName in schedule: - msg = ("generated configuration %s not scheduled: %s" - %(cfgName, schedule)) - raise AllInOneError(msg) - self.configFiles = schedule - return self.configFiles - - def createScript(self, fileContents, path, downloadFiles=[], repMap = None, repMaps = None): - self.scriptFiles = self.createFiles(fileContents, - path, repMap = repMap, repMaps = repMaps) - for script in self.scriptFiles: - for scriptwithindex in addIndex(script, self.NJobs): - os.chmod(scriptwithindex,0o755) - return self.scriptFiles - - def createCrabCfg(self, 
fileContents, path ): - if self.NJobs > 1: - msg = ("jobmode 'crab' not supported for parallel validation." - " Please set parallelJobs = 1.") - raise AllInOneError(msg) - self.crabConfigFiles = self.createFiles(fileContents, path) - return self.crabConfigFiles - - -class GenericValidationData(GenericValidation): - """ - Subclass of `GenericValidation` which is the base for validations using - datasets. - """ - needParentFiles = False - mandatories = {"dataset", "maxevents"} - defaults = { - "runRange": "", - "firstRun": "", - "lastRun": "", - "begin": "", - "end": "", - "JSON": "", - "dasinstance": "prod/global", - "ttrhbuilder":"WithAngleAndTemplate", - "usepixelqualityflag": "True", - } - optionals = {"magneticfield"} - - def __init__(self, valName, alignment, config): - """ - This method adds additional items to the `self.general` dictionary - which are only needed for validations using datasets. - - Arguments: - - `valName`: String which identifies individual validation instances - - `alignment`: `Alignment` instance to validate - - `config`: `BetterConfigParser` instance which includes the - configuration of the validations - """ - - super(GenericValidationData, self).__init__(valName, alignment, config) - - # if maxevents is not specified, cannot calculate number of events for - # each parallel job, and therefore running only a single job - if int( self.general["maxevents"] ) < 0 and self.NJobs > 1: - msg = ("Maximum number of events (maxevents) not specified: " - "cannot use parallel jobs.") - raise AllInOneError(msg) - if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs: - msg = ("maxevents has to be divisible by parallelJobs") - raise AllInOneError(msg) - - tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == "" - and self.general["firstRun"] == "" and self.general["lastRun"] == "" - and self.general["begin"] == "" and self.general["end"] == "") - - if 
self.general["dataset"] not in globalDictionaries.usedDatasets: - globalDictionaries.usedDatasets[self.general["dataset"]] = {} - - if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]: - if globalDictionaries.usedDatasets[self.general["dataset"]] != {}: - print(("Warning: you use the same dataset '%s' in multiple cmssw releases.\n" - "This is allowed, but make sure it's not a mistake") % self.general["dataset"]) - globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None} - - Bfield = self.general.get("magneticfield", None) - if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None: - dataset = Dataset( - self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst, - cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield, - dasinstance = self.general["dasinstance"]) - globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset - if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case - globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset - - self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] - self.general["magneticField"] = self.dataset.magneticField() - self.general["defaultMagneticField"] = "MagneticField" - if self.general["magneticField"] == "unknown": - print("Could not get the magnetic field for this dataset.") - print("Using the default: ", self.general["defaultMagneticField"]) - self.general["magneticField"] = '.oO[defaultMagneticField]Oo.' 
- - if not self.jobmode.split( ',' )[0] == "crab": - try: - self.general["datasetDefinition"] = self.dataset.datasetSnippet( - jsonPath = self.general["JSON"], - firstRun = self.general["firstRun"], - lastRun = self.general["lastRun"], - begin = self.general["begin"], - end = self.general["end"], - parent = self.needParentFiles ) - except AllInOneError as e: - msg = "In section [%s:%s]: "%(self.valType, self.name) - msg += str(e) - raise AllInOneError(msg) - else: - if self.dataset.predefined(): - msg = ("For jobmode 'crab' you cannot use predefined datasets " - "(in your case: '%s')."%( self.dataset.name() )) - raise AllInOneError( msg ) - try: - theUpdate = config.getResultingSection(self.valType+":"+self.name, - demandPars = ["parallelJobs"]) - except AllInOneError as e: - msg = str(e)[:-1]+" when using 'jobmode: crab'." - raise AllInOneError(msg) - self.general.update(theUpdate) - if self.general["begin"] or self.general["end"]: - ( self.general["begin"], - self.general["end"], - self.general["firstRun"], - self.general["lastRun"] ) = self.dataset.convertTimeToRun( - firstRun = self.general["firstRun"], - lastRun = self.general["lastRun"], - begin = self.general["begin"], - end = self.general["end"], - shortTuple = False) - if self.general["begin"] == None: - self.general["begin"] = "" - if self.general["end"] == None: - self.general["end"] = "" - self.general["firstRun"] = str( self.general["firstRun"] ) - self.general["lastRun"] = str( self.general["lastRun"] ) - if ( not self.general["firstRun"] ) and \ - ( self.general["end"] or self.general["lastRun"] ): - self.general["firstRun"] = str( - self.dataset.runList()[0]["run_number"]) - if ( not self.general["lastRun"] ) and \ - ( self.general["begin"] or self.general["firstRun"] ): - self.general["lastRun"] = str( - self.dataset.runList()[-1]["run_number"]) - if self.general["firstRun"] and self.general["lastRun"]: - if int(self.general["firstRun"]) > int(self.general["lastRun"]): - msg = ( "The lower 
time/runrange limit ('begin'/'firstRun') " - "chosen is greater than the upper time/runrange limit " - "('end'/'lastRun').") - raise AllInOneError( msg ) - self.general["runRange"] = (self.general["firstRun"] - + '-' + self.general["lastRun"]) - try: - self.general["datasetDefinition"] = self.dataset.datasetSnippet( - jsonPath = self.general["JSON"], - firstRun = self.general["firstRun"], - lastRun = self.general["lastRun"], - begin = self.general["begin"], - end = self.general["end"], - crab = True ) - except AllInOneError as e: - msg = "In section [%s:%s]: "%(self.valType, self.name) - msg += str( e ) - raise AllInOneError( msg ) - - self.general["usepixelqualityflag"] = pythonboolstring(self.general["usepixelqualityflag"], "usepixelqualityflag") - - def getRepMap(self, alignment = None): - result = super(GenericValidationData, self).getRepMap(alignment) - outputfile = os.path.expandvars(replaceByMap( - "%s_%s_.oO[name]Oo..root" % (self.outputBaseName, self.name) - , result)) - resultfile = os.path.expandvars(replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./" + - "%s_%s_.oO[name]Oo..root" % (self.resultBaseName, self.name)) - , result)) - result.update({ - "resultFile": ".oO[resultFiles[.oO[nIndex]Oo.]]Oo.", - "resultFiles": addIndex(resultfile, self.NJobs), - "finalResultFile": resultfile, - "outputFile": ".oO[outputFiles[.oO[nIndex]Oo.]]Oo.", - "outputFiles": addIndex(outputfile, self.NJobs), - "finalOutputFile": outputfile, - "ProcessName": self.ProcessName, - "Bookkeeping": self.Bookkeeping, - "LoadBasicModules": self.LoadBasicModules, - "TrackSelectionRefitting": self.TrackSelectionRefitting, - "ValidationConfig": self.ValidationTemplate, - "FileOutputTemplate": self.FileOutputTemplate, - "DefinePath": self.DefinePath, - }) - return result - - @property - def cfgName(self): - return "%s.%s.%s_cfg.py"%( self.configBaseName, self.name, - self.alignmentToValidate.name ) - @abstractproperty - def ProcessName(self): - pass - - 
@property - def cfgTemplate(self): - return configTemplates.cfgTemplate - - @abstractproperty - def ValidationTemplate(self): - pass - - @property - def filesToCompare(self): - return {self.defaultReferenceName: self.getRepMap()["finalResultFile"]} - - def createConfiguration(self, path ): - repMap = self.getRepMap() - cfgs = {self.cfgName: self.cfgTemplate} - super(GenericValidationData, self).createConfiguration(cfgs, path, repMap=repMap) - - def createScript(self, path, template = configTemplates.scriptTemplate, downloadFiles=[], repMap = None, repMaps = None): - scriptName = "%s.%s.%s.sh"%(self.scriptBaseName, self.name, - self.alignmentToValidate.name ) - if repMap is None and repMaps is None: - repMap = self.getRepMap() - repMap["CommandLine"]="" - for cfg in self.configFiles: - repMap["CommandLine"]+= repMap["CommandLineTemplate"]%{"cfgFile":addIndex(cfg, self.NJobs, ".oO[nIndex]Oo."), - "postProcess":"" - } - scripts = {scriptName: template} - return super(GenericValidationData, self).createScript(scripts, path, downloadFiles = downloadFiles, - repMap = repMap, repMaps = repMaps) - - def createCrabCfg(self, path, crabCfgBaseName): - """ - Method which creates a `crab.cfg` for a validation on datasets. - - Arguments: - - `path`: Path at which the file will be stored. - - `crabCfgBaseName`: String which depends on the actual type of - validation calling this method. - """ - crabCfgName = "crab.%s.%s.%s.cfg"%( crabCfgBaseName, self.name, - self.alignmentToValidate.name ) - repMap = self.getRepMap() - repMap["script"] = "dummy_script.sh" - # repMap["crabOutputDir"] = os.path.basename( path ) - repMap["crabWorkingDir"] = crabCfgName.split( '.cfg' )[0] - self.crabWorkingDir = repMap["crabWorkingDir"] - repMap["numberOfJobs"] = self.general["parallelJobs"] - repMap["cfgFile"] = self.configFiles[0] - repMap["queue"] = self.jobmode.split( ',' )[1].split( '-q' )[1] - if self.dataset.dataType() == "mc": - repMap["McOrData"] = "events = .oO[nEvents]Oo." 
- elif self.dataset.dataType() == "data": - repMap["McOrData"] = "lumis = -1" - if self.jobmode.split( ',' )[0] == "crab": - print ("For jobmode 'crab' the parameter 'maxevents' will be " - "ignored and all events will be processed.") - else: - raise AllInOneError("Unknown data type! Can't run in crab mode") - crabCfg = {crabCfgName: replaceByMap( configTemplates.crabCfgTemplate, - repMap ) } - return super(GenericValidationData, self).createCrabCfg( crabCfg, path ) - - @property - def Bookkeeping(self): - return configTemplates.Bookkeeping - @property - def LoadBasicModules(self): - return configTemplates.LoadBasicModules - @abstractproperty - def TrackSelectionRefitting(self): - pass - @property - def FileOutputTemplate(self): - return configTemplates.FileOutputTemplate - @abstractproperty - def DefinePath(self): - pass - -class GenericValidationData_CTSR(GenericValidationData): - #common track selection and refitting - defaults = { - "momentumconstraint": "None", - "openmasswindow": "False", - "cosmicsdecomode": "True", - "removetrackhitfiltercommands": "", - "appendtrackhitfiltercommands": "", - } - def getRepMap(self, alignment=None): - result = super(GenericValidationData_CTSR, self).getRepMap(alignment) - - from .trackSplittingValidation import TrackSplittingValidation - result.update({ - "ValidationSequence": self.ValidationSequence, - "istracksplitting": str(isinstance(self, TrackSplittingValidation)), - "cosmics0T": str(self.cosmics0T), - "use_d0cut": str(self.use_d0cut), - "ispvvalidation": str(self.isPVValidation) - }) - - commands = [] - for removeorappend in "remove", "append": - optionname = removeorappend + "trackhitfiltercommands" - if result[optionname]: - for command in result[optionname].split(","): - command = command.strip() - commands.append('process.TrackerTrackHitFilter.commands.{}("{}")'.format(removeorappend, command)) - result["trackhitfiltercommands"] = "\n".join(commands) - - return result - @property - def use_d0cut(self): - return 
"Cosmics" not in self.general["trackcollection"] #use it for collisions only - @property - def isPVValidation(self): - return False # only for PV Validation sequence - @property - def TrackSelectionRefitting(self): - return configTemplates.CommonTrackSelectionRefitting - @property - def DefinePath(self): - return configTemplates.DefinePath_CommonSelectionRefitting - @abstractproperty - def ValidationSequence(self): - pass - @property - def cosmics0T(self): - if "Cosmics" not in self.general["trackcollection"]: return False - Bfield = self.dataset.magneticFieldForRun() - if Bfield < 0.5: return True - if isinstance(Bfield, str): - if "unknown " in Bfield: - msg = Bfield.replace("unknown ","",1) - elif Bfield == "unknown": - msg = "Can't get the B field for %s." % self.dataset.name() - else: - msg = "B field = {}???".format(Bfield) - raise AllInOneError(msg + "\n" - "To use this dataset, specify magneticfield = [value] in your .ini config file.") - return False - -class ParallelValidation(GenericValidation): - @classmethod - def initMerge(cls): - return "" - @abstractmethod - def appendToMerge(self): - pass - - @classmethod - def doInitMerge(cls): - from .plottingOptions import PlottingOptions - result = cls.initMerge() - result = replaceByMap(result, PlottingOptions(None, cls)) - if result and result[-1] != "\n": result += "\n" - return result - def doMerge(self): - result = self.appendToMerge() - if result[-1] != "\n": result += "\n" - result += ("if [[ tmpMergeRetCode -eq 0 ]]; then\n" - " xrdcp -f .oO[finalOutputFile]Oo. root://eoscms//eos/cms.oO[finalResultFile]Oo.\n" - "fi\n" - "if [[ ${tmpMergeRetCode} -gt ${mergeRetCode} ]]; then\n" - " mergeRetCode=${tmpMergeRetCode}\n" - "fi\n") - result = replaceByMap(result, self.getRepMap()) - return result - -class ValidationWithPlots(GenericValidation): - @classmethod - def runPlots(cls, validations): - return ("cp .oO[plottingscriptpath]Oo. 
.\n" - "root -x -b -q .oO[plottingscriptname]Oo.++") - @abstractmethod - def appendToPlots(self): - pass - @abstractmethod - def plottingscriptname(cls): - """override with a classmethod""" - @abstractmethod - def plottingscripttemplate(cls): - """override with a classmethod""" - @abstractmethod - def plotsdirname(cls): - """override with a classmethod""" - - @classmethod - def doRunPlots(cls, validations): - from .plottingOptions import PlottingOptions - cls.createPlottingScript(validations) - result = cls.runPlots(validations) - result = replaceByMap(result, PlottingOptions(None, cls)) - if result and result[-1] != "\n": result += "\n" - return result - @classmethod - def createPlottingScript(cls, validations): - from .plottingOptions import PlottingOptions - repmap = PlottingOptions(None, cls).copy() - filename = replaceByMap(".oO[plottingscriptpath]Oo.", repmap) - repmap["PlottingInstantiation"] = "\n".join( - replaceByMap(v.appendToPlots(), v.getRepMap()).rstrip("\n") - for v in validations - ) - plottingscript = replaceByMap(cls.plottingscripttemplate(), repmap) - with open(filename, 'w') as f: - f.write(plottingscript) - -class ValidationWithPlotsSummaryBase(ValidationWithPlots): - class SummaryItem(object): - def __init__(self, name, values, format=None, latexname=None, latexformat=None): - """ - name: name of the summary item, goes on top of the column - values: value for each alignment (in order of rows) - format: python format string (default: {:.3g}, meaning up to 3 significant digits) - latexname: name in latex form, e.g. 
if name=sigma you might want latexname=\sigma (default: name) - latexformat: format for latex (default: format) - """ - if format is None: format = "{:.3g}" - if latexname is None: latexname = name - if latexformat is None: latexformat = format - - self.__name = name - self.__values = values - self.__format = format - self.__latexname = latexname - self.__latexformat = latexformat - - def name(self, latex=False): - if latex: - return self.__latexname - else: - return self.__name - - def format(self, value, latex=False): - if latex: - fmt = self.__latexformat - else: - fmt = self.__format - if re.match(".*[{][^}]*[fg][}].*", fmt): - value = float(value) - return fmt.format(value) - - def values(self, latex=False): - result = [self.format(v, latex=latex) for v in self.__values] - return result - - def value(self, i, latex): - return self.values(latex)[i] - - @abstractmethod - def getsummaryitems(cls, folder): - """override with a classmethod that returns a list of SummaryItems - based on the plots saved in folder""" - - __summaryitems = None - __lastfolder = None - - @classmethod - def summaryitemsstring(cls, folder=None, latex=False, transpose=True): - if folder is None: folder = cls.plotsdirname() - if folder.startswith( "/castor/" ): - folder = "rfio:%(file)s"%repMap - elif folder.startswith( "/store/" ): - folder = "root://eoscms.cern.ch//eos/cms%(file)s"%repMap - - if cls.__summaryitems is None or cls.__lastfolder != folder: - cls.__lastfolder = folder - cls.__summaryitems = cls.getsummaryitems(folder) - - summaryitems = cls.__summaryitems - - if not summaryitems: - raise AllInOneError("No summary items!") - size = {len(_.values(latex)) for _ in summaryitems} - if len(size) != 1: - raise AllInOneError("Some summary items have different numbers of values\n{}".format(size)) - size = size.pop() - - if transpose: - columnwidths = ([max(len(_.name(latex)) for _ in summaryitems)] - + [max(len(_.value(i, latex)) for _ in summaryitems) for i in range(size)]) - else: - 
columnwidths = [max(len(entry) for entry in [_.name(latex)] + _.values(latex)) for _ in summaryitems] - - if latex: - join = " & " - else: - join = " " - row = join.join("{{:{}}}".format(width) for width in columnwidths) - - if transpose: - rows = [row.format(*[_.name(latex)]+_.values(latex)) for _ in summaryitems] - else: - rows = [] - rows.append(row.format(*(_.name for _ in summaryitems))) - for i in range(size): - rows.append(row.format(*(_.value(i, latex) for _ in summaryitems))) - - if latex: - join = " \\\\\n" - else: - join = "\n" - result = join.join(rows) - if latex: - result = (r"\begin{{tabular}}{{{}}}".format("|" + "|".join("c"*(len(columnwidths))) + "|") + "\n" - + result + "\n" - + r"\end{tabular}") - return result - - @classmethod - def printsummaryitems(cls, *args, **kwargs): - print(cls.summaryitemsstring(*args, **kwargs)) - @classmethod - def writesummaryitems(cls, filename, *args, **kwargs): - with open(filename, "w") as f: - f.write(cls.summaryitemsstring(*args, **kwargs)+"\n") - -class ValidationWithPlotsSummary(ValidationWithPlotsSummaryBase): - @classmethod - def getsummaryitems(cls, folder): - result = [] - with open(os.path.join(folder, "{}Summary.txt".format(cls.__name__))) as f: - for line in f: - split = line.rstrip("\n").split("\t") - kwargs = {} - for thing in split[:]: - if thing.startswith("format="): - kwargs["format"] = thing.replace("format=", "", 1) - split.remove(thing) - if thing.startswith("latexname="): - kwargs["latexname"] = thing.replace("latexname=", "", 1) - split.remove(thing) - if thing.startswith("latexformat="): - kwargs["latexformat"] = thing.replace("latexformat=", "", 1) - split.remove(thing) - - name = split[0] - values = split[1:] - result.append(cls.SummaryItem(name, values, **kwargs)) - return result - -class ValidationWithComparison(GenericValidation): - @classmethod - def doComparison(cls, validations): - from .plottingOptions import PlottingOptions - repmap = PlottingOptions(None, cls).copy() - 
repmap["compareStrings"] = " , ".join(v.getCompareStrings("OfflineValidation") for v in validations) - repmap["compareStringsPlain"] = " , ".join(v.getCompareStrings("OfflineValidation", True) for v in validations) - comparison = replaceByMap(cls.comparisontemplate(), repmap) - return comparison - - @classmethod - def comparisontemplate(cls): - return configTemplates.compareAlignmentsExecution - @classmethod - def comparealignmentspath(cls): - return ".oO[Alignment/OfflineValidation]Oo./scripts/.oO[compareAlignmentsName]Oo." - @abstractmethod - def comparealignmentsname(cls): - """classmethod""" - -class ValidationForPresentation(ValidationWithPlots): - @abstractmethod - def presentationsubsections(cls): - """classmethod""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/geometryComparison.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/geometryComparison.py deleted file mode 100644 index c77a462a827d3..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/geometryComparison.py +++ /dev/null @@ -1,340 +0,0 @@ -from __future__ import absolute_import -import os -import configparser as ConfigParser # needed for exceptions in this module -from . import configTemplates -from .genericValidation import GenericValidation -from .helperFunctions import replaceByMap, getCommandOutput2, cppboolstring, pythonboolstring, clean_name -from .TkAlExceptions import AllInOneError - - -class GeometryComparison(GenericValidation): - """ - Object representing a geometry comparison job. 
- """ - defaults = { - "3DSubdetector1":"1", - "3DSubdetector2":"2", - "3DTranslationalScaleFactor":"50", - "modulesToPlot":"all", - "moduleList": "./CREATE_NEW/emptyModuleList.txt", - "useDefaultRange":"false", - "plotOnlyGlobal":"true", - "plotPng":"true", - "makeProfilePlots":"true", - "dx_min":"-99999", - "dx_max":"-99999", - "dy_min":"-99999", - "dy_max":"-99999", - "dz_min":"-99999", - "dz_max":"-99999", - "dr_min":"-99999", - "dr_max":"-99999", - "rdphi_min":"-99999", - "rdphi_max":"-99999", - "dalpha_min":"-99999", - "dalpha_max":"-99999", - "dbeta_min":"-99999", - "dbeta_max":"-99999", - "dgamma_min":"-99999", - "dgamma_max":"-99999", - "multiIOV":"False", - } - mandatories = {"levels", "dbOutput"} - valType = "compare" - def __init__( self, valName, alignment, referenceAlignment, - config, copyImages = True): - """ - Constructor of the GeometryComparison class. - - Arguments: - - `valName`: String which identifies individual validation instances - - `alignment`: `Alignment` instance to validate - - `referenceAlignment`: `Alignment` instance which is compared - with `alignment` - - `config`: `BetterConfigParser` instance which includes the - configuration of the validations - - `copyImages`: Boolean which indicates whether png- and pdf-files - should be copied back from the batch farm - """ - super(GeometryComparison, self).__init__(valName, alignment, config) - self.referenceAlignment = referenceAlignment - referenceName = "IDEAL" - if not self.referenceAlignment == "IDEAL": - referenceName = self.referenceAlignment.name - - allCompares = config.getCompares() - self.__compares = {} - self.__filesToCompare = {} - if valName in allCompares: - self.__compares[valName] = allCompares[valName] - else: - msg = ("Could not find compare section '%s' in '%s'" - %(valName, allCompares)) - raise AllInOneError(msg) - self.copyImages = copyImages - - for name in "useDefaultRange", "plotOnlyGlobal", "plotPng": - self.general[name] = cppboolstring(self.general[name], 
name) - - - def getRepMap(self, alignment = None): - if alignment == None: - alignment = self.alignmentToValidate - repMap = super(GeometryComparison, self).getRepMap( alignment ) - referenceName = "IDEAL" - referenceTitle = "IDEAL" - if not self.referenceAlignment == "IDEAL": - referenceName = self.referenceAlignment.name - referenceTitle = self.referenceAlignment.title - - assert len(self.__compares) == 1 #? not sure how it can be anything else, but just in case - common = list(self.__compares.keys())[0] - - repMap.update({ - "common": clean_name(common), - "comparedGeometry": (".oO[alignmentName]Oo." - "ROOTGeometry.root"), - "referenceGeometry": "IDEAL", # will be replaced later - # if not compared to IDEAL - "reference": clean_name(referenceName), - "referenceTitle": referenceTitle, - "alignmentTitle": self.alignmentToValidate.title, - "moduleListBase": os.path.basename(repMap["moduleList"]), - }) - if not referenceName == "IDEAL": - repMap["referenceGeometry"] = (".oO[reference]Oo." - "ROOTGeometry.root") - repMap["name"] += "_vs_.oO[reference]Oo." 
- return repMap - - @property - def filesToCompare(self): - return self.__filesToCompare - - def createConfiguration(self, path ): - # self.__compares - repMap = self.getRepMap() - cfgFileName = "TkAlCompareToNTuple.%s_cfg.py"%( - self.alignmentToValidate.name) - cfgs = {cfgFileName: configTemplates.intoNTuplesTemplate} - repMaps = {cfgFileName: repMap} - if not self.referenceAlignment == "IDEAL": - referenceRepMap = self.getRepMap( self.referenceAlignment ) - cfgFileName = "TkAlCompareToNTuple.%s_cfg.py"%( - self.referenceAlignment.name ) - cfgs[cfgFileName] = configTemplates.intoNTuplesTemplate - repMaps[cfgFileName] = referenceRepMap - - cfgSchedule = list(cfgs.keys()) - for common in self.__compares: - repMap.update({ - "levels": self.__compares[common][0], - "dbOutput": pythonboolstring(self.__compares[common][1], "dbOutput") - }) - if self.__compares[common][1].split()[0] == "true": - repMap["dbOutputService"] = configTemplates.dbOutputTemplate - else: - repMap["dbOutputService"] = "" - cfgName = replaceByMap(("TkAlCompareCommon.oO[common]Oo.." - ".oO[name]Oo._cfg.py"),repMap) - cfgs[cfgName] = configTemplates.compareTemplate - repMaps[cfgName] = repMap - - cfgSchedule.append( cfgName ) - super(GeometryComparison, self).createConfiguration(cfgs, path, cfgSchedule, repMaps = repMaps) - - def createScript(self, path): - repMap = self.getRepMap() - repMap["runComparisonScripts"] = "" - scriptName = replaceByMap(("TkAlGeomCompare.%s..oO[name]Oo..sh" - %self.name), repMap) - - y_ranges = "" - plottedDifferences = ["dx","dy","dz","dr","rdphi","dalpha","dbeta","dgamma"] - for diff in plottedDifferences: - y_ranges += ","+repMap["%s_min"%diff] - y_ranges += ","+repMap["%s_max"%diff] - - for name in self.__compares: - if '"DetUnit"' in self.__compares[name][0].split(","): - repMap["outputFile"] = (".oO[name]Oo..Comparison_common"+name+".root") - repMap["nIndex"] = ("") - repMap["runComparisonScripts"] += \ - ("cp .oO[Alignment/OfflineValidation]Oo." 
- "/scripts/comparisonScript.C .\n" - "cp .oO[Alignment/OfflineValidation]Oo." - "/scripts/GeometryComparisonPlotter.h .\n" - "cp .oO[Alignment/OfflineValidation]Oo." - "/scripts/GeometryComparisonPlotter.cc .\n" - "root -b -q 'comparisonScript.C+(\"" - ".oO[name]Oo..Comparison_common"+name+".root\",\"" - "./\",\".oO[modulesToPlot]Oo.\",\".oO[alignmentName]Oo.\",\".oO[reference]Oo.\",.oO[useDefaultRange]Oo.,.oO[plotOnlyGlobal]Oo.,.oO[plotPng]Oo.,.oO[makeProfilePlots]Oo."+y_ranges+")'\n" - "cp "+path+"/TkAl3DVisualization_.oO[common]Oo._.oO[name]Oo..C .\n" - "root -l -b -q TkAl3DVisualization_.oO[common]Oo._.oO[name]Oo..C+\n") - if self.copyImages: - repMap["runComparisonScripts"] += \ - ("mkdir -p .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/Translations\n") - repMap["runComparisonScripts"] += \ - ("mkdir -p .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/Rotations\n") - - - ### At the moment translations are images with suffix _1 and _2, rotations _3 and _4 - ### The numeration depends on the order of the MakePlots(x, y) commands in comparisonScript.C - ### If comparisonScript.C is changed, check if the following lines need to be changed as well - - if repMap["plotPng"] == "true": - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name \"*_1*\" " - "-print | xargs -I {} bash -c \"cp {} .oO[datadir]Oo." - "/.oO[name]Oo..Comparison_common"+name+"_Images/Translations/\" \n") - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name \"*_2*\" " - "-print | xargs -I {} bash -c \"cp {} .oO[datadir]Oo." - "/.oO[name]Oo..Comparison_common"+name+"_Images/Translations/\" \n") - - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name \"*_3*\" " - "-print | xargs -I {} bash -c \"cp {} .oO[datadir]Oo." - "/.oO[name]Oo..Comparison_common"+name+"_Images/Rotations/\" \n") - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name \"*_4*\" " - "-print | xargs -I {} bash -c \"cp {} .oO[datadir]Oo." 
- "/.oO[name]Oo..Comparison_common"+name+"_Images/Rotations/\" \n") - - else: - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name \"*_1*\" " - "-print | xargs -I {} bash -c \"cp {} .oO[datadir]Oo." - "/.oO[name]Oo..Comparison_common"+name+"_Images/Translations/\" \n") - - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name \"*_2*\" " - "-print | xargs -I {} bash -c \"cp {} .oO[datadir]Oo." - "/.oO[name]Oo..Comparison_common"+name+"_Images/Rotations/\" \n") - - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name " - "\"*.tex\" -print | xargs -I {} bash -c" - " \"cp {} .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/\" \n") - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name " - "\"TkMap_SurfDeform*.pdf\" -print | xargs -I {} bash -c" - " \"cp {} .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/\" \n") - repMap["runComparisonScripts"] += \ - ("find . -maxdepth 1 -name " - "\"TkMap_SurfDeform*.png\" -print | xargs -I {} bash -c" - " \"cp {} .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/\" \n") - repMap["runComparisonScripts"] += \ - ("cp .oO[Alignment/OfflineValidation]Oo." - "/macros/makeArrowPlots.C " - ".\n" - "root -b -q 'makeArrowPlots.C(\"" - ".oO[name]Oo..Comparison_common"+name - +".root\",\".oO[name]Oo.." - +name+"_ArrowPlots\")'\n") - repMap["runComparisonScripts"] += \ - ("mkdir -p .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/ArrowPlots\n") - repMap["runComparisonScripts"] += \ - ("find .oO[name]Oo.."+name+"_ArrowPlots " - "-maxdepth 1 -name \"*.png\" -print | xargs -I {} bash " - "-c \"cp {} .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/ArrowPlots\"\n") - repMap["runComparisonScripts"] += \ - ("find .oO[name]Oo.."+name+"_ArrowPlots " - "-maxdepth 1 -name \"*.pdf\" -print | xargs -I {} bash " - "-c \"cp {} .oO[datadir]Oo./.oO[name]Oo." 
- ".Comparison_common"+name+"_Images/ArrowPlots\"\n") - repMap["runComparisonScripts"] += \ - ("find . " - "-maxdepth 1 -name \".oO[common]Oo._.oO[name]Oo..Visualization_rotated.gif\" -print | xargs -I {} bash " - "-c \"cp {} .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/.oO[common]Oo._.oO[name]Oo..Visualization.gif\"\n") - - # TkAlMap inFile=tree.root compAl=UL2018 refAl=StartGeom savePNG=True TkVersion=phase1 outDir=./test_plots/tanh colPal=2 - range_str = '' - plottedDifferences = ["dx","dy","dz","dr","rdphi","dalpha","dbeta","dgamma"] - for diff in plottedDifferences: - range_str += diff+'_range=['+str(repMap[diff+'_min'])+','+str(repMap[diff+'_max'])+'];' - repMap["runComparisonScripts"] += \ - ("mkdir -p .oO[datadir]Oo./.oO[name]Oo." - ".Comparison_common"+name+"_Images/TkAlMapPlots\n") - repMap["runComparisonScripts"] += \ - ("python .oO[Alignment/OfflineValidation]Oo./python/runGCPTkAlMap.py -b " - "inFile=.oO[name]Oo..Comparison_common"+name+".root " - "refAl=\".oO[reference]Oo.\" " - "compAl=\".oO[alignmentName]Oo.\" " - "savePNG=True " - "TkVersion=\"phase0\" " - "colPal=2 " - "defRanges=\""+range_str+"\" " - "outDir=.oO[datadir]Oo./.oO[name]Oo..Comparison_common"+name+"_Images/TkAlMapPlots\n") - #"outDir=.oO[name]Oo.."+name+"_TkMapPlots " - #"useDefaultRanges=.oO[useDefaultRange]Oo. 
"+range_str+"\n") - - # Copy root file for check - repMap["runComparisonScripts"] += \ - ("cp .oO[name]Oo..Comparison_common"+name+".root " - ".oO[datadir]Oo./.oO[name]Oo..Comparison_common"+name+"_Images/TkAlMapPlots/GCP.root\n") - #repMap["runComparisonScripts"] += \ - # ("cp .oO[alignmentName]Oo.ROOTGeometry.root " - # ".oO[datadir]Oo./.oO[name]Oo..Comparison_common"+name+"_Images/TkAlMapPlots/comparedGeometry.root\n") - - resultingFile = replaceByMap(("/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./compared%s_" - ".oO[name]Oo..root"%name), repMap) - resultingFile = os.path.expandvars( resultingFile ) - resultingFile = os.path.abspath( resultingFile ) - resultingFile = "root://eoscms//eos/cms" + resultingFile #needs to be AFTER abspath so that it doesn't eat the // - self.__filesToCompare[ name ] = resultingFile - - else: - raise AllInOneError("Need to have DetUnit in levels!") - - repMap["CommandLine"]="" - repMap["CommandLine"]+= \ - "# copy module list required for comparison script \n" - if repMap["moduleList"].startswith("/store"): - repMap["CommandLine"]+= \ - "xrdcp root://eoscms//eos/cms.oO[moduleList]Oo. .\n" - elif repMap["moduleList"].startswith("root://"): - repMap["CommandLine"]+= \ - "xrdcp .oO[moduleList]Oo. .\n" - elif repMap["moduleList"].startswith("./CREATE_NEW/"): - repMap["CommandLine"]+= \ - "touch .oO[moduleListBase]Oo.\n" - else: - repMap["CommandLine"]+= \ - "cp .oO[moduleList]Oo. .\n" - - try: - getCommandOutput2(replaceByMap("cd $(mktemp -d)\n.oO[CommandLine]Oo.\ncat .oO[moduleListBase]Oo.", repMap)) - except RuntimeError: - raise AllInOneError(replaceByMap(".oO[moduleList]Oo. 
does not exist!", repMap)) - - for cfg in self.configFiles: - # FIXME: produce this line only for enabled dbOutput - # postProcess = "cp .oO[workdir]Oo./*.db .oO[datadir]Oo.\n" - # postProcess = "cp *.db .oO[datadir]Oo.\n" - postProcess = "" - repMap["CommandLine"]+= \ - repMap["CommandLineTemplate"]%{"cfgFile":cfg, - "postProcess":postProcess} - repMap["CommandLine"]+= ("# overall postprocessing\n" - ".oO[runComparisonScripts]Oo.\n" - ) - - #~ print configTemplates.scriptTemplate - scripts = {scriptName: replaceByMap( configTemplates.scriptTemplate, repMap )} - files = {replaceByMap("TkAl3DVisualization_.oO[common]Oo._.oO[name]Oo..C", repMap ): replaceByMap(configTemplates.visualizationTrackerTemplate, repMap )} - self.createFiles(files, path) - return super(GeometryComparison, self).createScript(scripts, path) - - def createCrabCfg(self, path): - msg = ("Parallelization not supported for geometry comparison. Please " - "choose another 'jobmode'.") - raise AllInOneError(msg) diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/geometryComparisonTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/geometryComparisonTemplates.py deleted file mode 100644 index c14ea1878118f..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/geometryComparisonTemplates.py +++ /dev/null @@ -1,175 +0,0 @@ -###################################################################### -###################################################################### -intoNTuplesTemplate=""" -import FWCore.ParameterSet.Config as cms - -process = cms.Process("ValidationIntoNTuples") - -.oO[LoadGlobalTagTemplate]Oo. - -process.load("Configuration.Geometry.GeometryRecoDB_cff") - -process.load("CondCore.CondDB.CondDB_cfi") - -process.MessageLogger = cms.Service("MessageLogger", - destinations = cms.untracked.vstring('detailedInfo', - 'cout') -) - -.oO[condLoad]Oo. - -process.source = cms.Source("EmptySource", - firstRun=cms.untracked.uint32(.oO[runGeomComp]Oo.) 
- ) - -process.maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(1) -) -process.dump = cms.EDAnalyzer("TrackerGeometryIntoNtuples", - outputFile = cms.untracked.string('.oO[alignmentName]Oo.ROOTGeometry.root'), - outputTreename = cms.untracked.string('alignTree') -) - -process.p = cms.Path(process.dump) -""" - - -###################################################################### -###################################################################### -compareTemplate=""" -import FWCore.ParameterSet.Config as cms - -process = cms.Process("validation") - -.oO[LoadGlobalTagTemplate]Oo. - -process.load("Configuration.Geometry.GeometryRecoDB_cff") - -process.load("CondCore.CondDB.CondDB_cfi") - -process.MessageLogger = cms.Service("MessageLogger", - destinations = cms.untracked.vstring('detailedInfo', - 'cout') -) - -process.source = cms.Source("EmptySource", - firstRun=cms.untracked.uint32(.oO[runGeomComp]Oo.) - ) - -process.maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(1) -) - -process.siStripQualityESProducer.ListOfRecordToMerge=cms.VPSet( - cms.PSet(record = cms.string('SiStripDetCablingRcd'), - tag = cms.string('')), - cms.PSet(record = cms.string('RunInfoRcd'), - tag = cms.string('')), - cms.PSet(record = cms.string('SiStripBadChannelRcd'), - tag = cms.string('')), - cms.PSet(record = cms.string('SiStripBadFiberRcd'), - tag = cms.string('')), - cms.PSet(record = cms.string('SiStripBadModuleRcd'), - tag = cms.string('')), - cms.PSet(record = cms.string('SiStripBadStripRcd'), - tag = cms.string('')) -) - -process.load("DQM.SiStripCommon.TkHistoMap_cff") - - # configuration of the Tracker Geometry Comparison Tool - # Tracker Geometry Comparison -process.load("Alignment.OfflineValidation.TrackerGeometryCompare_cfi") - # the input "IDEAL" is special indicating to use the ideal geometry of the release - -process.TrackerGeometryCompare.inputROOTFile1 = '.oO[referenceGeometry]Oo.' 
-process.TrackerGeometryCompare.inputROOTFile2 = '.oO[comparedGeometry]Oo.' -process.TrackerGeometryCompare.moduleList = '.oO[moduleListBase]Oo.' -process.TrackerGeometryCompare.outputFile = ".oO[name]Oo..Comparison_common.oO[common]Oo..root" - -process.load("CommonTools.UtilAlgos.TFileService_cfi") -process.TFileService.fileName = cms.string("TkSurfDeform_.oO[name]Oo..Comparison_common.oO[common]Oo..root") - -process.TrackerGeometryCompare.levels = [ .oO[levels]Oo. ] - - ##FIXME!!!!!!!!! - ##replace TrackerGeometryCompare.writeToDB = .oO[dbOutput]Oo. - ##removed: dbOutputService - -process.p = cms.Path(process.TrackerGeometryCompare) -""" - - -###################################################################### -###################################################################### -dbOutputTemplate= """ -//_________________________ db Output ____________________________ - # setup for writing out to DB - include "CondCore/DBCommon/CondDBSetup.cfi" -# include "CondCore/DBCommon/data/CondDBCommon.cfi" - - service = PoolDBOutputService { - using CondDBSetup - VPSet toPut = { - { string record = "TrackerAlignmentRcd" string tag = ".oO[tag]Oo." }, - { string record = "TrackerAlignmentErrorExtendedRcd" string tag = ".oO[errortag]Oo." 
} - } - # string connect = "sqlite_file:.oO[workdir]Oo./.oO[name]Oo.Common.oO[common]Oo..db" - string connect = "sqlite_file:.oO[name]Oo.Common.oO[common]Oo..db" - # untracked string catalog = "file:alignments.xml" - untracked string timetype = "runnumber" - } -""" - -###################################################################### -###################################################################### -visualizationTrackerTemplate= """ -#include "Alignment/OfflineValidation/scripts/visualizationTracker.C" -void TkAl3DVisualization_.oO[common]Oo._.oO[name]Oo.(){ - //------------------------------ONLY NEEDED INPUTS-------------------------------// -//------Tree Read In-------- - TString inputFileName = ".oO[outputFile]Oo."; - //output file name - string outputFileName = ".oO[common]Oo._.oO[name]Oo..Visualization"; - //title - string line1 = ".oO[alignmentTitle]Oo."; - string line2 = "vs. .oO[referenceTitle]Oo."; - //set subdetectors to see - int subdetector1 = .oO[3DSubdetector1]Oo.; - int subdetector2 = .oO[3DSubdetector2]Oo.; - //translation scale factor - int sclftr = .oO[3DTranslationalScaleFactor]Oo.; - //rotation scale factor - int sclfrt = 1; - //module size scale factor - float sclfmodulesizex = 1; - float sclfmodulesizey = 1; - float sclfmodulesizez = 1; - //beam pipe radius - float piperadius = 2.25; - //beam pipe xy coordinates - float pipexcoord = 0; - float pipeycoord = 0; - //beam line xy coordinates - float linexcoord = 0; - float lineycoord = 0; -//------------------------------End of ONLY NEEDED INPUTS-------------------------------// - cout << "running visualizer" << endl; - runVisualizer(inputFileName, - outputFileName, - line1, - line2, - subdetector1, - subdetector2, - sclftr, - sclfrt, - sclfmodulesizex, - sclfmodulesizey, - sclfmodulesizez, - piperadius, - pipexcoord, - pipeycoord, - linexcoord, - lineycoord ); -} -""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/globalDictionaries.py 
b/Alignment/OfflineValidation/python/TkAlAllInOneTool/globalDictionaries.py deleted file mode 100644 index e89b8be6dc262..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/globalDictionaries.py +++ /dev/null @@ -1,7 +0,0 @@ -# Store used datasets, to avoid making the same DAS query multiple times -usedDatasets = {} - -# Needed for more than one geometry comparison for one alignment -alignRandDict = {} - -plottingOptions = {} diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/helperFunctions.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/helperFunctions.py deleted file mode 100644 index 459894b721337..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/helperFunctions.py +++ /dev/null @@ -1,225 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -from builtins import range -import os -import re -import ROOT -import sys -from .TkAlExceptions import AllInOneError -import CondCore.Utilities.conddblib as conddblib - -####################--- Helpers ---############################ -def replaceByMap(target, the_map): - """This function replaces `.oO[key]Oo.` by `the_map[key]` in target. - - Arguments: - - `target`: String which contains symbolic tags of the form `.oO[key]Oo.` - - `the_map`: Dictionary which has to contain the `key`s in `target` as keys - """ - - result = target - for key in the_map: - lifeSaver = 10e3 - iteration = 0 - while ".oO[" in result and "]Oo." 
in result: - for key in the_map: - try: - result = result.replace(".oO["+key+"]Oo.",the_map[key]) - except TypeError: #try a dict - try: - for keykey, value in the_map[key].items(): - result = result.replace(".oO[" + key + "['" + keykey + "']]Oo.", value) - result = result.replace(".oO[" + key + '["' + keykey + '"]]Oo.', value) - except AttributeError: #try a list - try: - for index, value in enumerate(the_map[key]): - result = result.replace(".oO[" + key + "[" + str(index) + "]]Oo.", value) - except TypeError: - raise TypeError("Something is wrong in replaceByMap! Need a string, dict, or list, but the_map(%s)=%s!"%(repr(key), repr(the_map[key]))) - iteration += 1 - if iteration > lifeSaver: - problematicLines = "" - for line in result.splitlines(): - if ".oO[" in result and "]Oo." in line: - problematicLines += "%s\n"%line - msg = ("Oh Dear, there seems to be an endless loop in " - "replaceByMap!!\n%s\n%s"%(problematicLines, the_map)) - raise AllInOneError(msg) - return result - - -def getCommandOutput2(command): - """This function executes `command` and returns it output. - - Arguments: - - `command`: Shell command to be invoked by this function. - """ - - child = os.popen(command) - data = child.read() - err = child.close() - if err: - raise RuntimeError('%s failed w/ exit code %d' % (command, err)) - return data - - -def castorDirExists(path): - """This function checks if the directory given by `path` exists. 
- - Arguments: - - `path`: Path to castor directory - """ - - if path[-1] == "/": - path = path[:-1] - containingPath = os.path.join( *path.split("/")[:-1] ) - dirInQuestion = path.split("/")[-1] - try: - rawLines = getCommandOutput2("rfdir /"+containingPath).splitlines() - except RuntimeError: - return False - for line in rawLines: - if line.split()[0][0] == "d": - if line.split()[8] == dirInQuestion: - return True - return False - -def replacelast(string, old, new, count = 1): - """Replace the last occurances of a string""" - return new.join(string.rsplit(old,count)) - -fileExtensions = ["_cfg.py", ".sh", ".root"] - -def addIndex(filename, njobs, index = None): - if index is None: - return [addIndex(filename, njobs, i) for i in range(njobs)] - if njobs == 1: - return filename - - fileExtension = None - for extension in fileExtensions: - if filename.endswith(extension): - fileExtension = extension - if fileExtension is None: - raise AllInOneError(fileName + " does not end with any of the extensions " - + str(fileExtensions)) - return replacelast(filename, fileExtension, "_" + str(index) + fileExtension) - -def parsecolor(color): - try: #simplest case: it's an int - return int(color) - except ValueError: - pass - - try: #kRed, kBlue, ... 
- color = str(getattr(ROOT, color)) - return int(color) - except (AttributeError, ValueError): - pass - - if color.count("+") + color.count("-") == 1: #kRed+5, kGreen-2 - if "+" in color: #don't want to deal with nonassociativity of - - split = color.split("+") - color1 = parsecolor(split[0]) - color2 = parsecolor(split[1]) - return color1 + color2 - - if "-" in color: - split = color.split("-") - color1 = parsecolor(split[0]) - color2 = parsecolor(split[1]) - return color1 - color2 - - raise AllInOneError("color has to be an integer, a ROOT constant (kRed, kBlue, ...), or a two-term sum or difference (kGreen-5)!") - -def parsestyle(style): - try: #simplest case: it's an int - return int(style) - except ValueError: - pass - - try: #kStar, kDot, ... - style = str(getattr(ROOT,style)) - return int(style) - except (AttributeError, ValueError): - pass - - raise AllInOneError("style has to be an integer or a ROOT constant (kDashed, kStar, ...)!") - -def recursivesubclasses(cls): - result = [cls] - for subcls in cls.__subclasses__(): - result += recursivesubclasses(subcls) - return result - -def cache(function): - cache = {} - def newfunction(*args, **kwargs): - try: - return cache[args, tuple(sorted(kwargs.items()))] - except TypeError: - print(args, tuple(sorted(kwargs.items()))) - raise - except KeyError: - cache[args, tuple(sorted(kwargs.items()))] = function(*args, **kwargs) - return newfunction(*args, **kwargs) - newfunction.__name__ = function.__name__ - return newfunction - -def boolfromstring(string, name): - """ - Takes a string from the configuration file - and makes it into a bool - """ - #try as a string, not case sensitive - if string.lower() == "true": return True - if string.lower() == "false": return False - #try as a number - try: - return str(bool(int(string))) - except ValueError: - pass - #out of options - raise ValueError("{} has to be true or false!".format(name)) - - -def pythonboolstring(string, name): - """ - Takes a string from the 
configuration file - and makes it into a bool string for a python template - """ - return str(boolfromstring(string, name)) - -def cppboolstring(string, name): - """ - Takes a string from the configuration file - and makes it into a bool string for a C++ template - """ - return pythonboolstring(string, name).lower() - -def getTagsMap(db): - con = conddblib.connect(url = conddblib.make_url(db)) - session = con.session() - TAG = session.get_dbtype(conddblib.Tag) - dictionary = {} - for i in range(0,len(session.query(TAG.object_type).order_by(TAG.name).all())): - q1 = session.query(TAG.object_type).order_by(TAG.name).all()[i][0] - q2 = session.query(TAG.name).order_by(TAG.name).all()[i][0] - dictionary[q1]=q2 - - return dictionary - -def clean_name(s): - """Transforms a string into a valid variable or method name. - - Arguments: - - `s`: input string - """ - - # Remove invalid characters - s = re.sub(r"[^0-9a-zA-Z_]", "", s) - - # Remove leading characters until we find a letter or underscore - s = re.sub(r"^[^a-zA-Z_]+", "", s) - - return s diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/monteCarloValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/monteCarloValidation.py deleted file mode 100644 index d4a3cc80bb533..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/monteCarloValidation.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import absolute_import -import os -from . import configTemplates -from . 
import globalDictionaries -from .dataset import Dataset -from .genericValidation import GenericValidationData -from .helperFunctions import replaceByMap -from .TkAlExceptions import AllInOneError - - -class MonteCarloValidation(GenericValidationData): - configBaseName = "TkAlMcValidate" - scriptBaseName = "TkAlMcValidate" - crabCfgBaseName = "TkAlMcValidate" - resultBaseName = "McValidation" - outputBaseName = "McValidation" - needParentFiles = True - valType = "mcValidate" - def __init__(self, valName, alignment, config): - super(MonteCarloValidation, self).__init__(valName, alignment, config) - if self.NJobs > 1: - raise AllInOneError("Parallel jobs not implemented for the MC validation!\n" - "Please set parallelJobs = 1.") - - @property - def cfgTemplate(self): - return configTemplates.mcValidateTemplate - - def createScript(self, path): - return super(MonteCarloValidation, self).createScript(path) - - def createCrabCfg(self, path): - return super(MonteCarloValidation, self).createCrabCfg(path, self.crabCfgBaseName) - - def getRepMap( self, alignment = None ): - repMap = super(MonteCarloValidation, self).getRepMap(alignment) - repMap.update({ - "nEvents": self.general["maxevents"] - }) - repMap["outputFile"] = os.path.expandvars( repMap["outputFile"] ) - repMap["resultFile"] = os.path.expandvars( repMap["resultFile"] ) - return repMap - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/monteCarloValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/monteCarloValidationTemplates.py deleted file mode 100644 index 1f3b7831189d5..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/monteCarloValidationTemplates.py +++ /dev/null @@ -1,66 +0,0 @@ -###################################################################### -###################################################################### -mcValidateTemplate=""" -import FWCore.ParameterSet.Config as cms - -process = cms.Process("TkVal") 
-process.load("FWCore.MessageService.MessageLogger_cfi") -process.MessageLogger = cms.Service("MessageLogger", - destinations = cms.untracked.vstring('LOGFILE_McValidate_.oO[name]Oo.', - 'cout') -) - -### standard includes -process.load('Configuration.Geometry.GeometryPilot2_cff') -process.load("Configuration.StandardSequences.RawToDigi_cff") -process.load("Configuration.EventContent.EventContent_cff") -process.load("Configuration.StandardSequences.Reconstruction_cff") -process.load("Configuration.StandardSequences..oO[magneticField]Oo._cff") -process.load("SimGeneral.MixingModule.mixNoPU_cfi") - -.oO[LoadGlobalTagTemplate]Oo. - -.oO[condLoad]Oo. - - -### validation-specific includes -process.load("SimTracker.TrackAssociatorProducers.trackAssociatorByHits_cfi") -process.load("Validation.RecoTrack.cuts_cff") -process.load("Validation.RecoTrack.MultiTrackValidator_cff") -process.load("SimGeneral.TrackingAnalysis.trackingParticles_cfi") - -### configuration MultiTrackValidator ### -process.multiTrackValidator.outputFile = '.oO[outputFile]Oo.' - -process.multiTrackValidator.associators = ['trackAssociatorByHits'] -process.multiTrackValidator.UseAssociators = cms.bool(True) -process.multiTrackValidator.label = ['generalTracks'] - -.oO[datasetDefinition]Oo. -process.source.inputCommands = cms.untracked.vstring('keep *', 'drop *_MEtoEDMConverter_*_*') # hack to get rid of the memory consumption problem in 2_2_X and beond - -process.options = cms.untracked.PSet( - wantSummary = cms.untracked.bool(False), - Rethrow = cms.untracked.vstring("ProductNotFound"), # make this exception fatal - fileMode = cms.untracked.string('NOMERGE') # no ordering needed, but calls endRun/beginRun etc. 
at file boundaries -) - -process.re_tracking_and_TP = cms.Sequence(process.mix*process.trackingParticles* - process.siPixelRecHits*process.siStripMatchedRecHits* - process.ckftracks* - process.cutsRecoTracks* - process.trackAssociatorByHits* - process.multiTrackValidator - ) - -process.re_tracking = cms.Sequence(process.siPixelRecHits*process.siStripMatchedRecHits* - process.ckftracks* - process.cutsRecoTracks* - process.trackAssociatorByHits* - process.multiTrackValidator - ) - -### final path and endPath -process.p = cms.Path(process.re_tracking) -""" - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/offlineValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/offlineValidation.py deleted file mode 100644 index ed677f8a0f5d0..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/offlineValidation.py +++ /dev/null @@ -1,188 +0,0 @@ -from __future__ import absolute_import -import os -from . import configTemplates -from . import globalDictionaries -from .genericValidation import GenericValidationData_CTSR, ParallelValidation, ValidationWithComparison, ValidationForPresentation, ValidationWithPlots, ValidationWithPlotsSummary -from .helperFunctions import replaceByMap, addIndex, pythonboolstring -from .presentation import SubsectionFromList, SubsectionOnePage -from .TkAlExceptions import AllInOneError - -class OfflineValidation(GenericValidationData_CTSR, ParallelValidation, ValidationWithComparison, ValidationWithPlotsSummary, ValidationForPresentation): - configBaseName = "TkAlOfflineValidation" - scriptBaseName = "TkAlOfflineValidation" - crabCfgBaseName = "TkAlOfflineValidation" - resultBaseName = "AlignmentValidation" - outputBaseName = "AlignmentValidation" - defaults = { - "offlineModuleLevelHistsTransient": "False", - "offlineModuleLevelProfiles": "True", - "stripYResiduals": "False", - "maxtracks": "0", - "chargeCut": "0", - "multiIOV": "False", - } - deprecateddefaults = { - "DMRMethod":"", - "DMRMinimum":"", 
- "DMROptions":"", - "OfflineTreeBaseDir":"", - "SurfaceShapes":"", - } - defaults.update(deprecateddefaults) - mandatories = {"trackcollection"} - valType = "offline" - - def __init__(self, valName, alignment, config): - super(OfflineValidation, self).__init__(valName, alignment, config) - - for name in "offlineModuleLevelHistsTransient", "offlineModuleLevelProfiles", "stripYResiduals": - self.general[name] = pythonboolstring(self.general[name], name) - - for option in self.deprecateddefaults: - if self.general[option]: - raise AllInOneError("The '%s' option has been moved to the [plots:offline] section. Please specify it there."%option) - del self.general[option] - - if self.NJobs > 1 and self.general["offlineModuleLevelHistsTransient"] == "True": - msg = ("To be able to merge results when running parallel jobs," - " set offlineModuleLevelHistsTransient to false.") - raise AllInOneError(msg) - - try: - self.NTracks = int(self.general["maxtracks"]) - if self.NTracks < 0: raise ValueError - except ValueError: - raise AllInOneError("maxtracks has to be a positive integer, or 0 for no limit") - - if self.NTracks / self.NJobs != float(self.NTracks) / self.NJobs: - raise AllInOneError("maxtracks has to be divisible by parallelJobs") - - @property - def ProcessName(self): - return "OfflineValidator" - - @property - def ValidationTemplate(self): - return configTemplates.offlineTemplate - - @property - def ValidationSequence(self): - return configTemplates.OfflineValidationSequence - - @property - def FileOutputTemplate(self): - return configTemplates.offlineFileOutputTemplate - - def createScript(self, path): - return super(OfflineValidation, self).createScript(path) - - def createCrabCfg(self, path): - return super(OfflineValidation, self).createCrabCfg(path, self.crabCfgBaseName) - - def getRepMap(self, alignment = None): - repMap = super(OfflineValidation, self).getRepMap(alignment) - repMap.update({ - "nEvents": self.general["maxevents"], - "offlineValidationMode": 
"Standalone", - "TrackCollection": self.general["trackcollection"], - "filetoplot": "root://eoscms//eos/cms.oO[finalResultFile]Oo.", - }) - - return repMap - - def appendToPlots(self): - return ' p.loadFileList(".oO[filetoplot]Oo.", ".oO[title]Oo.", .oO[color]Oo., .oO[style]Oo.);\n' - - @classmethod - def initMerge(cls): - from .plottingOptions import PlottingOptions - outFilePath = replaceByMap(".oO[scriptsdir]Oo./TkAlOfflineJobsMerge.C", PlottingOptions(None, cls.valType)) - print("outFilePath") - print(outFilePath) - with open(outFilePath, "w") as theFile: - theFile.write(replaceByMap(configTemplates.mergeOfflineParJobsTemplate, {})) - result = super(OfflineValidation, cls).initMerge() - result += ("cp .oO[Alignment/OfflineValidation]Oo./scripts/merge_TrackerOfflineValidation.C .\n" - "cp .oO[mergeOfflineParJobsScriptPath]Oo. .\n") - return result - - def appendToMerge(self): - repMap = self.getRepMap() - - parameters = "root://eoscms//eos/cms" + ",root://eoscms//eos/cms".join(repMap["resultFiles"]) - - mergedoutputfile = "root://eoscms//eos/cms%(finalResultFile)s"%repMap - return ('root -x -b -q -l "TkAlOfflineJobsMerge.C(\\\"' - +parameters+'\\\",\\\"'+mergedoutputfile+'\\\")"') - - @classmethod - def plottingscriptname(cls): - return "TkAlExtendedOfflineValidation.C" - - @classmethod - def plottingscripttemplate(cls): - return configTemplates.extendedValidationTemplate - - @classmethod - def plotsdirname(cls): - return "ExtendedOfflineValidation_Images" - - @classmethod - def comparealignmentsname(cls): - return "compareAlignments.cc" - - @classmethod - def presentationsubsections(cls): - return [ - SubsectionOnePage('chi2', r'$\chi^2$ plots'), - SubsectionSubdetectors('DmedianY*R_[^_]*.eps$', 'DMR'), - SubsectionSubdetectors('DmedianY*R.*plain.eps$', 'DMR'), - SubsectionSubdetectors('DmedianY*R.*split.eps$','Split DMR'), - SubsectionSubdetectors('DrmsNY*R_[^_]*.eps$', 'DRnR'), - SubsectionSubdetectors('DrmsNY*R.*plain.eps$', 'DRnR'), - 
SubsectionSubdetectors('SurfaceShape', 'Surface Shape'), - ] - -class SubsectionSubdetectors(SubsectionFromList): - pageidentifiers = ( - ("BPIX", "BPIX"), - ("FPIX", "FPIX"), - ("TIB", "TIB"), - ("TID", "TID"), - ("TOB", "TOB"), - ("TEC", "TEC"), - ) - -class OfflineValidationDQM(OfflineValidation): - configBaseName = "TkAlOfflineValidationDQM" - def __init__(self, valName, alignment, config): - super(OfflineValidationDQM, self).__init__(valName, alignment, config) - if not config.has_section("DQM"): - msg = "You need to have a DQM section in your configfile!" - raise AllInOneError(msg) - - self.__PrimaryDataset = config.get("DQM", "primaryDataset") - self.__firstRun = int(config.get("DQM", "firstRun")) - self.__lastRun = int(config.get("DQM", "lastRun")) - - def getRepMap(self, alignment = None): - repMap = super(OfflineValidationDQM, self).getRepMap(alignment) - repMap.update({ - "workdir": os.path.expandvars(repMap["workdir"]), - "offlineValidationMode": "Dqm", - "workflow": ("/%s/TkAl%s-.oO[alignmentName]Oo._R%09i_R%09i_" - "ValSkim-v1/ALCARECO" - %(self.__PrimaryDataset, - datetime.datetime.now().strftime("%y"), - self.__firstRun, self.__lastRun)), - "firstRunNumber": "%i"% self.__firstRun - }) - if "__" in repMap["workflow"]: - msg = ("the DQM workflow specefication must not contain '__'. 
" - "it is: %s"%repMap["workflow"]) - raise AllInOneError(msg) - return repMap - - @property - def FileOutputTemplate(self): - return configTemplates.offlineDqmFileOutputTemplate diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/offlineValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/offlineValidationTemplates.py deleted file mode 100644 index 3dd9a759a571a..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/offlineValidationTemplates.py +++ /dev/null @@ -1,125 +0,0 @@ -###################################################################### -###################################################################### -offlineTemplate = """ -process.oneGoodVertexFilter = cms.EDFilter("VertexSelector", - src = cms.InputTag("offlinePrimaryVertices"), - cut = cms.string("!isFake && ndof > 4 && abs(z) <= 15 && position.Rho <= 2"), # tracksSize() > 3 for the older cut - filter = cms.bool(True), # otherwise it won't filter the events, just produce an empty vertex collection. 
- ) - - - -process.FilterGoodEvents=cms.Sequence(process.oneGoodVertexFilter) - - -process.noScraping= cms.EDFilter("FilterOutScraping", - src=cms.InputTag(".oO[TrackCollection]Oo."), - applyfilter = cms.untracked.bool(True), - debugOn = cms.untracked.bool(False), ## Or 'True' to get some per-event info - numtrack = cms.untracked.uint32(10), - thresh = cms.untracked.double(0.25) - ) -#################################### - -# Use compressions settings of TFile -# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings -# settings = 100 * algorithm + level -# level is from 1 (small) to 9 (large compression) -# algo: 1 (ZLIB), 2 (LMZA) -# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance -compressionSettings = 207 - - ## - ## Load and Configure OfflineValidation and Output File - ## -process.load("Alignment.OfflineValidation.TrackerOfflineValidation_.oO[offlineValidationMode]Oo._cff") -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..compressionSettings = compressionSettings -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..Tracks = 'FinalTrackRefitter' -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..trajectoryInput = 'FinalTrackRefitter' -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..moduleLevelHistsTransient = .oO[offlineModuleLevelHistsTransient]Oo. -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..moduleLevelProfiles = .oO[offlineModuleLevelProfiles]Oo. -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..stripYResiduals = .oO[stripYResiduals]Oo. -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..maxTracks = int(.oO[maxtracks]Oo./.oO[parallelJobs]Oo.) -process.TrackerOfflineValidation.oO[offlineValidationMode]Oo..chargeCut = .oO[chargeCut]Oo. -""" - -OfflineValidationSequence = "process.seqTrackerOfflineValidation.oO[offlineValidationMode]Oo." 
- - -###################################################################### -###################################################################### -mergeOfflineParJobsTemplate=""" -#include "Alignment/OfflineValidation/scripts/merge_TrackerOfflineValidation.C" - -int TkAlOfflineJobsMerge(TString pars, TString outFile) -{ -// load framework lite just to find the CMSSW libs... -gSystem->Load("libFWCoreFWLite"); -FWLiteEnabler::enable(); - -return hadd(pars, outFile); -} -""" - - -###################################################################### -###################################################################### -offlineFileOutputTemplate = """ -process.TFileService.fileName = '.oO[outputFile]Oo.' -""" - - -###################################################################### -###################################################################### -offlineDqmFileOutputTemplate = """ -process.DqmSaverTkAl.workflow = '.oO[workflow]Oo.' -process.DqmSaverTkAl.dirName = '.oO[workdir]Oo./.' -process.DqmSaverTkAl.forceRunNumber = .oO[firstRunNumber]Oo. -""" - - -###################################################################### -###################################################################### -extendedValidationExecution=""" -#run extended offline validation scripts -echo -e "\n\nRunning extended offline validation" - -cp .oO[extendedValScriptPath]Oo. . 
-root -x -b -q -l TkAlExtendedOfflineValidation.C - -""" - - -###################################################################### -###################################################################### -extendedValidationTemplate=""" -#include "Alignment/OfflineValidation/macros/PlotAlignmentValidation.C" -#include "FWCore/FWLite/interface/FWLiteEnabler.h" - -void TkAlExtendedOfflineValidation() -{ - TkAlStyle::legendheader = ".oO[legendheader]Oo."; - TkAlStyle::legendoptions = ".oO[legendoptions]Oo."; - TkAlStyle::set(.oO[publicationstatus]Oo., .oO[era]Oo., ".oO[customtitle]Oo.", ".oO[customrighttitle]Oo."); - bool bigtext = .oO[bigtext]Oo.; - gStyle->SetTitleH ( 0.07 ); - gStyle->SetTitleW ( 1.00 ); - gStyle->SetTitleFont ( 132 ); - // load framework lite just to find the CMSSW libs... - gSystem->Load("libFWCoreFWLite"); - FWLiteEnabler::enable(); - - PlotAlignmentValidation p(bigtext); -.oO[PlottingInstantiation]Oo. - p.setOutputDir(".oO[datadir]Oo./.oO[PlotsDirName]Oo."); - p.useFitForDMRplots(.oO[usefit]Oo.); - p.setTreeBaseDir(".oO[OfflineTreeBaseDir]Oo."); - p.plotDMR(".oO[DMRMethod]Oo.",.oO[DMRMinimum]Oo.,".oO[DMROptions]Oo."); - p.plotSurfaceShapes(".oO[SurfaceShapes]Oo."); - p.plotChi2("root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./.oO[validationId]Oo._result.root"); - vector moduleids = {.oO[moduleid]Oo.}; - for (auto moduleid : moduleids) { - p.residual_by_moduleID(moduleid); - } -} -""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/overlapValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/overlapValidation.py deleted file mode 100644 index 08d9508266b6d..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/overlapValidation.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import absolute_import - -import os - -from . 
import configTemplates -from .genericValidation import GenericValidationData_CTSR, ParallelValidation, ValidationWithPlots -from .helperFunctions import replaceByMap -from .presentation import SubsectionFromList, SubsectionOnePage -from .TkAlExceptions import AllInOneError - - -class OverlapValidation(GenericValidationData_CTSR, ParallelValidation, ValidationWithPlots): - configBaseName = "TkAlOverlapValidation" - scriptBaseName = "TkAlOverlapValidation" - crabCfgBaseName = "TkAlOverlapValidation" - resultBaseName = "OverlapValidation" - outputBaseName = "OverlapValidation" - mandatories = {"trackcollection"} - valType = "overlap" - - @property - def ValidationTemplate(self): - return configTemplates.overlapTemplate - - @property - def ValidationSequence(self): - return configTemplates.overlapValidationSequence - - @property - def ProcessName(self): - return "overlap" - - def getRepMap( self, alignment = None ): - repMap = super(OverlapValidation, self).getRepMap(alignment) - repMap.update({ - "nEvents": self.general["maxevents"], - "TrackCollection": self.general["trackcollection"], - }) - return repMap - - def appendToPlots(self): - """ - if no argument or "" is passed a string with an instantiation is - returned, else the validation is appended to the list - """ - return '("{file}", "{title}", {color}, {style}),'.format(file=self.getCompareStrings(plain=True)["DEFAULT"], **self.getRepMap()) - - def appendToMerge(self): - repMap = self.getRepMap() - - parameters = " ".join(os.path.join("root://eoscms//eos/cms", file.lstrip("/")) for file in repMap["resultFiles"]) - - mergedoutputfile = os.path.join("root://eoscms//eos/cms", repMap["finalResultFile"].lstrip("/")) - return "hadd -f %s %s" % (mergedoutputfile, parameters) - - @classmethod - def plottingscriptname(cls): - return "TkAlOverlapValidation.py" - - @classmethod - def plottingscripttemplate(cls): - return configTemplates.overlapPlottingTemplate - - @classmethod - def plotsdirname(cls): - return 
"OverlapValidationPlots" - - @classmethod - def runPlots(cls, validations): - return ("rfcp .oO[plottingscriptpath]Oo. .\n" - "python .oO[plottingscriptname]Oo.") - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/overlapValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/overlapValidationTemplates.py deleted file mode 100644 index 85f85d69ee1a1..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/overlapValidationTemplates.py +++ /dev/null @@ -1,62 +0,0 @@ -overlapTemplate = """ -# Use compressions settings of TFile -# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings -# settings = 100 * algorithm + level -# level is from 1 (small) to 9 (large compression) -# algo: 1 (ZLIB), 2 (LMZA) -# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance -compressionSettings = 207 -process.analysis = cms.EDAnalyzer("OverlapValidation", - usePXB = cms.bool(True), - usePXF = cms.bool(True), - useTIB = cms.bool(True), - useTOB = cms.bool(True), - useTID = cms.bool(True), - useTEC = cms.bool(True), - compressionSettings = cms.untracked.int32(compressionSettings), - ROUList = cms.vstring('TrackerHitsTIBLowTof', - 'TrackerHitsTIBHighTof', - 'TrackerHitsTOBLowTof', - 'TrackerHitsTOBHighTof'), - trajectories = cms.InputTag("FinalTrackRefitter"), - associatePixel = cms.bool(False), - associateStrip = cms.bool(False), - associateRecoTracks = cms.bool(False), - tracks = cms.InputTag("FinalTrackRefitter"), - barrelOnly = cms.bool(False) -) - -""" - -overlapValidationSequence = "process.analysis" - -overlapPlottingTemplate = """ - -import os -import ROOT -from Alignment.OfflineValidation.TkAlStyle import TkAlStyle - -TkAlStyle.legendheader = ".oO[legendheader]Oo." 
-TkAlStyle.set(ROOT..oO[publicationstatus]Oo., ROOT..oO[era]Oo., ".oO[customtitle]Oo.", ".oO[customrighttitle]Oo.") - -try: - os.makedirs(".oO[datadir]Oo./.oO[PlotsDirName]Oo./") -except OSError: - pass -try: - os.makedirs(".oO[datadir]Oo./.oO[PlotsDirName]Oo./Profiles") -except OSError: - pass - -from Alignment.OfflineValidation.overlapValidationPlot import plot - -subdet_ids=[True,True,True,True,True,True]#(BPIX,FPIX,TIB,TID,TOB,TEC) -module_directions=[True,True,True]#(z,r,phi) -overlap_directions=[True,True,True]#(z,r,phi) -profile_directions=[True,True,True,True]#(histogtam,z-profiles,r-profiles,phi-profiles) - - -plot(".oO[datadir]Oo./.oO[PlotsDirName]Oo./",subdet_ids,module_directions,overlap_directions,profile_directions,.oO[PlottingInstantiation]Oo.) - - -""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/plottingOptions.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/plottingOptions.py deleted file mode 100644 index 258734faf4244..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/plottingOptions.py +++ /dev/null @@ -1,258 +0,0 @@ -from __future__ import absolute_import -from builtins import range -import os -import random - -from . import globalDictionaries -from . 
import configTemplates - -from .genericValidation import ValidationMetaClass, ValidationWithComparison, ValidationWithPlots -from .helperFunctions import getCommandOutput2, replaceByMap, cppboolstring -from .offlineValidation import OfflineValidation -from .primaryVertexValidation import PrimaryVertexValidation -from .primaryVertexResolution import PrimaryVertexResolution -from .TkAlExceptions import AllInOneError -from .trackSplittingValidation import TrackSplittingValidation -from .zMuMuValidation import ZMuMuValidation -from .overlapValidation import OverlapValidation - -class BasePlottingOptions(object, metaclass=ValidationMetaClass): - defaults = { - "cmssw" : os.environ["CMSSW_BASE"], - "publicationstatus" : "", - "customtitle" : "", - "customrighttitle" : "", - "era" : "NONE", - "legendheader" : "", - "legendoptions":"all", - } - mandatories = set() - needpackages = {"Alignment/OfflineValidation"} - def __init__(self, config, valType): - import random - self.type = valType - self.general = config.getGeneral() - self.randomWorkdirPart = "%0i"%random.randint(1,10e9) - self.config = config - - theUpdate = config.getResultingSection("plots:"+self.type, - defaultDict = self.defaults, - demandPars = self.mandatories) - self.general.update(theUpdate) - - self.cmssw = self.general["cmssw"] - badcharacters = r"\'" - for character in badcharacters: - if character in self.cmssw: - raise AllInOneError("The bad characters " + badcharacters + " are not allowed in the cmssw\n" - "path name. 
If you really have it in such a ridiculously named location,\n" - "try making a symbolic link somewhere with a decent name.") - try: - os.listdir(self.cmssw) - except OSError: - raise AllInOneError("Your cmssw release " + self.cmssw + ' does not exist') - - if self.cmssw == os.environ["CMSSW_BASE"]: - self.scramarch = os.environ["SCRAM_ARCH"] - self.cmsswreleasebase = os.environ["CMSSW_RELEASE_BASE"] - else: - command = ("cd '" + self.cmssw + "' && eval `scramv1 ru -sh 2> /dev/null`" - ' && echo "$CMSSW_BASE\n$SCRAM_ARCH\n$CMSSW_RELEASE_BASE"') - commandoutput = getCommandOutput2(command).split('\n') - self.cmssw = commandoutput[0] - self.scramarch = commandoutput[1] - self.cmsswreleasebase = commandoutput[2] - - for package in self.needpackages: - for placetolook in self.cmssw, self.cmsswreleasebase: - pkgpath = os.path.join(placetolook, "src", package) - if os.path.exists(pkgpath): - self.general[package] = pkgpath - break - else: - raise AllInOneError("Package {} does not exist in {} or {}!".format(package, self.cmssw, self.cmsswreleasebase)) - - self.general["publicationstatus"] = self.general["publicationstatus"].upper() - self.general["era"] = self.general["era"].upper() - - if not self.general["publicationstatus"] and not self.general["customtitle"]: - self.general["publicationstatus"] = "INTERNAL" - if self.general["customtitle"] and not self.general["publicationstatus"]: - self.general["publicationstatus"] = "CUSTOM" - - if self.general["publicationstatus"] != "CUSTOM" and self.general["customtitle"]: - raise AllInOneError("If you would like to use a custom title, please leave out the 'publicationstatus' parameter") - if self.general["publicationstatus"] == "CUSTOM" and not self.general["customtitle"]: - raise AllInOneError("If you want to use a custom title, you should provide it using 'customtitle' in the [plots:%s] section" % valType) - - if self.general["era"] != "NONE" and self.general["customrighttitle"]: - raise AllInOneError("If you would like to 
use a custom right title, please leave out the 'era' parameter") - - publicationstatusenum = ["INTERNAL", "INTERNAL_SIMULATION", "PRELIMINARY", "PUBLIC", "SIMULATION", "UNPUBLISHED", "CUSTOM"] - eraenum = ["NONE", "CRUZET15", "CRAFT15", "COLL0T15"] - if self.general["publicationstatus"] not in publicationstatusenum: - raise AllInOneError("Publication status must be one of " + ", ".join(publicationstatusenum) + "!") - if self.general["era"] not in eraenum: - raise AllInOneError("Era must be one of " + ", ".join(eraenum) + "!") - - knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals - ignoreOpts = [] - config.checkInput("plots:"+self.type, - knownSimpleOptions = knownOpts, - ignoreOptions = ignoreOpts) - - def getRepMap(self): - result = self.general - result.update({ - "workdir": os.path.join(self.general["workdir"], - self.randomWorkdirPart), - "datadir": self.general["datadir"], - "logdir": self.general["logdir"], - "CMSSW_BASE": self.cmssw, - "SCRAM_ARCH": self.scramarch, - "CMSSW_RELEASE_BASE": self.cmsswreleasebase, - "validationId": self.validationclass.__name__, - }) - if issubclass(self.validationclass, ValidationWithPlots): - result["plottingscriptname"] = self.validationclass.plottingscriptname() - result["plottingscriptpath"] = ".oO[scriptsdir]Oo./.oO[plottingscriptname]Oo." 
- result["PlotsDirName"] = self.validationclass.plotsdirname() - if issubclass(self.validationclass, ValidationWithComparison): - result["compareAlignmentsPath"] = self.validationclass.comparealignmentspath() - result["compareAlignmentsName"] = self.validationclass.comparealignmentsname() - return result - -class PlottingOptionsTrackSplitting(BasePlottingOptions): - defaults = { - "outliercut": "-1.0", - "subdetector": "none", - } - needpackages = {"Alignment/CommonAlignmentProducer"} - validationclass = TrackSplittingValidation - def __init__(self, config): - super(PlottingOptionsTrackSplitting, self).__init__(config, "split") - validsubdets = self.validsubdets() - if self.general["subdetector"] not in validsubdets: - raise AllInOneError("'%s' is not a valid subdetector!\n" % self.general["subdetector"] + "The options are: " + ", ".join(validsubdets)) - - def validsubdets(self): - filename = replaceByMap(".oO[Alignment/CommonAlignmentProducer]Oo./python/AlignmentTrackSelector_cfi.py", self.getRepMap()) - with open(filename) as f: - trackselector = f.read() - - minhitspersubdet = trackselector.split("minHitsPerSubDet")[1].split("(",1)[1] - - parenthesesdepth = 0 - i = 0 - for character in minhitspersubdet: - if character == "(": - parenthesesdepth += 1 - if character == ")": - parenthesesdepth -= 1 - if parenthesesdepth < 0: - break - i += 1 - minhitspersubdet = minhitspersubdet[0:i] - - results = minhitspersubdet.split(",") - empty = [] - for i in range(len(results)): - results[i] = results[i].split("=")[0].strip().replace("in", "", 1) - - results.append("none") - - return [a for a in results if a] - -class PlottingOptionsZMuMu(BasePlottingOptions): - defaults = { - "resonance": "Z", - "switchONfit": "false", - "rebinphi": "4", - "rebinetadiff": "2", - "rebineta": "2", - "rebinpt": "8", - "AutoSetRange": "false", - } - needpackages = {"MuonAnalysis/MomentumScaleCalibration"} - validationclass = ZMuMuValidation - def __init__(self, config): - 
super(PlottingOptionsZMuMu, self).__init__(config, "zmumu") - self.general["switchONfit"] = cppboolstring(self.general["switchONfit"], "switchONfit") - -class PlottingOptionsOffline(BasePlottingOptions): - defaults = { - "DMRMethod":"median,rmsNorm", - "DMRMinimum":"30", - "DMROptions":"", - "OfflineTreeBaseDir":"TrackHitFilter", - "SurfaceShapes":"coarse", - "bigtext":"false", - "mergeOfflineParJobsScriptPath": ".oO[scriptsdir]Oo./TkAlOfflineJobsMerge.C", - "usefit": "false","moduleid": "" - } - validationclass = OfflineValidation - def __init__(self, config): - super(PlottingOptionsOffline, self).__init__(config, "offline") - for name in "usefit", "bigtext": - self.general[name] = cppboolstring(self.general[name], name) - - -class PlottingOptionsPrimaryVertex(BasePlottingOptions): - defaults = { - "autoLimits":"false", - "doMaps":"false", - "stdResiduals":"true", - "m_dxyPhiMax":"40", - "m_dzPhiMax":"40", - "m_dxyEtaMax":"40", - "m_dzEtaMax":"40", - "m_dxyPhiNormMax":"0.5", - "m_dzPhiNormMax":"0.5", - "m_dxyEtaNormMax":"0.5", - "m_dzEtaNormMax":"0.5", - "w_dxyPhiMax":"150", - "w_dzPhiMax":"150", - "w_dxyEtaMax":"150", - "w_dzEtaMax":"1000", - "w_dxyPhiNormMax":"1.8", - "w_dzPhiNormMax":"1.8", - "w_dxyEtaNormMax":"1.8", - "w_dzEtaNormMax":"1.8", - } - validationclass = PrimaryVertexValidation - def __init__(self, config): - super(PlottingOptionsPrimaryVertex, self).__init__(config, "primaryvertex") - for name in "autoLimits", "doMaps", "stdResiduals": - self.general[name] = cppboolstring(self.general[name], name) - -class PlottingOptionsOverlap(BasePlottingOptions): - validationclass = OverlapValidation - def __init__(self, config): - super(PlottingOptionsOverlap, self).__init__(config, "overlap") - -class PlottingOptionsPVResolution(BasePlottingOptions): - defaults = {} - validationclass = PrimaryVertexResolution - def __init__(self, config): - super(PlottingOptionsPVResolution, self).__init__(config, "pvresolution") - -def PlottingOptions(config, valType): - 
plottingOptionsClasses = { - "offline": PlottingOptionsOffline, - "split": PlottingOptionsTrackSplitting, - "zmumu": PlottingOptionsZMuMu, - "primaryvertex": PlottingOptionsPrimaryVertex, - "overlap": PlottingOptionsOverlap, - "pvresolution": PlottingOptionsPVResolution, - } - if isinstance(valType, type): - valType = valType.valType - - if valType not in globalDictionaries.plottingOptions: - if config is None: - raise ValueError("Have to provide a config the first time you call PlottingOptions for {}".format(valType)) - globalDictionaries.plottingOptions[valType] = plottingOptionsClasses[valType](config) - return globalDictionaries.plottingOptions[valType].getRepMap() - - - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/preexistingValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/preexistingValidation.py deleted file mode 100644 index 158eebb2a8db5..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/preexistingValidation.py +++ /dev/null @@ -1,135 +0,0 @@ -from __future__ import absolute_import -import os -from .genericValidation import GenericValidation, GenericValidationData -from .geometryComparison import GeometryComparison -from .helperFunctions import boolfromstring, getCommandOutput2, parsecolor, parsestyle -from .monteCarloValidation import MonteCarloValidation -from .offlineValidation import OfflineValidation -from .primaryVertexValidation import PrimaryVertexValidation -from .plottingOptions import PlottingOptions -from .TkAlExceptions import AllInOneError -from .trackSplittingValidation import TrackSplittingValidation -from .zMuMuValidation import ZMuMuValidation - -class PreexistingValidation(GenericValidation): - """ - Object representing a validation that has already been run, - but should be included in plots. 
- """ - defaults = {"title": ".oO[name]Oo."} - mandatories = {"file", "color", "style", "originalValName", "eosdirName", "multiIOV"} - removemandatories = {"dataset", "maxevents", "trackcollection"} - def __init__(self, valName, config): - self.general = config.getGeneral() - self.name = self.general["name"] = valName - self.config = config - - theUpdate = config.getResultingSection("preexisting"+self.valType+":"+self.name, - defaultDict = self.defaults, - demandPars = self.mandatories) - self.general.update(theUpdate) - - self.originalValName = self.general["originalValName"] - self.title = self.general["title"] - if "|" in self.title or "," in self.title or '"' in self.title: - msg = "The characters '|', '\"', and ',' cannot be used in the alignment title!" - raise AllInOneError(msg) - self.needsproxy = boolfromstring(self.general["needsproxy"], "needsproxy") - self.jobid = self.general["jobid"] - if self.jobid: - try: #make sure it's actually a valid jobid - output = getCommandOutput2("bjobs %(jobid)s 2>&1"%self.general) - if "is not found" in output: raise RuntimeError - except RuntimeError: - raise AllInOneError("%s is not a valid jobid.\nMaybe it finished already?"%self.jobid) - - knownOpts = set(self.defaults.keys())|self.mandatories|self.optionals - ignoreOpts = [] - config.checkInput("preexisting"+self.valType+":"+self.name, - knownSimpleOptions = knownOpts, - ignoreOptions = ignoreOpts) - self.jobmode = None - - try: #initialize plotting options for this validation type - result = PlottingOptions(self.config, self.valType) - except KeyError: - pass - - @property - def filesToCompare(self): - return {self.defaultReferenceName: self.general["file"]} - - def getRepMap(self): - #do not call super - try: - result = PlottingOptions(self.config, self.valType) - except KeyError: - result = {} - result.update(self.general) - result.update({ - "color": str(parsecolor(result["color"])), - "style": str(parsestyle(result["style"])), - }) - return result - - def 
createFiles(self, *args, **kwargs): - raise AllInOneError("Shouldn't be here...") - def createConfiguration(self, *args, **kwargs): - pass - def createScript(self, *args, **kwargs): - raise AllInOneError("Shouldn't be here...") - def createCrabCfg(self, *args, **kwargs): - raise AllInOneError("Shouldn't be here...") - -class PreexistingOfflineValidation(PreexistingValidation, OfflineValidation): - deprecateddefaults = { - "DMRMethod":"", - "DMRMinimum":"", - "DMROptions":"", - "OfflineTreeBaseDir":"", - "SurfaceShapes":"" - } - defaults = deprecateddefaults.copy() - def __init__(self, valName, config): - super(PreexistingOfflineValidation, self).__init__(valName, config) - for option in self.deprecateddefaults: - if self.general[option]: - raise AllInOneError("The '%s' option has been moved to the [plots:offline] section. Please specify it there."%option) - - def getRepMap(self): - result = super(PreexistingOfflineValidation, self).getRepMap() - result.update({ - "filetoplot": self.general["file"], - }) - return result - - def appendToMerge(self, *args, **kwargs): - raise AllInOneError("Shouldn't be here...") - -class PreexistingPrimaryVertexValidation(PreexistingValidation, PrimaryVertexValidation): - removemandatories = {"isda","ismc","runboundary","vertexcollection","lumilist","ptCut","etaCut","runControl","numberOfBins"} - def getRepMap(self): - result = super(PreexistingPrimaryVertexValidation, self).getRepMap() - result.update({ - "filetoplot": self.general["file"], - }) - return result - - def appendToMerge(self, *args, **kwargs): - raise AllInOneError("Shouldn't be here...") - -class PreexistingTrackSplittingValidation(PreexistingValidation, TrackSplittingValidation): - def appendToMerge(self, *args, **kwargs): - raise AllInOneError("Shouldn't be here...") - -class PreexistingMonteCarloValidation(PreexistingValidation): - pass - -class PreexistingZMuMuValidation(PreexistingValidation): - def __init__(self, *args, **kwargs): - raise 
AllInOneError("Preexisting Z->mumu validation not implemented") - #more complicated, it has multiple output files - -class PreexistingGeometryComparison(PreexistingValidation): - def __init__(self, *args, **kwargs): - raise AllInOneError("Preexisting geometry comparison not implemented") diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/presentation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/presentation.py deleted file mode 100644 index 3d82b0ce4f667..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/presentation.py +++ /dev/null @@ -1,194 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -from builtins import range -import abc -import math -import os -import re - -from .genericValidation import ValidationForPresentation, ValidationWithPlotsSummary -from .helperFunctions import recursivesubclasses -from .presentationTemplates import * -from .TkAlExceptions import AllInOneError - -# Plots related to a single validation: -class ValidationPlots(object): - def __init__(self, path): - if not os.path.isdir(path): - print("Error: Directory "+path+" not found!") - exit(1) - if not path.endswith('/'): - path += '/' - path = path.replace('\\', '/') # Beacause LaTeX has issues with '\'. 
- self.path = path - # List of plot files in given directory: - self.plots = [file for file in os.listdir(path) - if file.endswith('.eps')] - - @property - def validationclass(self): - possiblenames = [] - for cls in recursivesubclasses(ValidationForPresentation): - if cls.__abstractmethods__: continue - if cls.plotsdirname() == os.path.basename(os.path.realpath(self.path.rstrip("/"))): - return cls - possiblenames.append(cls.plotsdirname()) - raise AllInOneError("{} does not match any of the possible folder names:\n{}".format(self.path, ", ".join(possiblenames))) - -def validationclasses(validations): - from collections import OrderedDict - classes = [validation.validationclass for validation in validations] - #remove duplicates - http://stackoverflow.com/a/39835527/5228524 - classes = list(OrderedDict.fromkeys(classes)) - return classes - -# Layout of plots on a page: -class PageLayout(object): - def __init__(self, pattern=[], width=1, height=1): - self.pattern = [] # List of rows; row contains the order numbers - # of its plots; e.g. [[1,2,3], [4,5,6]] - self.width = width # Maximum width of one plot, - # with respect to textwidth. - self.height = height # Maximum height of one plot, - # with respect to textheight. - - # Sets variables for the given plots and returns the plots - # in an appropriate order: - def fit(self, plots): - rowlengths = [] - # First, try to place plots in a square. - nplots = sum(len(p) for p in plots) - length = int(math.ceil(math.sqrt(nplots))) - # Then, fill the square from the bottom and remove extra rows. - fullRows = int(nplots/length) - residual = nplots - length*fullRows - nrows = fullRows - if residual != 0: - rowlengths.append(residual) - nrows += 1 - for _ in range(fullRows): - rowlengths.append(length) - - # Now, fill the pattern. - self.pattern = [] - if residual == 0 and len(plots[0])%length != 0 and\ - len(plots[0])%nrows == 0: - # It's better to arrange plots in columns, not rows. 
- self.pattern.extend(list(range(i, i+nrows*(length-1)+1, nrows)) - for i in range(1, nrows+1)) - else: - if residual != 0: - self.pattern.append(list(range(1, 1+residual))) - self.pattern.extend(list(range(i, i+length)) for i in - range(residual+1, nplots-length+2, length)) - - self.width = 1.0/length - self.height = 0.8/nrows - - -# Write a set of pages, one for each subdetector. -# Arguments: identifier: regular expression to get the wanted plots, -# used together with subdetector name -# title: title of the plot type -# validations: list of relevant ValidationPlots objects. -# Returns the parsed script. -class SubsectionBase(object): - __metaclass__ = abc.ABCMeta - def __init__(self, title): - self.title = title - def write(self, validations): - script = '\n'.join(_ for _ in self.pages(validations) if _) - if script != '': - script = subsectionTemplate.replace('[title]', self.title)+script - return script - @abc.abstractmethod - def pages(self, validations): - pass - -class SubsectionOnePage(SubsectionBase): - def __init__(self, identifier, title): - self.identifier = identifier - super(SubsectionOnePage, self).__init__(title) - def pages(self, validations): - return [writePageReg(self.identifier, self.title, validations)] - -class SubsectionFromList(SubsectionBase): - def __init__(self, identifier, title): - self.identifier = identifier - super(SubsectionFromList, self).__init__(title) - def pages(self, validations): - return [writePageReg('(?=.*%s)%s'%(pageidentifier, self.identifier), - self.title+': ' +pagetitle, validations) - for pageidentifier, pagetitle in self.pageidentifiers] - @abc.abstractproperty - def pageidentifiers(self): - pass - -class SummarySection(SubsectionBase): - def __init__(self): - super(SummarySection, self).__init__("Summary") - def pages(self, validations): - return [summaryTemplate.replace('[title]', self.title) - .replace('[summary]', validation.validationclass.summaryitemsstring(folder=validation.path, latex=True)) - 
.replace("tabular", "longtable") for validation in validations - if issubclass(validation.validationclass, ValidationWithPlotsSummary)] - -# Write a page containing plots of given type. -# Arguments: identifier: regular expression to get the wanted plots -# title: title of the plot type -# validations: list of relevant ValidationPlots objects -# layout: given page layout. -# Returns the parsed script. -def writePageReg(identifier, title, validations, layout=0): - plots = [] - for validation in validations: - valiplots = [validation.path+plot for plot in validation.plots - if re.search(identifier, plot)] - valiplots.sort(key=plotSortKey) - plots.append(valiplots) - if sum(len(p) for p in plots) == 0: - print('Warning: no plots matching ' + identifier) - return '' - - # Create layout, if not given. - if layout == 0: - layout = PageLayout() - layout.fit(plots) - - return writePage([p for vali in plots for p in vali], title, layout) - - -# Write the given plots on a page. -# Arguments: plots: paths of plots to be drawn on the page -# title: title of the plot type -# layout: a PageLayout object definig the layout. -# Returns the parsed script. -def writePage(plots, title, layout): - plotrows = [] - for row in layout.pattern: - plotrow = [] - for i in range(len(row)): - plotrow.append(plotTemplate.replace('[width]', str(layout.width)).\ - replace('[height]', str(layout.height)).\ - replace('[path]', plots[row[i]-1])) - plotrows.append('\n'.join(plotrow)) - script = ' \\\\\n'.join(plotrows) - - return frameTemplate.replace('[plots]', script).replace('[title]', title) - - -# Sort key to rearrange a plot list. -# Arguments: plot: to be sorted. 
-def plotSortKey(plot): - # Move normchi2 before chi2Prob - if plot.find('normchi2') != -1: - return 'chi2a' - if plot.find('chi2Prob') != -1: - return 'chi2b' - return plot - -import Alignment.OfflineValidation.TkAlAllInOneTool.geometryComparison -import Alignment.OfflineValidation.TkAlAllInOneTool.offlineValidation -import Alignment.OfflineValidation.TkAlAllInOneTool.trackSplittingValidation -import Alignment.OfflineValidation.TkAlAllInOneTool.primaryVertexValidation -import Alignment.OfflineValidation.TkAlAllInOneTool.zMuMuValidation diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/presentationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/presentationTemplates.py deleted file mode 100644 index 0566c51997b63..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/presentationTemplates.py +++ /dev/null @@ -1,95 +0,0 @@ -# Templates for the production of a LaTeX presentation. - -texTemplate=r"""%Offline Alignment Validation presentation. 
-%Time of creation: [time] -%Created with produceOfflineValidationTex.py -\documentclass{beamer} -\usepackage[latin1]{inputenc} -\usepackage{color} -\usepackage{longtable} -%\usepackage{siunitx} -%\usepackage{epstopdf} -\usetheme{default} -\title[Offline Validation]{Title here} -\author{Author(s) here} -\institute{Institute here} -\date{Date here} - - - -\begin{document} - -\begin{frame} -\titlepage -\end{frame} - -\section{Introduction} -%--------------------------------------------- -\begin{frame}{Introduction} -{ -\begin{itemize} - \item Introduction here -\end{itemize} -} -\end{frame} - - -\section{Plots} -%--------------------------------------------- - - -[frames] - - -\section{Conclusions} -%--------------------------------------------- -\begin{frame}{Conclusions} -{ -\begin{itemize} -\item Conclusions here -\end{itemize} -} -\end{frame} - - - -\end{document} - -""" - -frameTemplate=r""" -\begin{frame}{[title]} - \begin{figure} - \centering -[plots] - %\\Comments here - \end{figure} -\end{frame} -""" - -summaryTemplate = r""" -\begin{frame}[allowframebreaks]{[title]} -\centering -[summary] -\end{frame} -""" - -plotTemplate=r""" \includegraphics[width=[width]\textwidth, height=[height]\textheight, keepaspectratio=true]{[path]}""" - -subsectionTemplate=r""" - -\subsection{[title]} -%--------------------------------------------- -""" - -toPdf=""" -#To produce a pdf presentation -#a. fill in your information, comments etc. in presentation.tex -#b. run this script: ./ToPdf.sh -latex presentation.tex -latex presentation.tex #(twice to produce the bookmarks) -dvipdf presentation.dvi -#(pdflatex doesn't like .eps-images; this way we can -#use just latex and the convert the result into pdf.) 
- -""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexResolution.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexResolution.py deleted file mode 100644 index 3aaeb7719055b..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexResolution.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from . import configTemplates -from . import globalDictionaries -from .genericValidation import GenericValidationData, ValidationWithPlots, pythonboolstring -from .helperFunctions import replaceByMap -from .TkAlExceptions import AllInOneError - -class PrimaryVertexResolution(GenericValidationData, ValidationWithPlots): - configBaseName = "TkAlPrimaryVertexResolution" - scriptBaseName = "TkAlPrimaryVertexResolution" - crabCfgBaseName = "TkAlPrimaryVertexResolution" - resultBaseName = "PrimaryVertexResolution" - outputBaseName = "PrimaryVertexResolution" - defaults = { - # N.B.: the reference needs to be updated each time the format of the output is changed - "pvresolutionreference": ("/store/group/alca_trackeralign/validation/PVResolution/Reference/PrimaryVertexResolution_phaseIMC92X_upgrade2017_Ideal.root"), - "multiIOV":"False", - "startScale":"1.", - "endScale":"1000.", - "nTracksBins":"60.", - "nVtxBins":"40." 
- } - #mandatories = {"isda","ismc","runboundary","trackcollection","vertexcollection","lumilist","ptCut","etaCut","runControl","numberOfBins"} - mandatories = {"runControl","runboundary","doTriggerSelection","triggerBits","trackcollection"} - valType = "pvresolution" - def __init__(self, valName, alignment, config): - super(PrimaryVertexResolution, self).__init__(valName, alignment, config) - - if self.general["pvresolutionreference"].startswith("/store"): - self.general["pvresolutionreference"] = "root://eoscms//eos/cms" + self.general["pvresolutionreference"] - if self.NJobs > 1: - raise AllInOneError("Parallel jobs not implemented for the SplotVertexResolution validation!\n" - "Please set parallelJobs = 1.") - @property - def ValidationTemplate(self): - return configTemplates.PrimaryVertexResolutionTemplate - - @property - def TrackSelectionRefitting(self): - return configTemplates.SingleTrackRefitter - - @property - def DefinePath(self): - return configTemplates.PVResolutionPath - - @property - def ValidationSequence(self): - #never enters anywhere, since we use the custom DefinePath which includes the goodVertexSkim - return "" - - @property - def ProcessName(self): - return "PrimaryVertexResolution" - - def createScript(self, path): - return super(PrimaryVertexResolution, self).createScript(path, template = configTemplates.PVResolutionScriptTemplate) - - def createCrabCfg(self, path): - return super(PrimaryVertexResolution, self).createCrabCfg(path, self.crabCfgBaseName) - - def getRepMap(self, alignment = None): - if alignment == None: - alignment = self.alignmentToValidate - repMap = super(PrimaryVertexResolution, self).getRepMap(alignment) - repMap.update({ - "nEvents": self.general["maxevents"], - "TrackCollection": self.general["trackcollection"], - "eosdir": os.path.join(self.general["eosdir"]), - #"eosdir": os.path.join(self.general["eosdir"], "%s/%s/%s" % (self.outputBaseName, self.name, alignment.name)), - "workingdir": ".oO[datadir]Oo./%s/%s/%s" % 
(self.outputBaseName, self.name, alignment.name), - "plotsdir": ".oO[datadir]Oo./%s/%s/%s/plots" % (self.outputBaseName, self.name, alignment.name), - }) - - return repMap - - def appendToMerge(self): - """ - if no argument or "" is passed a string with an instantiation is returned, - else the validation is appended to the list - """ - repMap = self.getRepMap() - - parameters = " ".join(os.path.join("root://eoscms//eos/cms", file.lstrip("/")) for file in repMap["resultFiles"]) - - mergedoutputfile = os.path.join("root://eoscms//eos/cms", repMap["finalResultFile"].lstrip("/")) - return "hadd -f %s %s\n" % (mergedoutputfile, parameters) - - def appendToPlots(self): - repMap = self.getRepMap() - return (' PVResolution::loadFileList("root://eoscms//eos/cms%(finalResultFile)s",' - '"PrimaryVertexResolution","%(title)s", %(color)s, %(style)s);\n')%repMap - - @classmethod - def runPlots(cls, validations): - return configTemplates.PVResolutionPlotExecution - - @classmethod - def plottingscriptname(cls): - return "TkAlPrimaryVertexResolutionPlot.C" - - @classmethod - def plottingscripttemplate(cls): - return configTemplates.PVResolutionPlotTemplate - - @classmethod - def plotsdirname(cls): - return "PrimaryVertexResolution" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexResolutionTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexResolutionTemplates.py deleted file mode 100644 index 293ef9381a67e..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexResolutionTemplates.py +++ /dev/null @@ -1,205 +0,0 @@ -PrimaryVertexResolutionTemplate=""" - -HLTSel = .oO[doTriggerSelection]Oo. - -################################################################### -# Runs and events -################################################################### -runboundary = .oO[runboundary]Oo. 
-isMultipleRuns=False -if(isinstance(runboundary, (list, tuple))): - isMultipleRuns=True - print("Multiple Runs are selected") - -if(isMultipleRuns): - process.source.firstRun = cms.untracked.uint32(int(runboundary[0])) -else: - process.source.firstRun = cms.untracked.uint32(int(runboundary)) - - -################################################################### -# The trigger filter module -################################################################### -from HLTrigger.HLTfilters.triggerResultsFilter_cfi import * -process.theHLTFilter = triggerResultsFilter.clone( - triggerConditions = cms.vstring(.oO[triggerBits]Oo.), - hltResults = cms.InputTag( "TriggerResults", "", "HLT" ), - l1tResults = cms.InputTag( "" ), - throw = cms.bool(False) -) - -################################################################### -# PV refit -################################################################### -process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi") - -from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices -process.offlinePrimaryVerticesFromRefittedTrks = offlinePrimaryVertices.clone() -process.offlinePrimaryVerticesFromRefittedTrks.TrackLabel = cms.InputTag("TrackRefitter") -process.offlinePrimaryVerticesFromRefittedTrks.vertexCollections.maxDistanceToBeam = 1 -process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxNormalizedChi2 = 20 -process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minSiliconLayersWithHits = 5 -process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Significance = 5.0 -# as it was prior to https://github.com/cms-sw/cmssw/commit/c8462ae4313b6be3bbce36e45373aa6e87253c59 -process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Error = 1.0 -process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxDzError = 1.0 -process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minPixelLayersWithHits = 2 - -# Use 
compressions settings of TFile -# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings -# settings = 100 * algorithm + level -# level is from 1 (small) to 9 (large compression) -# algo: 1 (ZLIB), 2 (LMZA) -# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance -compressionSettings = 207 - -################################################################### -# The PV resolution module -################################################################### -process.PrimaryVertexResolution = cms.EDAnalyzer('SplitVertexResolution', - compressionSettings = cms.untracked.int32(compressionSettings), - storeNtuple = cms.bool(False), - vtxCollection = cms.InputTag("offlinePrimaryVerticesFromRefittedTrks"), - trackCollection = cms.InputTag("TrackRefitter"), - minVertexNdf = cms.untracked.double(10.), - minVertexMeanWeight = cms.untracked.double(0.5), - runControl = cms.untracked.bool(.oO[runControl]Oo.), - runControlNumber = cms.untracked.vuint32(runboundary), - sumpTStartScale = cms.untracked.double(.oO[startScale]Oo.), - sumpTEndScale = cms.untracked.double(.oO[endScale]Oo.), - nTrackBins = cms.untracked.double(.oO[nTracksBins]Oo.), - nVtxBins = cms.untracked.double(.oO[nVtxBins]Oo.) 
- ) -""" - -#################################################################### -#################################################################### -PVResolutionPath=""" - -process.theValidSequence = cms.Sequence(process.offlineBeamSpot + - process.TrackRefitter + - process.offlinePrimaryVerticesFromRefittedTrks + - process.PrimaryVertexResolution) -if (HLTSel): - process.p = cms.Path(process.theHLTFilter + process.theValidSequence) -else: - process.p = cms.Path(process.theValidSequence) -""" - -#################################################################### -#################################################################### -PVResolutionScriptTemplate="""#!/bin/bash -source /afs/cern.ch/cms/caf/setup.sh -export X509_USER_PROXY=.oO[scriptsdir]Oo./.user_proxy - -source /afs/cern.ch/cms/caf/setup.sh - -echo ----------------------- -echo Job started at `date` -echo ----------------------- - -export theLabel=.oO[alignmentName]Oo. -export theDate=.oO[runboundary]Oo. - -cwd=`pwd` -cd .oO[CMSSW_BASE]Oo./src -export SCRAM_ARCH=.oO[SCRAM_ARCH]Oo. -eval `scram runtime -sh` -cd $cwd - -mkdir -p .oO[datadir]Oo. -mkdir -p .oO[workingdir]Oo. -mkdir -p .oO[logdir]Oo. -rm -f .oO[logdir]Oo./*.stdout -rm -f .oO[logdir]Oo./*.stderr - -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - mkdir -p .oO[workdir]Oo. - rm -f .oO[workdir]Oo./* - cd .oO[workdir]Oo. -else - mkdir -p $cwd/TkAllInOneTool - cd $cwd/TkAllInOneTool -fi - -.oO[CommandLine]Oo. - -ls -lh . - -eos mkdir -p /store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/ - -for RootOutputFile in $(ls *root ) -do - xrdcp -f ${RootOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./${RootOutputFile} - cp ${RootOutputFile} .oO[workingdir]Oo. -done - -cp .oO[Alignment/OfflineValidation]Oo./macros/FitPVResolution.C . -cp .oO[Alignment/OfflineValidation]Oo./macros/CMS_lumi.C . 
-cp .oO[Alignment/OfflineValidation]Oo./macros/CMS_lumi.h . - - if [[ .oO[pvresolutionreference]Oo. == *store* ]]; then xrdcp -f .oO[pvresolutionreference]Oo. PVValidation_reference.root; else ln -fs .oO[pvresolutionreference]Oo. ./PVResolution_reference.root; fi - -root -b -q "FitPVResolution.C(\\"${PWD}/${RootOutputFile}=${theLabel},${PWD}/PVValidation_reference.root=Design simulation\\",\\"$theDate\\")" - -mkdir -p .oO[plotsdir]Oo. -for PngOutputFile in $(ls *png ); do - xrdcp -f ${PngOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PngOutputFile} - cp ${PngOutputFile} .oO[plotsdir]Oo. -done - -for PdfOutputFile in $(ls *pdf ); do - xrdcp -f ${PdfOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PdfOutputFile} - cp ${PdfOutputFile} .oO[plotsdir]Oo. -done - -echo ----------------------- -echo Job ended at `date` -echo ----------------------- - -""" - -###################################################################### -###################################################################### - -PVResolutionPlotExecution=""" -#make primary vertex validation plots - -cp .oO[plottingscriptpath]Oo. . -root -x -b -q .oO[plottingscriptname]Oo.++ - -for PdfOutputFile in $(ls *pdf ); do - xrdcp -f ${PdfOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PdfOutputFile} - cp ${PdfOutputFile} .oO[datadir]Oo./.oO[PlotsDirName]Oo. -done - -for PngOutputFile in $(ls *png ); do - xrdcp -f ${PngOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PngOutputFile} - cp ${PngOutputFile} .oO[datadir]Oo./.oO[PlotsDirName]Oo. 
-done - -""" - -###################################################################### -###################################################################### - -PVResolutionPlotTemplate=""" -/**************************************** -This can be run directly in root, or you - can run ./TkAlMerge.sh in this directory -****************************************/ - -#include "Alignment/OfflineValidation/macros/FitPVResolution.C" - -void TkAlPrimaryVertexResolutionPlot() -{ - - // initialize the plot y-axis ranges - .oO[PlottingInstantiation]Oo. - FitPVResolution("",""); - -} -""" - - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexValidation.py deleted file mode 100644 index 5037c10cf1a6c..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexValidation.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import absolute_import -import os -from . import configTemplates -from . 
import globalDictionaries -from .genericValidation import GenericValidationData_CTSR, ParallelValidation, ValidationWithPlots, pythonboolstring -from .helperFunctions import replaceByMap -from .TkAlExceptions import AllInOneError - -class PrimaryVertexValidation(GenericValidationData_CTSR, ParallelValidation, ValidationWithPlots): - configBaseName = "TkAlPrimaryVertexValidation" - scriptBaseName = "TkAlPrimaryVertexValidation" - crabCfgBaseName = "TkAlPrimaryVertexValidation" - resultBaseName = "PrimaryVertexValidation" - outputBaseName = "PrimaryVertexValidation" - defaults = { - # N.B.: the reference needs to be updated each time the format of the output is changed - "pvvalidationreference": ("/store/group/alca_trackeralign/validation/PVValidation/Reference/PrimaryVertexValidation_phaseIMC92X_upgrade2017_Ideal.root"), - "doBPix":"True", - "doFPix":"True", - "forceBeamSpot":"False", - "multiIOV":"False", - } - mandatories = {"isda","ismc","runboundary","trackcollection","vertexcollection","lumilist","ptCut","etaCut","runControl","numberOfBins"} - valType = "primaryvertex" - def __init__(self, valName, alignment, config): - super(PrimaryVertexValidation, self).__init__(valName, alignment, config) - - for name in "doBPix", "doFPix", "forceBeamSpot": - self.general[name] = pythonboolstring(self.general[name], name) - - if self.general["pvvalidationreference"].startswith("/store"): - self.general["pvvalidationreference"] = "root://eoscms//eos/cms" + self.general["pvvalidationreference"] - - @property - def ValidationTemplate(self): - return configTemplates.PrimaryVertexValidationTemplate - - @property - def DefinePath(self): - return configTemplates.PVValidationPath - - @property - def ValidationSequence(self): - #never enters anywhere, since we use the custom DefinePath which includes the goodVertexSkim - return "" - - @property - def use_d0cut(self): - return False - - @property - def isPVValidation(self): - return True - - @property - def ProcessName(self): - 
return "PrimaryVertexValidation" - - def createScript(self, path): - return super(PrimaryVertexValidation, self).createScript(path, template = configTemplates.PVValidationScriptTemplate) - - def createCrabCfg(self, path): - return super(PrimaryVertexValidation, self).createCrabCfg(path, self.crabCfgBaseName) - - def getRepMap(self, alignment = None): - if alignment == None: - alignment = self.alignmentToValidate - repMap = super(PrimaryVertexValidation, self).getRepMap(alignment) - repMap.update({ - "nEvents": self.general["maxevents"], - "TrackCollection": self.general["trackcollection"], - "VertexCollection": self.general["vertexcollection"], - "eosdir": os.path.join(self.general["eosdir"]), - #"eosdir": os.path.join(self.general["eosdir"], "%s/%s/%s" % (self.outputBaseName, self.name, alignment.name)), - "workingdir": ".oO[datadir]Oo./%s/%s/%s" % (self.outputBaseName, self.name, alignment.name), - "plotsdir": ".oO[datadir]Oo./%s/%s/%s/plots" % (self.outputBaseName, self.name, alignment.name), - "filetoplot": "root://eoscms//eos/cms.oO[finalResultFile]Oo.", - }) - - return repMap - - def appendToMerge(self): - """ - if no argument or "" is passed a string with an instantiation is returned, - else the validation is appended to the list - """ - repMap = self.getRepMap() - - parameters = " ".join(os.path.join("root://eoscms//eos/cms", file.lstrip("/")) for file in repMap["resultFiles"]) - - mergedoutputfile = os.path.join("root://eoscms//eos/cms", repMap["finalResultFile"].lstrip("/")) - return "hadd -f %s %s\n" % (mergedoutputfile, parameters) - - def appendToPlots(self): - repMap = self.getRepMap() - return (' loadFileList("%(filetoplot)s",' - '"PVValidation", "%(title)s", %(color)s, %(style)s);\n')%repMap - - @classmethod - def runPlots(cls, validations): - return configTemplates.PrimaryVertexPlotExecution - - @classmethod - def plottingscriptname(cls): - return "TkAlPrimaryVertexValidationPlot.C" - - @classmethod - def plottingscripttemplate(cls): - return 
configTemplates.PrimaryVertexPlotTemplate - - @classmethod - def plotsdirname(cls): - return "PrimaryVertexValidation" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexValidationTemplates.py deleted file mode 100644 index 4c13a0bd56727..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/primaryVertexValidationTemplates.py +++ /dev/null @@ -1,320 +0,0 @@ -PrimaryVertexValidationTemplate=""" - -isDA = .oO[isda]Oo. -isMC = .oO[ismc]Oo. - -################################################################### -# Runs and events -################################################################### -runboundary = .oO[runboundary]Oo. -isMultipleRuns=False -if(isinstance(runboundary, (list, tuple))): - isMultipleRuns=True - print("Multiple Runs are selected") - -if(isMultipleRuns): - process.source.firstRun = cms.untracked.uint32(int(runboundary[0])) -else: - process.source.firstRun = cms.untracked.uint32(int(runboundary)) - -################################################################### -# JSON Filtering -################################################################### -if isMC: - print(">>>>>>>>>> testPVValidation_cfg.py: msg%-i: This is simulation!") - runboundary = 1 -else: - print(">>>>>>>>>> testPVValidation_cfg.py: msg%-i: This is real DATA!") - if ('.oO[lumilist]Oo.'): - print(">>>>>>>>>> testPVValidation_cfg.py: msg%-i: JSON filtering with: .oO[lumilist]Oo. 
") - import FWCore.PythonUtilities.LumiList as LumiList - process.source.lumisToProcess = LumiList.LumiList(filename ='.oO[lumilist]Oo.').getVLuminosityBlockRange() - -#################################################################### -# Produce the Transient Track Record in the event -#################################################################### -process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi") - -#################################################################### -# Load and Configure event selection -#################################################################### -process.primaryVertexFilter = cms.EDFilter("VertexSelector", - src = cms.InputTag(".oO[VertexCollection]Oo."), - cut = cms.string("!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2"), - filter = cms.bool(True) - ) - -process.noscraping = cms.EDFilter("FilterOutScraping", - applyfilter = cms.untracked.bool(True), - src = cms.untracked.InputTag(".oO[TrackCollection]Oo."), - debugOn = cms.untracked.bool(False), - numtrack = cms.untracked.uint32(10), - thresh = cms.untracked.double(0.25) - ) - - -process.load("Alignment.CommonAlignment.filterOutLowPt_cfi") -process.filterOutLowPt.src = ".oO[TrackCollection]Oo." -process.filterOutLowPt.ptmin = .oO[ptCut]Oo. -process.filterOutLowPt.runControl = .oO[runControl]Oo. 
-if(isMultipleRuns): - process.filterOutLowPt.runControlNumber.extend((runboundary)) -else: - process.filterOutLowPt.runControlNumber = [runboundary] - -if isMC: - process.goodvertexSkim = cms.Sequence(process.noscraping + process.filterOutLowPt) -else: - process.goodvertexSkim = cms.Sequence(process.primaryVertexFilter + process.noscraping + process.filterOutLowPt) - -#################################################################### -# Imports of parameters -#################################################################### -from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices -## modify the parameters which differ -FilteringParams = offlinePrimaryVertices.TkFilterParameters.clone( - maxNormalizedChi2 = 5.0, # chi2ndof < 5 - maxD0Significance = 5.0, # fake cut (requiring 1 PXB hit) - maxEta = 5.0, # as per recommendation in PR #18330 -) - -## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters -DAClusterizationParams = DA_vectParameters.clone() - -GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), - TkGapClusParameters = cms.PSet(zSeparation = cms.double(0.2)) # 0.2 cm max separation betw. 
clusters - ) - -#################################################################### -# Deterministic annealing clustering or Gap clustering -#################################################################### -def switchClusterizerParameters(da): - if da: - print(">>>>>>>>>> testPVValidation_cfg.py: msg%-i: Running DA Algorithm!") - return DAClusterizationParams - else: - print(">>>>>>>>>> testPVValidation_cfg.py: msg%-i: Running GAP Algorithm!") - return GapClusterizationParams - -# Use compressions settings of TFile -# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings -# settings = 100 * algorithm + level -# level is from 1 (small) to 9 (large compression) -# algo: 1 (ZLIB), 2 (LMZA) -# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance -compressionSettings = 207 - -#################################################################### -# Configure the PVValidation Analyzer module -#################################################################### -process.PVValidation = cms.EDAnalyzer("PrimaryVertexValidation", - compressionSettings = cms.untracked.int32(compressionSettings), - TrackCollectionTag = cms.InputTag("FinalTrackRefitter"), - VertexCollectionTag = cms.InputTag(".oO[VertexCollection]Oo."), - Debug = cms.bool(False), - storeNtuple = cms.bool(False), - useTracksFromRecoVtx = cms.bool(False), - isLightNtuple = cms.bool(True), - askFirstLayerHit = cms.bool(False), - forceBeamSpot = cms.untracked.bool(.oO[forceBeamSpot]Oo.), - probePt = cms.untracked.double(.oO[ptCut]Oo.), - probeEta = cms.untracked.double(.oO[etaCut]Oo.), - doBPix = cms.untracked.bool(.oO[doBPix]Oo.), - doFPix = cms.untracked.bool(.oO[doFPix]Oo.), - numberOfBins = cms.untracked.int32(.oO[numberOfBins]Oo.), - runControl = cms.untracked.bool(.oO[runControl]Oo.), - runControlNumber = cms.untracked.vuint32(runboundary), - TkFilterParameters = FilteringParams, - TkClusParameters 
= switchClusterizerParameters(isDA) - ) -""" - -#################################################################### -#################################################################### -PVValidationPath=""" -process.p = cms.Path(process.goodvertexSkim* - process.seqTrackselRefit* - process.PVValidation) -""" - -#################################################################### -#################################################################### -PVValidationScriptTemplate="""#!/bin/bash -source /afs/cern.ch/cms/caf/setup.sh -export X509_USER_PROXY=.oO[scriptsdir]Oo./.user_proxy - -echo ----------------------- -echo Job started at `date` -echo ----------------------- - -export theLabel=.oO[alignmentName]Oo. -export theDate=.oO[runboundary]Oo. - -cwd=`pwd` -cd .oO[CMSSW_BASE]Oo./src -export SCRAM_ARCH=.oO[SCRAM_ARCH]Oo. -eval `scram runtime -sh` -cd $cwd - -mkdir -p .oO[datadir]Oo. -mkdir -p .oO[workingdir]Oo. -mkdir -p .oO[logdir]Oo. -rm -f .oO[logdir]Oo./*.stdout -rm -f .oO[logdir]Oo./*.stderr - -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - mkdir -p .oO[workdir]Oo. - rm -f .oO[workdir]Oo./* - cd .oO[workdir]Oo. -else - mkdir -p $cwd/TkAllInOneTool - cd $cwd/TkAllInOneTool -fi - -.oO[CommandLine]Oo. - -ls -lh . - -eos mkdir -p /store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/ -for RootOutputFile in $(ls *root ) -do - xrdcp -f ${RootOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./${RootOutputFile} - cp ${RootOutputFile} .oO[workingdir]Oo. -done - -cp .oO[Alignment/OfflineValidation]Oo./macros/FitPVResiduals.C . -cp .oO[Alignment/OfflineValidation]Oo./macros/CMS_lumi.C . -cp .oO[Alignment/OfflineValidation]Oo./macros/CMS_lumi.h . - - if [[ .oO[pvvalidationreference]Oo. == *store* ]]; then xrdcp -f .oO[pvvalidationreference]Oo. PVValidation_reference.root; else ln -fs .oO[pvvalidationreference]Oo. 
./PVValidation_reference.root; fi - -echo "I am going to produce the comparison with IDEAL geometry of ${RootOutputFile}" -root -b -q "FitPVResiduals.C++g(\\"${PWD}/${RootOutputFile}=${theLabel},${PWD}/PVValidation_reference.root=Design simulation\\",true,true,\\"$theDate\\")" - -mkdir -p .oO[plotsdir]Oo. -for PngOutputFile in $(ls *png ); do - xrdcp -f ${PngOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PngOutputFile} - cp ${PngOutputFile} .oO[plotsdir]Oo. -done - -for PdfOutputFile in $(ls *pdf ); do - xrdcp -f ${PdfOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PdfOutputFile} - cp ${PdfOutputFile} .oO[plotsdir]Oo. -done - -mkdir .oO[plotsdir]Oo./Biases/ -mkdir .oO[plotsdir]Oo./Biases/dzPhi -mkdir .oO[plotsdir]Oo./Biases/dxyPhi -mkdir .oO[plotsdir]Oo./Biases/dzEta -mkdir .oO[plotsdir]Oo./Biases/dxyEta -mkdir .oO[plotsdir]Oo./Fit -mkdir .oO[plotsdir]Oo./dxyVsEta -mkdir .oO[plotsdir]Oo./dzVsEta -mkdir .oO[plotsdir]Oo./dxyVsPhi -mkdir .oO[plotsdir]Oo./dzVsPhi -mkdir .oO[plotsdir]Oo./dxyVsEtaNorm -mkdir .oO[plotsdir]Oo./dzVsEtaNorm -mkdir .oO[plotsdir]Oo./dxyVsPhiNorm -mkdir .oO[plotsdir]Oo./dzVsPhiNorm - -mv .oO[plotsdir]Oo./BiasesCanvas* .oO[plotsdir]Oo./Biases/ -mv .oO[plotsdir]Oo./dzPhiBiasCanvas* .oO[plotsdir]Oo./Biases/dzPhi -mv .oO[plotsdir]Oo./dxyPhiBiasCanvas* .oO[plotsdir]Oo./Biases/dxyPhi -mv .oO[plotsdir]Oo./dzEtaBiasCanvas* .oO[plotsdir]Oo./Biases/dzEta -mv .oO[plotsdir]Oo./dxyEtaBiasCanvas* .oO[plotsdir]Oo./Biases/dxyEta -mv .oO[plotsdir]Oo./dzPhiTrendFit* .oO[plotsdir]Oo./Fit -mv .oO[plotsdir]Oo./dxyEtaTrendNorm* .oO[plotsdir]Oo./dxyVsEtaNorm -mv .oO[plotsdir]Oo./dzEtaTrendNorm* .oO[plotsdir]Oo./dzVsEtaNorm -mv .oO[plotsdir]Oo./dxyPhiTrendNorm* .oO[plotsdir]Oo./dxyVsPhiNorm -mv .oO[plotsdir]Oo./dzPhiTrendNorm* .oO[plotsdir]Oo./dzVsPhiNorm -mv .oO[plotsdir]Oo./dxyEtaTrend* .oO[plotsdir]Oo./dxyVsEta -mv .oO[plotsdir]Oo./dzEtaTrend* 
.oO[plotsdir]Oo./dzVsEta -mv .oO[plotsdir]Oo./dxyPhiTrend* .oO[plotsdir]Oo./dxyVsPhi -mv .oO[plotsdir]Oo./dzPhiTrend* .oO[plotsdir]Oo./dzVsPhi - -wget https://raw.githubusercontent.com/mmusich/PVToolScripts/master/PolishedScripts/index.php - -cp index.php .oO[plotsdir]Oo./Biases/ -cp index.php .oO[plotsdir]Oo./Biases/dzPhi -cp index.php .oO[plotsdir]Oo./Biases/dxyPhi -cp index.php .oO[plotsdir]Oo./Biases/dzEta -cp index.php .oO[plotsdir]Oo./Biases/dxyEta -cp index.php .oO[plotsdir]Oo./Fit -cp index.php .oO[plotsdir]Oo./dxyVsEta -cp index.php .oO[plotsdir]Oo./dzVsEta -cp index.php .oO[plotsdir]Oo./dxyVsPhi -cp index.php .oO[plotsdir]Oo./dzVsPhi -cp index.php .oO[plotsdir]Oo./dxyVsEtaNorm -cp index.php .oO[plotsdir]Oo./dzVsEtaNorm -cp index.php .oO[plotsdir]Oo./dxyVsPhiNorm -cp index.php .oO[plotsdir]Oo./dzVsPhiNorm - - -echo ----------------------- -echo Job ended at `date` -echo ----------------------- - -""" - -###################################################################### -###################################################################### - -PrimaryVertexPlotExecution=""" -#make primary vertex validation plots - -cp .oO[plottingscriptpath]Oo. . -root -x -b -q .oO[plottingscriptname]Oo.++ - -for PdfOutputFile in $(ls *pdf ); do - xrdcp -f ${PdfOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PdfOutputFile} - cp ${PdfOutputFile} .oO[datadir]Oo./.oO[PlotsDirName]Oo. -done - -for PngOutputFile in $(ls *png ); do - xrdcp -f ${PngOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/${PngOutputFile} - cp ${PngOutputFile} .oO[datadir]Oo./.oO[PlotsDirName]Oo. 
-done - -""" - -###################################################################### -###################################################################### - -PrimaryVertexPlotTemplate=""" -/**************************************** -This can be run directly in root, or you - can run ./TkAlMerge.sh in this directory -****************************************/ - -#include "Alignment/OfflineValidation/macros/FitPVResiduals.C" - -void TkAlPrimaryVertexValidationPlot() -{ - - // initialize the plot y-axis ranges - thePlotLimits->init(.oO[m_dxyPhiMax]Oo., // mean of dxy vs Phi - .oO[m_dzPhiMax]Oo., // mean of dz vs Phi - .oO[m_dxyEtaMax]Oo., // mean of dxy vs Eta - .oO[m_dzEtaMax]Oo., // mean of dz vs Eta - .oO[m_dxyPhiNormMax]Oo., // mean of dxy vs Phi (norm) - .oO[m_dzPhiNormMax]Oo., // mean of dz vs Phi (norm) - .oO[m_dxyEtaNormMax]Oo., // mean of dxy vs Eta (norm) - .oO[m_dzEtaNormMax]Oo., // mean of dz vs Eta (norm) - .oO[w_dxyPhiMax]Oo., // width of dxy vs Phi - .oO[w_dzPhiMax]Oo., // width of dz vs Phi - .oO[w_dxyEtaMax]Oo., // width of dxy vs Eta - .oO[w_dzEtaMax]Oo., // width of dz vs Eta - .oO[w_dxyPhiNormMax]Oo., // width of dxy vs Phi (norm) - .oO[w_dzPhiNormMax]Oo., // width of dz vs Phi (norm) - .oO[w_dxyEtaNormMax]Oo., // width of dxy vs Eta (norm) - .oO[w_dzEtaNormMax]Oo. // width of dz vs Eta (norm) - ); - - .oO[PlottingInstantiation]Oo. - FitPVResiduals("",.oO[stdResiduals]Oo.,.oO[doMaps]Oo.,"",.oO[autoLimits]Oo.); -} -""" - - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidation.py deleted file mode 100644 index b27297c90ff78..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidation.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import absolute_import -import os -from . 
import configTemplates -from .genericValidation import GenericValidationData_CTSR, ParallelValidation, ValidationForPresentation, ValidationWithPlotsSummary -from .helperFunctions import replaceByMap -from .presentation import SubsectionFromList, SubsectionOnePage -from .TkAlExceptions import AllInOneError - - -class TrackSplittingValidation(GenericValidationData_CTSR, ParallelValidation, ValidationWithPlotsSummary, ValidationForPresentation): - configBaseName = "TkAlTrackSplitting" - scriptBaseName = "TkAlTrackSplitting" - crabCfgBaseName = "TkAlTrackSplitting" - resultBaseName = "TrackSplitting" - outputBaseName = "TrackSplitting" - defaults = {"multiIOV":"False"} - mandatories = {"trackcollection"} - valType = "split" - - @property - def ValidationTemplate(self): - return configTemplates.TrackSplittingTemplate - - @property - def ValidationSequence(self): - return configTemplates.TrackSplittingSequence - - @property - def ProcessName(self): - return "splitter" - - def createScript(self, path): - return super(TrackSplittingValidation, self).createScript(path) - - def createCrabCfg(self, path): - return super(TrackSplittingValidation, self).createCrabCfg(path, self.crabCfgBaseName) - - def getRepMap( self, alignment = None ): - repMap = super(TrackSplittingValidation, self).getRepMap(alignment) - if repMap["subdetector"] == "none": - subdetselection = "" - else: - subdetselection = "process.AlignmentTrackSelector.minHitsPerSubDet.in.oO[subdetector]Oo. 
= 2" - repMap.update({ - "nEvents": self.general["maxevents"], - "TrackCollection": self.general["trackcollection"], - "subdetselection": subdetselection, - }) - # repMap["outputFile"] = os.path.abspath( repMap["outputFile"] ) - # if self.jobmode.split( ',' )[0] == "crab": - # repMap["outputFile"] = os.path.basename( repMap["outputFile"] ) - return repMap - - def appendToPlots(self): - """ - if no argument or "" is passed a string with an instantiation is - returned, else the validation is appended to the list - """ - repMap = self.getRepMap() - comparestring = self.getCompareStrings("TrackSplittingValidation") - return ' "{},"'.format(comparestring) - - def appendToMerge(self): - repMap = self.getRepMap() - - parameters = " ".join(os.path.join("root://eoscms//eos/cms", file.lstrip("/")) for file in repMap["resultFiles"]) - - mergedoutputfile = os.path.join("root://eoscms//eos/cms", repMap["finalResultFile"].lstrip("/")) - return "hadd -f %s %s" % (mergedoutputfile, parameters) - - @classmethod - def plottingscriptname(cls): - return "TkAlTrackSplitPlot.C" - - @classmethod - def plottingscripttemplate(cls): - return configTemplates.trackSplitPlotTemplate - - @classmethod - def plotsdirname(cls): - return "TrackSplittingPlots" - - @classmethod - def presentationsubsections(cls): - return [ - SubsectionTrackSplitting('hist.*eps$', 'Track splitting'), -# Uncomment and edit to highlight one or more profiles -# SubsectionOnePage("profile.phi_org.Delta_phi.*.eps", "modulation"), - ] - -class SubsectionTrackSplitting(SubsectionFromList): - pageidentifiers = ( - ("hist[.]Delta_pt", "$p_T$"), - ("hist[.]Delta_(eta|phi)", "Angles"), - ("hist[.]Delta_d(xy|z)", "Vertex"), - ) - diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidationTemplates.py deleted file mode 100644 index e9c7e9a76cb5c..0000000000000 --- 
a/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidationTemplates.py +++ /dev/null @@ -1,152 +0,0 @@ -###################################################################### -###################################################################### -TrackSplittingTemplate=""" - -#adding this ~doubles the efficiency of selection -process.FittingSmootherRKP5.EstimateCut = -1 - -.oO[subdetselection]Oo. - -# Use compressions settings of TFile -# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings -# settings = 100 * algorithm + level -# level is from 1 (small) to 9 (large compression) -# algo: 1 (ZLIB), 2 (LMZA) -# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance -compressionSettings = 207 -process.cosmicValidation = cms.EDAnalyzer("CosmicSplitterValidation", - compressionSettings = cms.untracked.int32(compressionSettings), - ifSplitMuons = cms.bool(False), - ifTrackMCTruth = cms.bool(False), - checkIfGolden = cms.bool(False), - splitTracks = cms.InputTag("FinalTrackRefitter","","splitter"), - splitGlobalMuons = cms.InputTag("muons","","splitter"), - originalTracks = cms.InputTag("FirstTrackRefitter","","splitter"), - originalGlobalMuons = cms.InputTag("muons","","Rec") -) -""" - -###################################################################### -###################################################################### -TrackSplittingSequence = "process.cosmicValidation" - - -###################################################################### -###################################################################### -trackSplitPlotExecution=""" -#make track splitting plots - -cp .oO[trackSplitPlotScriptPath]Oo. . 
-root -x -b -q TkAlTrackSplitPlot.C++ - -""" - -###################################################################### -###################################################################### - -trackSplitPlotTemplate=""" -#include "Alignment/OfflineValidation/macros/trackSplitPlot.C" - -/**************************************** -This can be run directly in root, or you - can run ./TkAlMerge.sh in this directory -It can be run as is, or adjusted to fit - for misalignments or to only make - certain plots -****************************************/ - -/******************************** -To make ALL plots (247 in total): - leave this file as is -********************************/ - -/************************************************************************** -to make all plots involving a single x or y variable, or both: -Uncomment the line marked (B), and fill in for xvar and yvar - -Examples: - - xvar = "dxy", yvar = "ptrel" - makes plots of dxy vs Delta_pT/pT - (4 total - profile and resolution, - of Delta_pT/pT and its pull - distribution) - xvar = "all", yvar = "pt" - makes all plots involving Delta_pT - (not Delta_pT/pT) - (30 plots total: - histogram and pull distribution, and - their mean and width as a function - of the 7 x variables) - xvar = "", yvar = "all" - makes all histograms of all y variables - (including Delta_pT/pT) - (16 plots total - 8 y variables, - regular and pull histograms) -**************************************************************************/ - -/************************************************************************************** -To make a custom selection of plots: -Uncomment the lines marked (C) and this section, and fill in matrix however you want */ - -/* -Bool_t plotmatrix[xsize][ysize]; -void fillmatrix() -{ - for (int x = 0; x < xsize; x++) - for (int y = 0; y < ysize; y++) - plotmatrix[x][y] = (.............................); -} -*/ - -/* -The variables are defined in Alignment/OfflineValidation/macros/trackSplitPlot.h - as 
follows: -TString xvariables[xsize] = {"", "pt", "eta", "phi", "dz", "dxy", "theta", - "qoverpt"}; - -TString yvariables[ysize] = {"pt", "pt", "eta", "phi", "dz", "dxy", "theta", - "qoverpt", ""}; -Bool_t relativearray[ysize] = {true, false, false, false, false, false, false, - false, false}; -Use matrix[x][y] = true to make that plot, and false not to make it. -**************************************************************************************/ - -/************************************************************************************* -To fit for a misalignment, which can be combined with any other option: -Uncomment the line marked (A) and this section, and choose your misalignment */ - -/* -TString misalignment = "choose one"; -double *values = 0; -double *phases = 0; -//or: -// double values[number of files] = {...}; -// double phases[number of files] = {...}; -*/ - -/* -The options for misalignment are sagitta, elliptical, skew, telescope, or layerRot. -If the magnitude and phase of the misalignment are known (i.e. Monte Carlo data using - a geometry produced by the systematic misalignment tool), make values and phases into - arrays, with one entry for each file, to make a plot of the result of the fit vs. the - misalignment value. -phases must be filled in for sagitta, elliptical, and skew if values is; - for the others it has no effect -*************************************************************************************/ - -void TkAlTrackSplitPlot() -{ - TkAlStyle::legendheader = ".oO[legendheader]Oo."; - TkAlStyle::legendoptions = ".oO[legendoptions]Oo."; - TkAlStyle::set(.oO[publicationstatus]Oo., .oO[era]Oo., ".oO[customtitle]Oo.", ".oO[customrighttitle]Oo."); - outliercut = .oO[outliercut]Oo.; - //fillmatrix(); //(C) - subdetector = ".oO[subdetector]Oo."; - makePlots( -.oO[PlottingInstantiation]Oo. - , - //misalignment,values,phases, //(A) - ".oO[datadir]Oo./.oO[PlotsDirName]Oo." 
- //,"xvar","yvar" //(B) - //,plotmatrix //(C) - ); -} -""" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/zMuMuValidation.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/zMuMuValidation.py deleted file mode 100644 index 99453561480bb..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/zMuMuValidation.py +++ /dev/null @@ -1,132 +0,0 @@ -from __future__ import absolute_import -import os -from . import configTemplates -from . import globalDictionaries -from .genericValidation import GenericValidationData, ValidationWithPlots -from .helperFunctions import replaceByMap -from .TkAlExceptions import AllInOneError - - -class ZMuMuValidation(GenericValidationData, ValidationWithPlots): - configBaseName = "TkAlZMuMuValidation" - scriptBaseName = "TkAlZMuMuValidation" - crabCfgBaseName = "TkAlZMuMuValidation" - resultBaseName = "ZMuMuValidation" - outputBaseName = "ZMuMuValidation" - defaults = { - "zmumureference": ("/store/caf/user/emiglior/Alignment/TkAlDiMuonValidation/Reference/BiasCheck_DYToMuMu_Summer12_TkAlZMuMu_IDEAL.root"), - "minpt" : "0.", - "maxpt" : "1000.", - "etamaxneg" : "2.4", - "etaminneg" : "-2.4", - "etamaxpos" : "2.4", - "etaminpos" : "-2.4", - "CustomMinY": "90.85", - "CustomMaxY": "91.4", - "multiIOV":"False", - } - deprecateddefaults = { - "resonance": "", - "switchONfit": "", - "rebinphi": "", - "rebinetadiff": "", - "rebineta": "", - "rebinpt": "", - } - defaults.update(deprecateddefaults) - needpackages = {'MuonAnalysis/MomentumScaleCalibration'} - valType = "zmumu" - def __init__(self, valName, alignment, config): - super(ZMuMuValidation, self).__init__(valName, alignment, config) - if self.general["zmumureference"].startswith("/store"): - self.general["zmumureference"] = "root://eoscms//eos/cms" + self.general["zmumureference"] - if self.NJobs > 1: - raise AllInOneError("Parallel jobs not implemented for the Z->mumu validation!\n" - "Please set parallelJobs = 1.") - for option in 
self.deprecateddefaults: - if self.general[option]: - raise AllInOneError("The '%s' option has been moved to the [plots:zmumu] section. Please specify it there."%option) - del self.general[option] - - @property - def filesToCompare(self): - return {self.defaultReferenceName: replaceByMap(".oO[eosdir]Oo./0_zmumuHisto.root", self.getRepMap())} - - @property - def ValidationTemplate(self): - return configTemplates.ZMuMuValidationTemplate - - @property - def ProcessName(self): - return "ONLYHISTOS" - - @property - def FileOutputTemplate(self): - return "" - - @property - def LoadBasicModules(self): - return super(ZMuMuValidation, self).LoadBasicModules + configTemplates.LoadMuonModules - - @property - def TrackSelectionRefitting(self): - return configTemplates.SingleTrackRefitter - - @property - def DefinePath(self): - return configTemplates.ZMuMuPath - - def createScript(self, path): - return super(ZMuMuValidation, self).createScript(path, template = configTemplates.zMuMuScriptTemplate) - - def createCrabCfg(self, path): - return super(ZMuMuValidation, self).createCrabCfg(path, self.crabCfgBaseName) - - def getRepMap(self, alignment = None): - if alignment == None: - alignment = self.alignmentToValidate - repMap = super(ZMuMuValidation, self).getRepMap(alignment) - repMap.update({ - "nEvents": self.general["maxevents"], - "outputFile": ("0_zmumuHisto.root" - ",genSimRecoPlots.root" - ",FitParameters.txt"), - "eosdir": os.path.join(self.general["eosdir"], "%s/%s/%s" % (self.outputBaseName, self.name, alignment.name)), - "workingdir": ".oO[datadir]Oo./%s/%s/%s" % (self.outputBaseName, self.name, alignment.name), - "plotsdir": ".oO[datadir]Oo./%s/%s/%s/plots" % (self.outputBaseName, self.name, alignment.name), - "TrackCollection": self.trackcollection, - }) - return repMap - - @property - def trackcollection(self): - from .plottingOptions import PlottingOptions - resonance = PlottingOptions(self.config, self.valType)["resonance"] - if resonance == "Z": - return 
'ALCARECOTkAlZMuMu' - elif resonance == "JPsi": - return 'ALCARECOTkAlJpsiMuMu' - elif resonance in ("Y1S", "Y2S", "Y3S"): - return 'ALCARECOTkAlUpsilonMuMu' - else: - raise AllInOneError("Unknown resonance {}!".format(resonance)) - - def appendToPlots(self): - """ - if no argument or "" is passed a string with an instantiation is - returned, else the validation is appended to the list - """ - repMap = self.getRepMap() - return replaceByMap(' filenames.push_back("root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./BiasCheck.root"); titles.push_back(".oO[title]Oo."); colors.push_back(.oO[color]Oo.); linestyles.push_back(.oO[style]Oo.);\n', repMap) - - @classmethod - def plottingscriptname(cls): - return "TkAlMergeZmumuPlots.C" - - @classmethod - def plottingscripttemplate(cls): - return configTemplates.mergeZmumuPlotsTemplate - - @classmethod - def plotsdirname(cls): - return ".oO[resonance]Oo.MuMuPlots" diff --git a/Alignment/OfflineValidation/python/TkAlAllInOneTool/zMuMuValidationTemplates.py b/Alignment/OfflineValidation/python/TkAlAllInOneTool/zMuMuValidationTemplates.py deleted file mode 100644 index e1ea2121c27e4..0000000000000 --- a/Alignment/OfflineValidation/python/TkAlAllInOneTool/zMuMuValidationTemplates.py +++ /dev/null @@ -1,318 +0,0 @@ -ZMuMuValidationTemplate=""" -compressionSettings = 207 -###### MuSclFit SETTINGS ############################################## - -### MuScleFit specific configuration - -process.looper = cms.Looper( - "MuScleFit", - # Only used when reading events from a root tree - MaxEventsFromRootTree = cms.int32(-1), - - # Specify a file if you want to read events from a root tree in a local file. - # In this case the input source should be an empty source with 0 events. - - InputRootTreeFileName = cms.string(""), - - # Specify the file name where you want to save a root tree with the muon pairs. - # Leave empty if no file should be written. 
- - OutputRootTreeFileName = cms.string(""), - - - # Choose the kind of muons you want to run on - # ------------------------------------------- - MuonLabel = cms.InputTag("TrackRefitter"), - - - #MuonType = cms.int32(11), - MuonType = cms.int32(5), - - # This line allows to switch to PAT muons. Default is false. - # Note that the onia selection works only with onia patTuples. - PATmuons = cms.untracked.bool(False), - - # ---------------- # - # Select resonance # - # ---------------- # - # The resonances are to be specified in this order: - # Z0, Y(3S), Y(2S), Y(1S), Psi(2S), J/Psi - # ------------------------------------------------- - resfind = cms.vint32( - int(".oO[resonance]Oo." == "Z"), - int(".oO[resonance]Oo." == "Y3S"), - int(".oO[resonance]Oo." == "Y2S"), - int(".oO[resonance]Oo." == "Y1S"), - int(".oO[resonance]Oo." == "Psi2S"), - int(".oO[resonance]Oo." == "JPsi") - ), - - # Likelihood settings - # ------------------- - maxLoopNumber = cms.untracked.int32(1), - # Select which fits to do in which loop (0 = do not, 1 = do) - doResolFit = cms.vint32(0), - doScaleFit = cms.vint32(0), - doBackgroundFit = cms.vint32(0), - doCrossSectionFit = cms.vint32(0), - - # Use the probability file or not. If not it will perform a simpler selection taking the muon pair with - # invariant mass closer to the pdf value and will crash if some fit is attempted. - UseProbsFile = cms.untracked.bool(False), - - # False = use also MC information - speedup = cms.bool(True), - # Set this to false if you do not want to use simTracks. - # (Note that this is skipped anyway if speedup == True). 
- compareToSimTracks = cms.bool(False), - - # Output settings - # --------------- - # Use compressions settings of TFile - # see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings - # settings = 100 * algorithm + level - # level is from 1 (small) to 9 (large compression) - # algo: 1 (ZLIB), 2 (LMZA) - # see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance - - OutputFileName = cms.untracked.string("zmumuHisto.root"), - compressionSettings = cms.untracked.int32(compressionSettings), - - # BiasType=0 means no bias to muon momenta - # ---------------------------------------- - BiasType = cms.int32(0), - parBias = cms.vdouble(), - - # SmearType=0 means no smearing applied to muon momenta - # ----------------------------------------------------- - SmearType = cms.int32(0), - parSmear = cms.vdouble(), - - ### taken from J/Psi ######################### -# ResolFitType = cms.int32(14), -# parResol = cms.vdouble(0.007,0.015, -0.00077, 0.0063, 0.0018, 0.0164), -# parResolFix = cms.vint32(0, 0, 0,0, 0,0), -# parResolOrder = cms.vint32(0, 0, 0, 0, 0, 0), - ResolFitType = cms.int32(0), - parResol = cms.vdouble(0), - parResolFix = cms.vint32(0), - parResolOrder = cms.vint32(0), - - - # -------------------- # - # Scale fit parameters # - # -------------------- # - - # ----------------------------------------------------------------------------------- -# ScaleFitType = cms.int32(18), -# parScaleOrder = cms.vint32(0, 0, 0, 0), -# parScaleFix = cms.vint32(0, 0, 0, 0), -# parScale = cms.vdouble(1, 1, 1, 1), - ScaleFitType = cms.int32(0), - parScaleOrder = cms.vint32(0), - parScaleFix = cms.vint32(0), - parScale = cms.vdouble(0), - - - - # ---------------------------- # - # Cross section fit parameters # - # ---------------------------- # - # Note that the cross section fit works differently than the others, it - # fits ratios of parameters. 
Fix and Order should not be used as is, they - # are there mainly for compatibility. - parCrossSectionOrder = cms.vint32(0, 0, 0, 0, 0, 0), - parCrossSectionFix = cms.vint32(0, 0, 0, 0, 0, 0), - parCrossSection = cms.vdouble(1.233, 2.07, 6.33, 13.9, 2.169, 127.2), - - # ------------------------- # - # Background fit parameters # - # ------------------------- # - - # Window factors for: Z, Upsilons and (J/Psi,Psi2S) regions - LeftWindowBorder = cms.vdouble(70., 8., 1.391495), - RightWindowBorder = cms.vdouble(110., 12., 5.391495), - - # The two parameters of BgrFitType=2 are respectively: - # bgr fraction, (negative of) bgr exp. slope, bgr constant - # -------------------------------------------------------- - # The function types for resonances in a region must be the same - BgrFitType = cms.vint32(2, 2, 2), # regions - # These empty parameters should be used when there is no background - parBgr = cms.vdouble(0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0.,0., 0.,0., 0.,0.), - parBgrFix = cms.vint32(0, 0, 0, 0, 0, 0, - # The rest of the parameters is used for the resonance regions. They are automatically fixed in the code - # because they are never used to fit the background, but only after the rescaling. 
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), - parBgrOrder = cms.vint32(0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), - - - # ----------------------- # - - # Set Minuit fit strategy - FitStrategy = cms.int32(1), - - - # Fit accuracy and debug parameters - StartWithSimplex = cms.bool(True), - ComputeMinosErrors = cms.bool(False), - MinimumShapePlots = cms.bool(True), - - ########## TO BE ENABLED ################################ - # Set the cuts on muons to be used in the fit - MinMuonPt = cms.untracked.double(.oO[minpt]Oo.), - MaxMuonPt = cms.untracked.double(.oO[maxpt]Oo.), - MinMuonEtaFirstRange = cms.untracked.double(.oO[etaminneg]Oo.), - MaxMuonEtaFirstRange = cms.untracked.double(.oO[etamaxneg]Oo.), - MinMuonEtaSecondRange = cms.untracked.double(.oO[etaminpos]Oo.), - MaxMuonEtaSecondRange = cms.untracked.double(.oO[etamaxpos]Oo.), - PileUpSummaryInfo = cms.untracked.InputTag("addPileupInfo"), - PrimaryVertexCollection = cms.untracked.InputTag("offlinePrimaryVertices"), - - # The following parameters can be used to filter events - TriggerResultsLabel = cms.untracked.string("TriggerResults"), - TriggerResultsProcess = cms.untracked.string("HLT"), - TriggerPath = cms.untracked.vstring(""), - # Negate the result of the trigger - NegateTrigger = cms.untracked.bool(False), - debug = cms.untracked.int32(0), -) - -""" - - -#################################################################### -#################################################################### -LoadMuonModules = """ -process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi") -process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi") -process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff") -process.load("Configuration.StandardSequences.Reconstruction_cff") -""" - - -#################################################################### -#################################################################### -ZMuMuPath = """ -process.p = cms.Path( - 
process.offlineBeamSpot*process.TrackRefitter - ) -""" - - -#################################################################### -#################################################################### -zMuMuScriptTemplate="""#!/bin/bash -source /afs/cern.ch/cms/caf/setup.sh -export X509_USER_PROXY=.oO[scriptsdir]Oo./.user_proxy - -echo ----------------------- -echo Job started at `date` -echo ----------------------- - -cwd=`pwd` -cd .oO[CMSSW_BASE]Oo./src -export SCRAM_ARCH=.oO[SCRAM_ARCH]Oo. -eval `scram runtime -sh` -cd $cwd - -mkdir -p .oO[datadir]Oo. -mkdir -p .oO[workingdir]Oo. -mkdir -p .oO[logdir]Oo. -rm -f .oO[logdir]Oo./*.stdout -rm -f .oO[logdir]Oo./*.stderr - -if [[ $HOSTNAME = lxplus[0-9]*[.a-z0-9]* ]] # check for interactive mode -then - mkdir -p .oO[workdir]Oo. - rm -f .oO[workdir]Oo./* - cd .oO[workdir]Oo. -else - mkdir -p $cwd/TkAllInOneTool - cd $cwd/TkAllInOneTool -fi - - -.oO[CommandLine]Oo. - -ls -lh . - -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/CompareBias.oO[resonance]Oo.Validation.cc . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/Legend.h . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/FitMassSlices.cc . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/FitSlices.cc . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/FitXslices.cc . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/FitWithRooFit.cc . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/FitMass1D.cc . - -root -q -b -l "CompareBias.oO[resonance]Oo.Validation.cc+(.oO[rebinphi]Oo., .oO[rebinetadiff]Oo., .oO[rebineta]Oo., .oO[rebinpt]Oo.)" - -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/tdrstyle.C . -cp .oO[MuonAnalysis/MomentumScaleCalibration]Oo./test/Macros/RooFit/MultiHistoOverlap_.oO[resonance]Oo..C . - -if [[ .oO[zmumureference]Oo. == *store* ]]; then xrdcp -f .oO[zmumureference]Oo. 
BiasCheck_Reference.root; else ln -fs .oO[zmumureference]Oo. ./BiasCheck_Reference.root; fi -root -q -b -l MultiHistoOverlap_.oO[resonance]Oo..C - -eos mkdir -p /store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/ -for RootOutputFile in $(ls *root ) -do - xrdcp -f ${RootOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./ - cp ${RootOutputFile} .oO[workingdir]Oo. -done - -mkdir -p .oO[plotsdir]Oo. -for PngOutputFile in $(ls *png ); do - xrdcp -f ${PngOutputFile} root://eoscms//eos/cms/store/group/alca_trackeralign/AlignmentValidation/.oO[eosdir]Oo./plots/ - cp ${PngOutputFile} .oO[plotsdir]Oo. -done - - -echo ----------------------- -echo Job ended at `date` -echo ----------------------- - -""" - -###################################################################### -###################################################################### - -mergeZmumuPlotsExecution=""" -#merge Z->mumu histograms - -cp .oO[mergeZmumuPlotsScriptPath]Oo. . -root -l -x -b -q TkAlMergeZmumuPlots.C++ - -""" - -###################################################################### -###################################################################### - -mergeZmumuPlotsTemplate=""" -#include "MuonAnalysis/MomentumScaleCalibration/test/Macros/RooFit/MultiHistoOverlapAll_.oO[resonance]Oo..C" -#include -#include - -template string separatebycommas(vector v){ - if (v.size()==0) return ""; - stringstream s; - s << v[0]; - for (unsigned int i = 1; i < v.size(); i++) s << "," << v[i]; - return s.str(); -} - -void TkAlMergeZmumuPlots(){ - vector filenames; vector titles; vector colors; vector linestyles; - -.oO[PlottingInstantiation]Oo. 
- - vector linestyles_new, markerstyles_new; - for (unsigned int j=0; j + + + + + + @@ -27,6 +33,10 @@ + + + + diff --git a/Alignment/OfflineValidation/test/PVValidation_TEMPL_cfg.py b/Alignment/OfflineValidation/test/PVValidation_TEMPL_cfg.py index 1b5482137751a..968ad2c66aac6 100644 --- a/Alignment/OfflineValidation/test/PVValidation_TEMPL_cfg.py +++ b/Alignment/OfflineValidation/test/PVValidation_TEMPL_cfg.py @@ -216,7 +216,7 @@ def customiseKinksAndBows(process): ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), diff --git a/Alignment/OfflineValidation/test/PVValidation_T_cfg.py b/Alignment/OfflineValidation/test/PVValidation_T_cfg.py index 866e9413c8235..3349d4acc1879 100644 --- a/Alignment/OfflineValidation/test/PVValidation_T_cfg.py +++ b/Alignment/OfflineValidation/test/PVValidation_T_cfg.py @@ -279,7 +279,7 @@ def customiseKinksAndBows(process): ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), diff --git a/Alignment/OfflineValidation/test/TkAlV0sAnalyzer_cfg.py b/Alignment/OfflineValidation/test/TkAlV0sAnalyzer_cfg.py new file mode 100644 index 0000000000000..2170da537a5a4 --- /dev/null +++ b/Alignment/OfflineValidation/test/TkAlV0sAnalyzer_cfg.py @@ -0,0 +1,83 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process('Analysis') + 
+################################################################### +# import of standard configurations +################################################################### +process.load('Configuration.StandardSequences.Services_cff') +process.load('Configuration.StandardSequences.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +################################################################### +# Configure the Global Tag +################################################################### +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, '133X_mcRun3_2023_realistic_v3', '') + +process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) + +################################################################### +# Messages +################################################################### +process.load('FWCore.MessageService.MessageLogger_cfi') + +################################################################### +# Source +################################################################### +readFiles = cms.untracked.vstring('file:../../../TkAlV0s.root') +process.source = cms.Source("PoolSource", + fileNames = readFiles, + #skipEvents = cms.untracked.uint32(45000) +) + +################################################################### +# TransientTrack from https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideTransientTracks +################################################################### +process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi") +process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi') +process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi') +process.load('TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff') + 
+#################################################################### +# Get the BeamSpot +#################################################################### +process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff") + +#################################################################### +# Track Refitter +#################################################################### +process.load("RecoTracker.TrackProducer.TrackRefitters_cff") +import RecoTracker.TrackProducer.TrackRefitters_cff +process.TrackRefitter = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone() +process.TrackRefitter.src = "ALCARECOTkAlKShortTracks" +process.TrackRefitter.TrajectoryInEvent = True +process.TrackRefitter.NavigationSchool = '' +process.TrackRefitter.TTRHBuilder = "WithAngleAndTemplate" + +#################################################################### +# Output file +#################################################################### +process.TFileService = cms.Service("TFileService",fileName=cms.string("TkAlV0Analysis.root")) + +#################################################################### +# Sequence +#################################################################### +process.seqTrackselRefit = cms.Sequence(process.offlineBeamSpot* + # in case NavigatioSchool is set !='' + #process.MeasurementTrackerEvent* + process.TrackRefitter) + +#################################################################### +# Additional output definition +#################################################################### +process.analysis = cms.EDAnalyzer('TkAlV0sAnalyzer', + #tracks = cms.untracked.InputTag('TrackRefitter')) + tracks = cms.untracked.InputTag('ALCARECOTkAlKShortTracks')) + +#################################################################### +# Path +#################################################################### +process.p = cms.Path(#process.seqTrackselRefit + + process.analysis) diff --git 
a/Alignment/OfflineValidation/test/eopElecTreeWriter_cfg.py b/Alignment/OfflineValidation/test/eopElecTreeWriter_cfg.py index cc00fd049b3d0..4a0c73ac8ec16 100644 --- a/Alignment/OfflineValidation/test/eopElecTreeWriter_cfg.py +++ b/Alignment/OfflineValidation/test/eopElecTreeWriter_cfg.py @@ -131,7 +131,7 @@ else: print( "NO REFIT") -process.load("Alignment.OfflineValidation.eopElecTreeWriter_cfi") +process.load("Alignment.OfflineValidation.energyOverMomentumTreeElec_cfi") if REFIT: print( "REFIT") diff --git a/Alignment/OfflineValidation/test/inspectData_cfg.py b/Alignment/OfflineValidation/test/inspectData_cfg.py index f1a71b0422f44..261e89d7f4b48 100644 --- a/Alignment/OfflineValidation/test/inspectData_cfg.py +++ b/Alignment/OfflineValidation/test/inspectData_cfg.py @@ -1,6 +1,8 @@ +import math import glob +import importlib import FWCore.ParameterSet.Config as cms -from Alignment.OfflineValidation.TkAlAllInOneTool.defaultInputFiles_cff import filesDefaultData_Comissioning2022_Cosmics_string +from Alignment.OfflineValidation.TkAlAllInOneTool.defaultInputFiles_cff import filesDefaultData_Comissioning2022_Cosmics_string,filesDefaultMC_DoubleMuonPUPhase_string ################################################################### # Setup 'standard' options @@ -31,6 +33,18 @@ VarParsing.VarParsing.varType.bool, # string, int, or float "is it a unit test?") +options.register('isDiMuonData', + False, # default value + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.bool, # string, int, or float + "is it running on DiMuon data?") + +options.register('isCosmics', + False, # default value + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.bool, # string, int, or float + "is it running on cosmics data?") + options.register('inputData', "/eos/cms/store/express/Commissioning2022/ExpressCosmics/FEVT/Express-v1/000/350/010/00000/*", VarParsing.VarParsing.multiplicity.singleton, # 
singleton or list @@ -43,9 +57,33 @@ VarParsing.VarParsing.varType.int, # string, int, or float "num. events to run") +options.register('Detector', + '2023', + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.string, # string, int, or float + "Detector to run upon") + options.parseArguments() -process = cms.Process("AlCaRECOAnalysis") + +from Configuration.PyReleaseValidation.upgradeWorkflowComponents import upgradeProperties +ConditionsInfo = {} +if 'D' in options.Detector: + ConditionsInfo = upgradeProperties[2026][options.Detector] # so if the default changes, change wf only here +else: + ConditionsInfo = upgradeProperties[2017][options.Detector] + +era_value = ConditionsInfo['Era'] +era_module_name = f'Configuration.Eras.Era_{era_value}_cff' +config_name = f'{era_value}' +era_module = importlib.import_module(era_module_name) +era_config = getattr(era_module, config_name, None) + +if era_config is not None: + # Use the configurations from the imported module in the process setup + process = cms.Process("AlCaRECOAnalysis", era_config) +else: + print(f"Error: Could not find configuration {config_name} in module {era_module_name}.") ################################################################### # Message logger service @@ -71,7 +109,12 @@ ################################################################### process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff") process.load("Configuration.StandardSequences.Services_cff") -process.load("Configuration.StandardSequences.GeometryRecoDB_cff") +if 'D' in options.Detector: + geom = options.Detector # Replace with your actual dynamic part + process.load(f'Configuration.Geometry.GeometryExtended{geom}Reco_cff') +else: + process.load("Configuration.StandardSequences.GeometryRecoDB_cff") + process.load('Configuration.StandardSequences.MagneticField_cff') #process.load("Configuration.StandardSequences.MagneticField_0T_cff") process.load("CondCore.CondDB.CondDB_cfi") @@ 
-81,7 +124,7 @@ #################################################################### process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") from Configuration.AlCa.GlobalTag import GlobalTag -process.GlobalTag = GlobalTag(process.GlobalTag,options.globalTag, '') +process.GlobalTag = GlobalTag(process.GlobalTag,options.globalTag if (options.globalTag != '') else ConditionsInfo['GT'], '') ################################################################### # Source @@ -91,11 +134,19 @@ the_files=[] if(options.unitTest): ## fixed input for the unit test - readFiles.extend([filesDefaultData_Comissioning2022_Cosmics_string]) + if('D' in options.Detector) : + # it's for phase-2 + readFiles.extend([filesDefaultMC_DoubleMuonPUPhase_string]) + else: + # it's for phase-1 + readFiles.extend([filesDefaultData_Comissioning2022_Cosmics_string]) else: file_list = glob.glob(options.inputData) for f in file_list: - the_files.append(f.replace("/eos/cms","")) + if '/eos/cms' in f: + the_files.append(f.replace("/eos/cms","")) + else: + the_files.append(f.replace("./","file:")) print(the_files) readFiles.extend(the_files) @@ -135,13 +186,21 @@ ################################################################### process.load("RecoTracker.TrackProducer.TrackRefitters_cff") import RecoTracker.TrackProducer.TrackRefitters_cff -process.TrackRefitter1 = process.TrackRefitterP5.clone( - src = options.trackCollection, #'AliMomConstraint', - TrajectoryInEvent = True, - TTRHBuilder = "WithAngleAndTemplate", #"WithTrackAngle" - NavigationSchool = "", - #constraint = 'momentum', ### SPECIFIC FOR CRUZET - #srcConstr='AliMomConstraint' ### SPECIFIC FOR CRUZET$works only with tag V02-10-02 TrackingTools/PatternTools / or CMSSW >=31X +if options.isCosmics: + process.TrackRefitter1 = process.TrackRefitterP5.clone( + src = options.trackCollection, #'AliMomConstraint', + TrajectoryInEvent = True, + TTRHBuilder = "WithAngleAndTemplate", #"WithTrackAngle" + NavigationSchool = "", + 
#constraint = 'momentum', ### SPECIFIC FOR CRUZET + #srcConstr='AliMomConstraint' ### SPECIFIC FOR CRUZET$works only with tag V02-10-02 TrackingTools/PatternTools / or CMSSW >=31X + ) +else: + process.TrackRefitter1 = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone( + src = options.trackCollection, #'AliMomConstraint', + TrajectoryInEvent = True, + TTRHBuilder = "WithAngleAndTemplate", #"WithTrackAngle" + NavigationSchool = "", ) ################################################################### @@ -161,11 +220,12 @@ ################################################################### process.myanalysis = cms.EDAnalyzer("GeneralPurposeTrackAnalyzer", TkTag = cms.InputTag('TrackRefitter1'), - isCosmics = cms.bool(True)) + #TkTag = cms.InputTag(options.trackCollection), + isCosmics = cms.bool(options.isCosmics)) process.fastdmr = cms.EDAnalyzer("DMRChecker", TkTag = cms.InputTag('TrackRefitter1'), - isCosmics = cms.bool(True)) + isCosmics = cms.bool(options.isCosmics)) ################################################################### # Output name @@ -173,17 +233,88 @@ process.TFileService = cms.Service("TFileService", fileName = cms.string(options.outFileName)) + +################################################################### +# TransientTrack from https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideTransientTracks +################################################################### +process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi") +process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi') +process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi') +process.load('TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff') + +process.DiMuonVertexValidation = cms.EDAnalyzer("DiMuonVertexValidation", + useReco = cms.bool(False), + muonTracks = cms.InputTag('TrackRefitter1'), + tracks = cms.InputTag(''), + vertices = cms.InputTag('offlinePrimaryVertices')) 
+ +from Alignment.OfflineValidation.diMuonValidation_cfi import diMuonValidation as _diMuonValidation +process.DiMuonMassValidation = _diMuonValidation.clone( + #TkTag = 'refittedMuons', + TkTag = 'TrackRefitter1', + # mu mu mass + Pair_mass_min = 80., + Pair_mass_max = 120., + Pair_mass_nbins = 80, + Pair_etaminpos = -2.4, + Pair_etamaxpos = 2.4, + Pair_etaminneg = -2.4, + Pair_etamaxneg = 2.4, + # cosTheta CS + Variable_CosThetaCS_xmin = -1., + Variable_CosThetaCS_xmax = 1., + Variable_CosThetaCS_nbins = 20, + # DeltaEta + Variable_DeltaEta_xmin = -4.8, + Variable_DeltaEta_xmax = 4.8, + Variable_DeltaEta_nbins = 20, + # EtaMinus + Variable_EtaMinus_xmin = -2.4, + Variable_EtaMinus_xmax = 2.4, + Variable_EtaMinus_nbins = 12, + # EtaPlus + Variable_EtaPlus_xmin = -2.4, + Variable_EtaPlus_xmax = 2.4, + Variable_EtaPlus_nbins = 12, + # Phi CS + Variable_PhiCS_xmin = -math.pi/2., + Variable_PhiCS_xmax = math.pi/2., + Variable_PhiCS_nbins = 20, + # Phi Minus + Variable_PhiMinus_xmin = -math.pi, + Variable_PhiMinus_xmax = math.pi, + Variable_PhiMinus_nbins = 16, + # Phi Plus + Variable_PhiPlus_xmin = -math.pi, + Variable_PhiPlus_xmax = math.pi, + Variable_PhiPlus_nbins = 16, + # mu mu pT + Variable_PairPt_xmin = 0., + Variable_PairPt_xmax = 100., + Variable_PairPt_nbins = 100) + ################################################################### # Path ################################################################### process.p1 = cms.Path(process.offlineBeamSpot #*process.AliMomConstraint # for 0T - *process.TrackRefitter1 - *process.myanalysis - *process.fastdmr) + * process.TrackRefitter1 + * process.myanalysis + * process.fastdmr) ################################################################### -# preprend the filter +# append di muon analysis ################################################################### -if(options.unitTest): +if(options.isDiMuonData): + process.p1.insert(5,process.DiMuonVertexValidation) + 
process.p1.insert(6,process.DiMuonMassValidation) + +################################################################### +# preprend the filter for unit tests +################################################################### +if(options.unitTest and not options.isDiMuonData): process.p1.insert(0, process.preAnaSeq) + + + + diff --git a/Alignment/OfflineValidation/test/testG4Refitter_cfg.py b/Alignment/OfflineValidation/test/testG4Refitter_cfg.py index 050c40634534e..ab79fb97960a3 100644 --- a/Alignment/OfflineValidation/test/testG4Refitter_cfg.py +++ b/Alignment/OfflineValidation/test/testG4Refitter_cfg.py @@ -130,7 +130,7 @@ ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() #################################################################### diff --git a/Alignment/OfflineValidation/test/testPrimaryVertexRelatedValidations_cfg.py b/Alignment/OfflineValidation/test/testPrimaryVertexRelatedValidations_cfg.py index 8ec25c822b0ca..d4ef8bc13880c 100644 --- a/Alignment/OfflineValidation/test/testPrimaryVertexRelatedValidations_cfg.py +++ b/Alignment/OfflineValidation/test/testPrimaryVertexRelatedValidations_cfg.py @@ -285,7 +285,7 @@ class RefitType(Enum): ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), diff --git a/Alignment/OfflineValidation/test/testShortenedTrackValidation_cfg.py b/Alignment/OfflineValidation/test/testShortenedTrackValidation_cfg.py new file mode 100644 
index 0000000000000..2901ef5a7fc83 --- /dev/null +++ b/Alignment/OfflineValidation/test/testShortenedTrackValidation_cfg.py @@ -0,0 +1,153 @@ +import FWCore.ParameterSet.Config as cms +import FWCore.Utilities.FileUtils as FileUtils +from FWCore.ParameterSet.VarParsing import VarParsing + +options = VarParsing('analysis') +options.register('scenario', + '0', + VarParsing.multiplicity.singleton, + VarParsing.varType.string, + "Name of input misalignment scenario") +options.parseArguments() + +valid_scenarios = ['-10e-6','-8e-6','-6e-6','-4e-6','-2e-6','0','2e-6','4e-6','6e-6','8e-6','10e-6'] + +if options.scenario not in valid_scenarios: + print("Error: Invalid scenario specified. Please choose from the following list: ") + print(valid_scenarios) + exit(1) + +process = cms.Process("TrackingResolution") + +##################################################################### +# import of standard configurations +##################################################################### +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.cerr.FwkReport.reportEvery = 100000 +process.load('Configuration.EventContent.EventContent_cff') +process.load('Configuration.StandardSequences.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.MagneticField_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +##################################################################### +## BeamSpot from database (i.e. 
GlobalTag), needed for Refitter +##################################################################### +process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi") + +##################################################################### +# Load and Configure Measurement Tracker Event +##################################################################### +process.load("RecoTracker.MeasurementDet.MeasurementTrackerEventProducer_cfi") +process.MeasurementTrackerEvent.pixelClusterProducer = "ALCARECOTkAlDiMuon" +process.MeasurementTrackerEvent.stripClusterProducer = "ALCARECOTkAlDiMuon" +process.MeasurementTrackerEvent.inactivePixelDetectorLabels = cms.VInputTag() +process.MeasurementTrackerEvent.inactiveStripDetectorLabels = cms.VInputTag() + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(1000000) +) + +##################################################################### +# Input source +##################################################################### +# filelist = FileUtils.loadListFromFile("listOfFiles_idealMC_TkAlDiMuonAndVertex.txt") +# readFiles = cms.untracked.vstring( *filelist) +# events taken from /DYJetsToMuMu_M-50_TuneCP5_13p6TeV-madgraphMLM-pythia8/Run3Winter23Reco-TkAlDiMuonAndVertex-TRKDesignNoPU_AlcaRecoTRKMu_designGaussSigmaZ4cm_125X_mcRun3_2022_design_v6-v1/ALCARECO +readFiles = cms.untracked.vstring('/store/mc/Run3Winter23Reco/DYJetsToMuMu_M-50_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlDiMuonAndVertex-TRKDesignNoPU_AlcaRecoTRKMu_designGaussSigmaZ4cm_125X_mcRun3_2022_design_v6-v1/60000/d3af17a5-2409-4551-9c3d-00deb2f3f64f.root') +process.source = cms.Source("PoolSource",fileNames = readFiles) + +process.options = cms.untracked.PSet() + +#################################################################### +# Output file +#################################################################### +process.TFileService = cms.Service("TFileService", + fileName = 
cms.string("shortenedTrackResolution_LayerRotation_"+options.scenario+".root")) + +##################################################################### +# Other statements +##################################################################### +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, "125X_mcRun3_2022_design_v6", '') +if (options.scenario=='null'): + print("null scenario, do nothing") + pass +else: + process.GlobalTag.toGet = cms.VPSet(cms.PSet(connect = cms.string("frontier://FrontierPrep/CMS_CONDITIONS"), + record = cms.string('TrackerAlignmentRcd'), + tag = cms.string("LayerRotation_"+options.scenario))) + +##################################################################### +# The DQM analysis sequence +##################################################################### +process.load("DQM.TrackingMonitorSource.shortTrackResolution_cff") + +##################################################################### +# The changes to cope with ALCARECO data format +##################################################################### +process.load("RecoTracker.TrackProducer.TrackRefitters_cff") +import RecoTracker.TrackProducer.TrackRefitters_cff +process.LongTracksRefit = process.TrackRefitter.clone( + src = 'SingleLongTrackProducer', + TrajectoryInEvent = True, + TTRHBuilder = "WithAngleAndTemplate", + NavigationSchool = '' +) + +process.ShortTrackCandidates3.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates4.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates5.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates6.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates7.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates8.src = cms.InputTag("LongTracksRefit") + +process.SingleLongTrackProducer.requiredDr = cms.double(-9999.) 
# do not require any matchings +process.SingleLongTrackProducer.matchMuons = cms.InputTag("muons") # for ALCA irrelevant (see above) +process.SingleLongTrackProducer.allTracks = cms.InputTag("ALCARECOTkAlDiMuon") + +##################################################################### +# The Analysis module +##################################################################### +from Alignment.OfflineValidation.shortenedTrackValidation_cfi import shortenedTrackValidation as _shortenedTrackValidation +process.ShortenedTrackValidation = _shortenedTrackValidation.clone(folderName = "ShortTrackResolution", + hitsRemainInput = ["3","4","5","6","7","8"], + minTracksEtaInput = 0.0, + maxTracksEtaInput = 2.2, + minTracksPtInput = 15.0, + maxTracksPtInput = 99999.9, + maxDrInput = 0.01, + tracksInputTag = "SingleLongTrackProducer", + tracksRerecoInputTag = ["RefittedShortTracks3", + "RefittedShortTracks4", + "RefittedShortTracks5", + "RefittedShortTracks6", + "RefittedShortTracks7", + "RefittedShortTracks8"]) + +##################################################################### +# Path +##################################################################### +process.analysis_step = cms.Path(process.offlineBeamSpot * + process.MeasurementTrackerEvent * + process.SingleLongTrackProducer * + process.LongTracksRefit * + process.ShortTrackCandidates3 * + process.ShortTrackCandidates4 * + process.ShortTrackCandidates5 * + process.ShortTrackCandidates6 * + process.ShortTrackCandidates7 * + process.ShortTrackCandidates8 * + process.RefittedShortTracks3 * + process.RefittedShortTracks4 * + process.RefittedShortTracks5 * + process.RefittedShortTracks6 * + process.RefittedShortTracks7 * + process.RefittedShortTracks8 * + process.ShortenedTrackValidation) + +################################################################### +# Set the process to run multi-threaded +################################################################### +process.options.numberOfThreads = 8 diff --git 
a/Alignment/OfflineValidation/test/testTkAlStyle.C b/Alignment/OfflineValidation/test/testTkAlStyle.C index 039561b000528..e1148728d4dcc 100644 --- a/Alignment/OfflineValidation/test/testTkAlStyle.C +++ b/Alignment/OfflineValidation/test/testTkAlStyle.C @@ -6,51 +6,50 @@ #include "TPaveText.h" #include "TROOT.h" -#include "../macros/TkAlStyle.cc" - +#include "../interface/TkAlStyle.h" void testTkAlStyle() { - gROOT->ProcessLine(".L ../macros/TkAlStyle.cc+"); - TkAlStyle::set(PRELIMINARY); // set publication status + //gROOT->ProcessLine(".L ../src/TkAlStyle.cc++g"); + TkAlStyle::set(PRELIMINARY); // set publication status - TCanvas* can = new TCanvas("can","can",500,500); + TCanvas* can = new TCanvas("can", "can", 500, 500); can->cd(); - // Create dummy histograms representing validation plots, // e.g. DMR plots, for a particular alignment object, using // line style accordingly - TH1* h1 = new TH1D("h1",";x title;y title",100,-10,10); - h1->FillRandom("gaus",1000); + TH1* h1 = new TH1D("h1", ";x title;y title", 100, -10, 10); + h1->FillRandom("gaus", 1000); h1->SetLineColor(TkAlStyle::color(IDEALAlign)); h1->SetLineStyle(TkAlStyle::style(IDEALAlign)); - h1->GetYaxis()->SetRangeUser(0,110); + h1->GetYaxis()->SetRangeUser(0, 110); - TH1* h2 = new TH1D("h2",";x title;y title",100,-10,10); - h2->FillRandom("gaus",500); + TH1* h2 = new TH1D("h2", ";x title;y title", 100, -10, 10); + h2->FillRandom("gaus", 500); h2->SetLineColor(TkAlStyle::color(CRAFTAlign)); h2->SetLineStyle(TkAlStyle::style(CRAFTAlign)); - h2->GetYaxis()->SetRangeUser(0,110); + h2->GetYaxis()->SetRangeUser(0, 110); h1->Draw(); h2->Draw("same"); - // Add a title that specifies the data-taking era // (title specifies also the publication label "CMS Preliminary" // etc. 
according to the status set above) - TPaveText* title = TkAlStyle::standardTitle(CRAFT15); + TPaveText* title = TkAlStyle::standardRightTitle(CRAFT15); title->Draw("same"); - // Add a legend at the top left with 2 entries stretching // over 60% of the pad's width. Legend labels depend on // the alignment object. - TLegend* leg = TkAlStyle::legend("top left",2,0.6); - leg->AddEntry(h1,toTString(IDEALAlign),"L"); - leg->AddEntry(h2,toTString(CRAFTAlign),"L"); + TLegend* leg = TkAlStyle::legend("top left", 2, 0.6); + leg->AddEntry(h1, TkAlStyle::toTString(IDEALAlign), "L"); + leg->AddEntry(h2, TkAlStyle::toTString(CRAFTAlign), "L"); leg->Draw("same"); gPad->RedrawAxis(); can->SaveAs("test.pdf"); } + +// main function for unit test +int main(int argc, char** argv) { testTkAlStyle(); } diff --git a/Alignment/OfflineValidation/test/testTrackAnalyzers.cc b/Alignment/OfflineValidation/test/testTrackAnalyzers.cc index 1874fcbacbd2a..a90a28a2e6bb6 100644 --- a/Alignment/OfflineValidation/test/testTrackAnalyzers.cc +++ b/Alignment/OfflineValidation/test/testTrackAnalyzers.cc @@ -2,36 +2,35 @@ #include "FWCore/Utilities/interface/Exception.h" #include "FWCore/ServiceRegistry/interface/Service.h" #include "CommonTools/UtilAlgos/interface/TFileService.h" +#include "Alignment/OfflineValidation/interface/TkAlStyle.h" #define CATCH_CONFIG_MAIN #include "catch.hpp" -TEST_CASE("GeneralPurposeTrackAnalyzer tests", "[GeneralPurposeTrackAnalyzer]") { - //The python configuration - const std::string baseConfig{ - R"_(from FWCore.TestProcessor.TestProcess import * -from Alignment.OfflineValidation.generalPurposeTrackAnalyzer_cfi import generalPurposeTrackAnalyzer -process = TestProcess() -process.trackAnalyzer = generalPurposeTrackAnalyzer -process.moduleToTest(process.trackAnalyzer) -process.add_(cms.Service('MessageLogger')) -process.add_(cms.Service('JobReportService')) -process.add_(cms.Service('TFileService',fileName=cms.string('tesTrackAnalyzer1.root'))) -)_"}; - +// Function to run 
the catch2 tests +//___________________________________________________________________________________________ +void runTestForAnalyzer(const std::string& baseConfig, const std::string& analyzerName) { edm::test::TestProcessor::Config config{baseConfig}; - SECTION("base configuration is OK") { REQUIRE_NOTHROW(edm::test::TestProcessor(config)); } - // SECTION("No event data") { - // edm::test::TestProcessor tester(config); - // REQUIRE_NOTHROW(tester.test()); - // } + SECTION(analyzerName + " base configuration is OK") { REQUIRE_NOTHROW(edm::test::TestProcessor(config)); } + + SECTION(analyzerName + " No Runs data") { + edm::test::TestProcessor tester(config); + REQUIRE_NOTHROW(tester.testWithNoRuns()); + } - SECTION("beginJob and endJob only") { + SECTION(analyzerName + " beginJob and endJob only") { edm::test::TestProcessor tester(config); REQUIRE_NOTHROW(tester.testBeginAndEndJobOnly()); } + // Add more sections as needed + + //SECTION("No event data") { + // edm::test::TestProcessor tester(config); + // REQUIRE_NOTHROW(tester.test()); + //} + // SECTION("Run with no LuminosityBlocks") { // edm::test::TestProcessor tester(config); // REQUIRE_NOTHROW(tester.testRunWithNoLuminosityBlocks()); @@ -43,65 +42,122 @@ process.add_(cms.Service('TFileService',fileName=cms.string('tesTrackAnalyzer1.r // } } -TEST_CASE("DMRChecker tests", "[DMRChecker]") { - //The python configuration - const std::string baseConfig{ - R"_(from FWCore.TestProcessor.TestProcess import * -from Alignment.OfflineValidation.dmrChecker_cfi import dmrChecker +// Function to generate base configuration string +//___________________________________________________________________________________________ +std::string generateBaseConfig(const std::string& analyzerName, const std::string& rootFileName) { + // Define a raw string literal + constexpr const char* rawString = R"_(from FWCore.TestProcessor.TestProcess import * +from Alignment.OfflineValidation.{}_cfi import {} process = TestProcess() 
-process.dmrAnalyzer = dmrChecker -process.moduleToTest(process.dmrAnalyzer) +process.trackAnalyzer = {} +process.moduleToTest(process.trackAnalyzer) process.add_(cms.Service('MessageLogger')) process.add_(cms.Service('JobReportService')) -process.add_(cms.Service('TFileService',fileName=cms.string('tesTrackAnalyzer2.root'))) -)_"}; +process.add_(cms.Service('TFileService',fileName=cms.string('{}'))) + )_"; - edm::test::TestProcessor::Config config{baseConfig}; - SECTION("base configuration is OK") { REQUIRE_NOTHROW(edm::test::TestProcessor(config)); } - - // SECTION("No event data") { - // edm::test::TestProcessor tester(config); - // REQUIRE_NOTHROW(tester.test()); - // } + // Format the raw string literal using fmt::format + return fmt::format(rawString, analyzerName, analyzerName, analyzerName, rootFileName); +} - SECTION("beginJob and endJob only") { - edm::test::TestProcessor tester(config); - REQUIRE_NOTHROW(tester.testBeginAndEndJobOnly()); - } +//___________________________________________________________________________________________ +TEST_CASE("GeneralPurposeTrackAnalyzer tests", "[GeneralPurposeTrackAnalyzer]") { + const std::string baseConfig = generateBaseConfig("generalPurposeTrackAnalyzer", "tesTrackAnalyzer0.root"); + runTestForAnalyzer(baseConfig, "GeneralPurposeTrackAnalyzer"); +} - // SECTION("Run with no LuminosityBlocks") { - // edm::test::TestProcessor tester(config); - // REQUIRE_NOTHROW(tester.testRunWithNoLuminosityBlocks()); - // } +//___________________________________________________________________________________________ +TEST_CASE("GeneralPurposeVertexAnalyzer tests", "[GeneralPurposeVertexAnalyzer]") { + const std::string baseConfig = generateBaseConfig("generalPurposeVertexAnalyzer", "tesVertexAnalyzer1.root"); + runTestForAnalyzer(baseConfig, "GeneralPurposeVertexAnalyzer"); +} - // SECTION("LuminosityBlock with no Events") { - // edm::test::TestProcessor tester(config); - // 
REQUIRE_NOTHROW(tester.testLuminosityBlockWithNoEvents()); - // } +//___________________________________________________________________________________________ +TEST_CASE("DMRChecker tests", "[DMRChecker]") { + const std::string baseConfig = generateBaseConfig("dmrChecker", "tesTrackAnalyzer2.root"); + runTestForAnalyzer(baseConfig, "DMRChecker"); } +//___________________________________________________________________________________________ TEST_CASE("JetHTAnalyzer tests", "[JetHTAnalyzer]") { - //The python configuration - edm::test::TestProcessor::Config config{ - R"_(import FWCore.ParameterSet.Config as cms -from FWCore.TestProcessor.TestProcess import * -from Alignment.OfflineValidation.jetHTAnalyzer_cfi import jetHTAnalyzer -process = TestProcess() -process.JetHTAnalyzer = jetHTAnalyzer -process.moduleToTest(process.JetHTAnalyzer) -process.add_(cms.Service('JobReportService')) -process.add_(cms.Service('TFileService',fileName=cms.string('tesTrackAnalyzer3.root'))) -)_"}; + const std::string baseConfig = generateBaseConfig("jetHTAnalyzer", "tesTrackAnalyzer3.root"); + runTestForAnalyzer(baseConfig, "JetHTAnalyzer"); +} - SECTION("base configuration is OK") { REQUIRE_NOTHROW(edm::test::TestProcessor(config)); } +//___________________________________________________________________________________________ +TEST_CASE("DiMuonValidation tests", "[DiMuonValidation]") { + const std::string baseConfig = generateBaseConfig("diMuonValidation", "tesTrackAnalyzer4.root"); + runTestForAnalyzer(baseConfig, "DiMuonValidation"); +} - SECTION("beginJob and endJob only") { - edm::test::TestProcessor tester(config); - REQUIRE_NOTHROW(tester.testBeginAndEndJobOnly()); - } +//___________________________________________________________________________________________ +TEST_CASE("CosmicSplitterValidation tests", "[CosmicsSplitterValidation]") { + const std::string baseConfig = generateBaseConfig("cosmicSplitterValidation", "tesTrackAnalyzer5.root"); + 
runTestForAnalyzer(baseConfig, "CosmicSplitterValidation"); +} - // SECTION("No event data") { - // edm::test::TestProcessor tester(config); - // REQUIRE_NOTHROW(tester.test()); - //} +//___________________________________________________________________________________________ +TEST_CASE("DiElectronVertexValidation tests", "[DiElectronVertexValidation]") { + const std::string baseConfig = generateBaseConfig("diElectronVertexValidation", "tesTrackAnalyzer6.root"); + runTestForAnalyzer(baseConfig, "DiElectronVertexValidation"); +} + +//___________________________________________________________________________________________ +TEST_CASE("DiMuonVertexValidation tests", "[DiMuonVertexValidation]") { + const std::string baseConfig = generateBaseConfig("diMuonVertexValidation", "tesTrackAnalyzer7.root"); + runTestForAnalyzer(baseConfig, "DiMuonVertexValidation"); +} + +//___________________________________________________________________________________________ +TEST_CASE("EopElecTreeWriter tests", "[EopElecTreeWriter]") { + const std::string baseConfig = generateBaseConfig("eopElecTreeWriter", "tesTrackAnalyzer8.root"); + runTestForAnalyzer(baseConfig, "EopElecTreeWriter"); +} + +//___________________________________________________________________________________________ +TEST_CASE("EopTreeWriter tests", "[EopTreeWriter]") { + const std::string baseConfig = generateBaseConfig("eopTreeWriter", "tesTrackAnalyzer9.root"); + runTestForAnalyzer(baseConfig, "EopTreeWriter"); +} + +//___________________________________________________________________________________________ +TEST_CASE("OverlapValidation tests", "[OverlapValidation]") { + const std::string baseConfig = generateBaseConfig("overlapValidation", "tesTrackAnalyzer10.root"); + runTestForAnalyzer(baseConfig, "OverlapValidation"); +} + +//___________________________________________________________________________________________ +TEST_CASE("PixelBaryCentreAnalyzer tests", "[PixelBaryCentreAnalyzer]") { + const 
std::string baseConfig = generateBaseConfig("pixelBaryCentreAnalyzer", "tesTrackAnalyzer11.root"); + runTestForAnalyzer(baseConfig, "PixelBaryCentreAnalyzer"); +} + +//___________________________________________________________________________________________ +TEST_CASE("PrimaryVertexValidation tests", "[PrimaryVertexValidation]") { + const std::string baseConfig = generateBaseConfig("primaryVertexValidation", "tesTrackAnalyzer12.root"); + runTestForAnalyzer(baseConfig, "PrimaryVertexValidation"); +} + +//___________________________________________________________________________________________ +TEST_CASE("SplitVertexResolution tests", "[SplitVertexResolution]") { + const std::string baseConfig = generateBaseConfig("splitVertexResolution", "tesTrackAnalyzer13.root"); + runTestForAnalyzer(baseConfig, "SplitVertexResolution"); +} + +//___________________________________________________________________________________________ +TEST_CASE("TrackerGeometryIntoNtuples tests", "[TrackerGeometryIntoNtuples]") { + const std::string baseConfig = generateBaseConfig("trackerGeometryIntoNtuples", "tesTrackAnalyzer14.root"); + runTestForAnalyzer(baseConfig, "TrackerGeometryIntoNtuples"); +} + +//___________________________________________________________________________________________ +TEST_CASE("TrackerOfflineValidation tests", "[TrackerOfflineValidation]") { + const std::string baseConfig = generateBaseConfig("TrackerOfflineValidation", "tesTrackAnalyzer15.root"); + runTestForAnalyzer(baseConfig, "TrackerOfflineValidation"); +} + +//___________________________________________________________________________________________ +TEST_CASE("TrackerGeometryCompare tests", "[TrackerGeometryCompare]") { + const std::string baseConfig = generateBaseConfig("trackerGeometryCompare", "tesTrackAnalyzer16.root"); + runTestForAnalyzer(baseConfig, "trackerGeometryCompare"); } diff --git a/Alignment/OfflineValidation/test/test_all_Phase2_cfg.py 
b/Alignment/OfflineValidation/test/test_all_Phase2_cfg.py index cca0e44db5153..1c9a437574dc1 100644 --- a/Alignment/OfflineValidation/test/test_all_Phase2_cfg.py +++ b/Alignment/OfflineValidation/test/test_all_Phase2_cfg.py @@ -258,7 +258,7 @@ class RefitType(Enum): ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), diff --git a/Alignment/OfflineValidation/test/test_all_cfg.py b/Alignment/OfflineValidation/test/test_all_cfg.py index 024b97bd3d9d7..c6bc6945fdbe5 100644 --- a/Alignment/OfflineValidation/test/test_all_cfg.py +++ b/Alignment/OfflineValidation/test/test_all_cfg.py @@ -256,7 +256,7 @@ class RefitType(Enum): ) ## MM 04.05.2017 (use settings as in: https://github.com/cms-sw/cmssw/pull/18330) -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA_vectParameters +from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import DA_vectParameters DAClusterizationParams = DA_vectParameters.clone() GapClusterizationParams = cms.PSet(algorithm = cms.string('gap'), diff --git a/Alignment/OfflineValidation/test/testingScripts/test_unitDiMuonV.sh b/Alignment/OfflineValidation/test/testingScripts/test_unitDiMuonV.sh new file mode 100755 index 0000000000000..6d01a1d61385b --- /dev/null +++ b/Alignment/OfflineValidation/test/testingScripts/test_unitDiMuonV.sh @@ -0,0 +1,16 @@ +#! /bin/bash + +function die { echo $1: status $2 ; exit $2; } + +echo "TESTING Alignment/DiMuonV single configuration with json..." +pushd test_yaml/DiMuonV/single/testUnits/unitTestDiMuonVMC/1/ +./cmsRun validation_cfg.py config=validation.json || die "Failure running DiMuonV single configuration with json" $? 
+ +echo "TESTING Alignment/DiMuonV single configuration standalone..." +./cmsRun validation_cfg.py || die "Failure running DiMuonV single configuration standalone" $? +popd + +echo "TESTING DiMuonV merge step" +pushd test_yaml/DiMuonV/merge/testUnits/1/ +./DiMuonVmerge validation.json --verbose || die "Failure running DiMuonV merge step" $? +popd diff --git a/Alignment/OfflineValidation/test/testingScripts/test_unitMTS.sh b/Alignment/OfflineValidation/test/testingScripts/test_unitMTS.sh new file mode 100755 index 0000000000000..71945e6a00b63 --- /dev/null +++ b/Alignment/OfflineValidation/test/testingScripts/test_unitMTS.sh @@ -0,0 +1,23 @@ +#! /bin/bash + +function die { echo $1: status $2 ; exit $2; } + +echo "TESTING Alignment/MTS single configuration with json..." +pushd test_yaml/MTS/single/testSingleMTS/PromptNewTemplate/1 +./cmsRun validation_cfg.py config=validation.json || die "Failure running MTS single configuration with json" $? + +echo "TESTING Alignment/MTS single configuration standalone..." +./cmsRun validation_cfg.py || die "Failure running MTS single configuration standalone" $? +popd + +pushd test_yaml/MTS/single/testSingleMTS/mp3619/1 +./cmsRun validation_cfg.py config=validation.json || die "Failure running MTS single configuration with json (part 2)" $? + +echo "TESTING Alignment/MTS single configuration standalone..." +./cmsRun validation_cfg.py || die "Failure running MTS single configuration standalone (part 2)" $? +popd + +echo "TESTING MTS merge step" +pushd test_yaml/MTS/merge/testSingleMTS/1 +./MTSmerge validation.json --verbose || die "Failure running MTS merge step" $? 
+popd diff --git a/Alignment/OfflineValidation/test/testingScripts/test_unitMiscellanea.sh b/Alignment/OfflineValidation/test/testingScripts/test_unitMiscellanea.sh index 1a4735bac6287..1f339a3bb3779 100755 --- a/Alignment/OfflineValidation/test/testingScripts/test_unitMiscellanea.sh +++ b/Alignment/OfflineValidation/test/testingScripts/test_unitMiscellanea.sh @@ -2,7 +2,10 @@ function die { echo $1: status $2 ; exit $2; } echo "TESTING inspect ALCARECO data ..." -cmsRun ${CMSSW_BASE}/src/Alignment/OfflineValidation/test/inspectData_cfg.py unitTest=True trackCollection=ALCARECOTkAlCosmicsCTF0T || die "Failure running inspectData_cfg.py" $? +cmsRun ${CMSSW_BASE}/src/Alignment/OfflineValidation/test/inspectData_cfg.py unitTest=True isCosmics=True trackCollection=ALCARECOTkAlCosmicsCTF0T || die "Failure running inspectData_cfg.py" $? + +echo "TESTING inspect Phase2 ALCARECO data ..." +cmsRun ${CMSSW_BASE}/src/Alignment/OfflineValidation/test/inspectData_cfg.py unitTest=True isCosmics=False globalTag='' trackCollection=ALCARECOTkAlZMuMu isDiMuonData=True Detector='2026D98' || die "Failure running inspectData_cfg.py on Phase-2 input" $? echo "TESTING G4e refitter ..." cmsRun ${CMSSW_BASE}/src/Alignment/OfflineValidation/test/testG4Refitter_cfg.py maxEvents=10 || die "Failure running testG4Refitter_cfg.py" $? 
diff --git a/Alignment/OfflineValidation/test/unit_test.json b/Alignment/OfflineValidation/test/unit_test.json index d8f4bf780cbb1..773866c51fec5 100644 --- a/Alignment/OfflineValidation/test/unit_test.json +++ b/Alignment/OfflineValidation/test/unit_test.json @@ -15,6 +15,12 @@ "style": "2001", "title": "unit test" }, + "unitTestDiMuonVMC": { + "color": "1", + "globaltag": "auto:auto:phase1_2022_realistic", + "style": "2101", + "title": "unit test" + }, "ideal": { "color": "1", "globaltag": "auto:phase1_2017_design", @@ -50,7 +56,45 @@ "globaltag": "auto:phase1_2018_realistic", "style": "2101", "title": "unit test" - } + }, + "PromptNewTemplate" : { + "name" : "PromptNewTemplate", + "color" : "1", + "globaltag" : "124X_dataRun3_Prompt_v10", + "style" : "2301", + "title" : "Alignment in prompt with 400V pixel templates", + "conditions" : { + "SiPixelTemplateDBObjectRcd" : { + "connect" : "frontier://FrontierProd/CMS_CONDITIONS", + "tag" : "SiPixelTemplateDBObject_phase1_38T_2022_v9" + }, + "SiPixel2DTemplateDBObjectRcd" : { + "connect" : "frontier://FrontierProd/CMS_CONDITIONS", + "tag" : "SiPixel2DTemplateDBObject_phase1_38T_2022_v9" + } + } + }, + "mp3619" : { + "name" : "mp3619", + "color" : "2", + "globaltag" : "124X_dataRun3_Prompt_v10", + "style" : "2001", + "title" : "mp3619", + "conditions" : { + "TrackerAlignmentRcd" : { + "connect" : "frontier://FrontierProd/CMS_CONDITIONS", + "tag" : "TrackerAlignment_collisions22_v13" + }, + "SiPixelTemplateDBObjectRcd" : { + "connect" : "frontier://FrontierProd/CMS_CONDITIONS", + "tag" : "SiPixelTemplateDBObject_phase1_38T_2022_v9" + }, + "SiPixel2DTemplateDBObjectRcd" : { + "connect" : "frontier://FrontierProd/CMS_CONDITIONS", + "tag" : "SiPixel2DTemplateDBObject_phase1_38T_2022_v9" + } + } + } }, "validations": { "DMR": { @@ -140,6 +184,40 @@ } } }, + "DiMuonV" : { + "merge": { + "testUnits": { + "singles": ["testUnits"] + } + }, + "single" : { + "testUnits" : { + "IOV" : ["1"], + "alignments" : 
["unitTestDiMuonVMC"], + "trackcollection" : "generalTracks", + "maxevents" : "10" + } + } + }, + "MTS" : { + "merge" : { + "testSingleMTS" : { + "singles" : ["testSingleMTS"] + } + }, + "single" : { + "testSingleMTS" : { + "IOV" : ["1"], + "alignments": ["PromptNewTemplate","mp3619"], + "maxevents" : 200000, + "trackcollection" : "ALCARECOTkAlCosmicsCTF0T", + "tthrbuilder" : "WithAngleAndTemplate", + "usePixelQualityFlag" : "True", + "cosmicsZeroTesla" : "False", + "magneticfield" : 3.8 + } + } + }, "GCP": { "GCPdetUnits": { "levels": "DetUnit", diff --git a/Alignment/OfflineValidation/test/unit_test.yaml b/Alignment/OfflineValidation/test/unit_test.yaml index 2e4f076d9e13d..6274962d8076c 100644 --- a/Alignment/OfflineValidation/test/unit_test.yaml +++ b/Alignment/OfflineValidation/test/unit_test.yaml @@ -39,7 +39,40 @@ alignments: globaltag: auto:phase1_2018_realistic style: 2101 title: unit test - + unitTestDiMuonVMC: + color: 1 + globaltag: auto:phase1_2022_realistic + style: 2101 + title: unit test + PromptNewTemplate: + name: PromptNewTemplate + color: 1 + globaltag: 124X_dataRun3_Prompt_v10 + style: 2301 + title: Alignment in prompt with 400V pixel templates + conditions: + SiPixelTemplateDBObjectRcd: + connect: frontier://FrontierProd/CMS_CONDITIONS + tag: SiPixelTemplateDBObject_phase1_38T_2022_v9 + SiPixel2DTemplateDBObjectRcd: + connect: frontier://FrontierProd/CMS_CONDITIONS + tag: SiPixel2DTemplateDBObject_phase1_38T_2022_v9 + mp3619: + name: mp3619 + color: 2 + globaltag: 124X_dataRun3_Prompt_v10 + style: 2001 + title: mp3619 + conditions: + TrackerAlignmentRcd: + connect: frontier://FrontierProd/CMS_CONDITIONS + tag: TrackerAlignment_collisions22_v13 + SiPixelTemplateDBObjectRcd: + connect: frontier://FrontierProd/CMS_CONDITIONS + tag: SiPixelTemplateDBObject_phase1_38T_2022_v9 + SiPixel2DTemplateDBObjectRcd: + connect: frontier://FrontierProd/CMS_CONDITIONS + tag: SiPixel2DTemplateDBObject_phase1_38T_2022_v9 validations: DMR: single: @@ -136,6 
+169,37 @@ validations: HLTSelection: False triggerBits: HLT_* maxevents: 10 + DiMuonV: + merge: + testUnits: + singles: + - testUnits + single: + testUnits: + IOV: + - 1 + alignments: + - unitTestDiMuonVMC + trackcollection: generalTracks + maxevents: 10 + MTS: + merge: + testSingleMTS: + singles: + - testSingleMTS + single: + testSingleMTS: + IOV: + - 1 + alignments: + - PromptNewTemplate + - mp3619 + maxevents: 200000 + trackcollection: ALCARECOTkAlCosmicsCTF0T + tthrbuilder: WithAngleAndTemplate + usePixelQualityFlag: True + cosmicsZeroTesla: False + magneticfield: 3.8 GCP: GCPdetUnits: levels: DetUnit diff --git a/Alignment/ReferenceTrajectories/src/ReferenceTrajectory.cc b/Alignment/ReferenceTrajectories/src/ReferenceTrajectory.cc index 6fe8cc45ab15d..cd55c5564ae84 100644 --- a/Alignment/ReferenceTrajectories/src/ReferenceTrajectory.cc +++ b/Alignment/ReferenceTrajectories/src/ReferenceTrajectory.cc @@ -956,7 +956,14 @@ bool ReferenceTrajectory::addMaterialEffectsLocalGbl(const std::vector rejected + // - for the Phase 2 Strips in PS modules (Length ~ 2.4 cm) is 2.08 => accepted + // - for the Phase 2 Strips in 2S modules (Length ~ 5 cm) is 0.48 => accepted + const double minPrec = 0.3; AlgebraicMatrix OffsetToLocal(5, 2); // dLocal/dU OffsetToLocal[3][0] = 1.; @@ -1038,7 +1045,14 @@ bool ReferenceTrajectory::addMaterialEffectsCurvlinGbl(const std::vector rejected + // - for the Phase 2 Strips in PS modules (Length ~ 2.4 cm) is 2.08 => accepted + // - for the Phase 2 Strips in 2S modules (Length ~ 5 cm) is 0.48 => accepted + const double minPrec = 0.3; int ierr = 0; AlgebraicMatrix OffsetToCurv(5, 2); // dCurv/dU diff --git a/CUDADataFormats/EcalRecHitSoA/BuildFile.xml b/CUDADataFormats/EcalRecHitSoA/BuildFile.xml index a684d9a23f1c6..6d67c5d5f6220 100644 --- a/CUDADataFormats/EcalRecHitSoA/BuildFile.xml +++ b/CUDADataFormats/EcalRecHitSoA/BuildFile.xml @@ -3,6 +3,7 @@ + diff --git a/CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h 
b/CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h index 731b8b801407f..3e312218a112f 100644 --- a/CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h +++ b/CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h @@ -5,7 +5,7 @@ #include #include "CUDADataFormats/CaloCommon/interface/Common.h" -#include "CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h" +#include "DataFormats/EcalRecHit/interface/RecoTypes.h" #include "HeterogeneousCore/CUDAUtilities/interface/HostAllocator.h" namespace ecal { diff --git a/CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h b/CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h index 7497f71269089..a48850e68858f 100644 --- a/CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h +++ b/CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h @@ -1,12 +1,9 @@ #ifndef CUDADataFormats_EcalRecHitSoA_interface_EcalUncalibratedRecHit_h #define CUDADataFormats_EcalRecHitSoA_interface_EcalUncalibratedRecHit_h -#include -#include - #include "CUDADataFormats/CaloCommon/interface/Common.h" -#include "CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h" #include "DataFormats/EcalDigi/interface/EcalDataFrame.h" +#include "DataFormats/EcalRecHit/interface/RecoTypes.h" namespace ecal { diff --git a/CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h b/CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h deleted file mode 100644 index 87c4252a5e949..0000000000000 --- a/CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef CUDADataFormats_EcalRecHitSoA_interface_RecoTypes_h -#define CUDADataFormats_EcalRecHitSoA_interface_RecoTypes_h - -namespace ecal { - namespace reco { - - using ComputationScalarType = float; - using StorageScalarType = float; - - } // namespace reco -} // namespace ecal - -#endif // CUDADataFormats_EcalRecHitSoA_interface_RecoTypes_h diff --git a/CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h 
b/CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h index 5888cd04a6128..3beeaa4830c83 100644 --- a/CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h +++ b/CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h @@ -7,29 +7,18 @@ #include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCompat.h" #include "CUDADataFormats/Common/interface/PortableDeviceCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" #include "DataFormats/SoATemplate/interface/SoALayout.h" -GENERATE_SOA_LAYOUT(SiPixelDigisSoALayout, - SOA_COLUMN(int32_t, clus), - SOA_COLUMN(uint32_t, pdigi), - SOA_COLUMN(uint32_t, rawIdArr), - SOA_COLUMN(uint16_t, adc), - SOA_COLUMN(uint16_t, xx), - SOA_COLUMN(uint16_t, yy), - SOA_COLUMN(uint16_t, moduleId)) - -using SiPixelDigisCUDASOA = SiPixelDigisSoALayout<>; -using SiPixelDigisCUDASOAView = SiPixelDigisCUDASOA::View; -using SiPixelDigisCUDASOAConstView = SiPixelDigisCUDASOA::ConstView; - // TODO: The class is created via inheritance of the PortableDeviceCollection. // This is generally discouraged, and should be done via composition. 
// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 -class SiPixelDigisCUDA : public cms::cuda::PortableDeviceCollection> { +class SiPixelDigisCUDA : public cms::cuda::PortableDeviceCollection { public: SiPixelDigisCUDA() = default; explicit SiPixelDigisCUDA(size_t maxFedWords, cudaStream_t stream) - : PortableDeviceCollection>(maxFedWords + 1, stream) {} + : PortableDeviceCollection(maxFedWords + 1, stream) {} + ~SiPixelDigisCUDA() = default; SiPixelDigisCUDA(SiPixelDigisCUDA &&) = default; diff --git a/CalibCalorimetry/EBPhase2TPGTools/BuildFile.xml b/CalibCalorimetry/EBPhase2TPGTools/BuildFile.xml new file mode 100644 index 0000000000000..07af00943a026 --- /dev/null +++ b/CalibCalorimetry/EBPhase2TPGTools/BuildFile.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/CalibCalorimetry/EBPhase2TPGTools/plugins/BuildFile.xml b/CalibCalorimetry/EBPhase2TPGTools/plugins/BuildFile.xml new file mode 100644 index 0000000000000..a9dd20350ca2f --- /dev/null +++ b/CalibCalorimetry/EBPhase2TPGTools/plugins/BuildFile.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/CalibCalorimetry/EBPhase2TPGTools/plugins/EcalEBPhase2TPParamProducer.cc b/CalibCalorimetry/EBPhase2TPGTools/plugins/EcalEBPhase2TPParamProducer.cc new file mode 100644 index 0000000000000..fee2d6b50f29e --- /dev/null +++ b/CalibCalorimetry/EBPhase2TPGTools/plugins/EcalEBPhase2TPParamProducer.cc @@ -0,0 +1,435 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/FileInPath.h" +// +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "Geometry/CaloGeometry/interface/CaloSubdetectorGeometry.h" +#include "Geometry/CaloGeometry/interface/CaloCellGeometry.h" +#include "Geometry/Records/interface/CaloGeometryRecord.h" +#include 
"Geometry/CaloTopology/interface/EcalTrigTowerConstituentsMap.h" +#include "Geometry/EcalMapping/interface/EcalElectronicsMapping.h" +#include "Geometry/EcalMapping/interface/EcalMappingRcd.h" +#include "CondFormats/EcalObjects/interface/EcalTPGCrystalStatus.h" +#include "CondFormats/EcalObjects/interface/EcalLiteDTUPedestals.h" +#include "CondFormats/DataRecord/interface/EcalLiteDTUPedestalsRcd.h" +#include "DataFormats/EcalDigi/interface/EcalConstants.h" +#include "DataFormats/EcalDetId/interface/EcalSubdetector.h" +#include "DataFormats/EcalDetId/interface/EBDetId.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/** +\class EcalEBPhase2TPParamProducer +\author L. Lutton, N. Marinelli - Univ. of Notre Dame +\brief TPG Param Builder for Phase2 +*/ + +class EcalEBPhase2TPParamProducer : public edm::one::EDAnalyzer<> { +public: + explicit EcalEBPhase2TPParamProducer(edm::ParameterSet const& pSet); + ~EcalEBPhase2TPParamProducer() override; + void analyze(const edm::Event& evt, const edm::EventSetup& evtSetup) override; + void beginJob() override; + static void fillDescriptions(edm::ConfigurationDescriptions&); + +private: + std::vector computeWeights(int type); + + void getNumericalDeriv(TGraph graph, TGraph& deriv); + void fillFMat(std::vector clockSampleSet, + bool useThirdPulse, + std::vector sampleSet, + std::vector sampleDotSet, + TMatrix& FMat, + unsigned int binOfMaximum); + void getGMatrix(TMatrix FMat, float scaleMatrixBy, TMatrix& GMat); + void getPulseSampleSet(TGraph pulseGraph, float phaseShift, std::vector& sampleSet); + bool computeLinearizerParam(double theta, double gainRatio, double calibCoeff, int& shift, int& mult); + + const edm::ESGetToken theBarrelGeometryToken_; + const edm::FileInPath inFile_; + const std::string outFile_; + const int nSamplesToUse_; + const bool useBXPlusOne_; + const double phaseShift_; + const unsigned int nWeightGroups_; + const edm::ESGetToken 
theEcalTPGPedestals_Token_; + + gzFile out_file_; + TGraph* thePulse_; + TGraph* pulseDot_; + + const UInt_t NPoints_ = 1599; //With the CMSSW pulse + + static constexpr float norm_ = 1 / 503.109; // with the CMSSW pulse shape + static constexpr float offset_ = 0.; // with the CMSSW pulse shape + int multToInt_ = 0x1000; + + int i2cSub_[2] = {0, 0}; + + const double et_sat_; + const double xtal_LSB_; + const unsigned int binOfMaximum_; + static const int linTopRange_; +}; + +EcalEBPhase2TPParamProducer::EcalEBPhase2TPParamProducer(edm::ParameterSet const& pSet) + : theBarrelGeometryToken_(esConsumes(edm::ESInputTag("", "EcalBarrel"))), + inFile_(pSet.getParameter("inputFile")), + outFile_(pSet.getUntrackedParameter("outputFile")), + nSamplesToUse_(pSet.getParameter("nSamplesToUse")), + useBXPlusOne_(pSet.getParameter("useBXPlusOne")), + phaseShift_(pSet.getParameter("phaseShift")), + nWeightGroups_(pSet.getParameter("nWeightGroups")), + theEcalTPGPedestals_Token_(esConsumes(edm::ESInputTag("EcalLiteDTUPedestals", ""))), + et_sat_(pSet.getParameter("Et_sat")), + xtal_LSB_(pSet.getParameter("xtal_LSB")), + binOfMaximum_(pSet.getParameter("binOfMaximum")) + +{ + out_file_ = gzopen(outFile_.c_str(), "wb"); + + std::string filename = inFile_.fullPath(); + TFile* inFile = new TFile(filename.c_str(), "READ"); + + inFile->GetObject("average-pulse", thePulse_); + delete inFile; + + if (binOfMaximum_ != 6 && binOfMaximum_ != 8) + edm::LogError("EcalEBPhase2TPParamProducer") + << " Value for binOfMaximum " << binOfMaximum_ << " is wrong, The default binOfMaximum=6 will be used"; + + if (nSamplesToUse_ != 6 && nSamplesToUse_ != 8 && nSamplesToUse_ != 12) + edm::LogError("EcalEBPhase2TPParamProducer") + << " Value for nSamplesToUse " << nSamplesToUse_ << " is wrong, The default nSamplesToUse=8 will be used"; +} + +EcalEBPhase2TPParamProducer::~EcalEBPhase2TPParamProducer() { gzclose(out_file_); } + +void EcalEBPhase2TPParamProducer::beginJob() {} + +void 
EcalEBPhase2TPParamProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("inputFile"); + desc.addUntracked("outputFile"); + desc.add("nSamplesToUse", 8); + desc.add("useBXPlusOne", false); + desc.add("phaseShift", 2.581); + desc.add("nWeightGroups", 61200); + desc.add("Et_sat", 1998.36); + desc.add("xtal_LSB", 0.0488); + desc.add("binOfMaximum", 6); + descriptions.add("ecalEBPhase2TPParamProducerDefault", desc); +} + +void EcalEBPhase2TPParamProducer::analyze(const edm::Event& evt, const edm::EventSetup& evtSetup) { + using namespace edm; + using namespace std; + + const EcalLiteDTUPedestals* peds = nullptr; + const auto* theBarrelGeometry = &evtSetup.getData(theBarrelGeometryToken_); + const auto* theEcalTPPedestals = &evtSetup.getData(theEcalTPGPedestals_Token_); + + std::string tmpStringConv; + const char* tmpStringOut; + + // Compute weights // + std::vector ampWeights[nWeightGroups_]; + std::vector timeWeights[nWeightGroups_]; + + for (unsigned int iGr = 0; iGr < nWeightGroups_; iGr++) { + ampWeights[iGr] = computeWeights(1); + timeWeights[iGr] = computeWeights(2); + } + + /* write to compressed file */ + std::stringstream toCompressStream(""); + for (unsigned int iGr = 0; iGr < nWeightGroups_; iGr++) { + toCompressStream << " WEIGHTAMP " << dec << iGr << std::endl; + for (long unsigned int i = 0; i < ampWeights[iGr].size(); i++) { + if (ampWeights[iGr][i] < 0) + toCompressStream << "-0x" << std::hex << abs(ampWeights[iGr][i]) << " "; + else + toCompressStream << "0x" << std::hex << ampWeights[iGr][i] << " "; + } + toCompressStream << "\n"; + } + toCompressStream << "\n"; + tmpStringConv = toCompressStream.str(); + tmpStringOut = tmpStringConv.c_str(); + gzwrite(out_file_, tmpStringOut, std::strlen(tmpStringOut)); + toCompressStream.str(std::string()); + + for (unsigned int iGr = 0; iGr < nWeightGroups_; iGr++) { + toCompressStream << "WEIGHTTIME " << dec << iGr << std::endl; + for (long 
unsigned int i = 0; i < timeWeights[iGr].size(); i++) { + if (timeWeights[iGr][i] < 0) + toCompressStream << "-0x" << std::hex << abs(timeWeights[iGr][i]) << " "; + else + toCompressStream << "0x" << std::hex << timeWeights[iGr][i] << " "; + } + toCompressStream << "\n"; + } + + toCompressStream << "\n"; + tmpStringConv = toCompressStream.str(); + tmpStringOut = tmpStringConv.c_str(); + gzwrite(out_file_, tmpStringOut, std::strlen(tmpStringOut)); + toCompressStream.str(std::string()); + + // fill map between xTals and groups. If each xTal is a group there is a one-to-one map + const std::vector& ebCells = theBarrelGeometry->getValidDetIds(DetId::Ecal, EcalBarrel); + std::map mapXtalToGroup; + + int iGroup = 0; + for (const auto& it : ebCells) { + EBDetId id(it); + std::pair xTalToGroup(id.rawId(), iGroup); + mapXtalToGroup.insert(xTalToGroup); + iGroup++; + } + + //write to file + + for (std::map::const_iterator it = mapXtalToGroup.begin(); it != mapXtalToGroup.end(); it++) { + toCompressStream << "CRYSTAL " << dec << it->first << std::endl; + toCompressStream << it->second << std::endl; + } + tmpStringConv = toCompressStream.str(); + tmpStringOut = tmpStringConv.c_str(); + gzwrite(out_file_, tmpStringOut, std::strlen(tmpStringOut)); + toCompressStream.str(std::string()); + + ///////////////////////////////////// + + for (const auto& it : ebCells) { + EBDetId id(it); + toCompressStream << "LINCONST " << dec << id.rawId() << std::endl; + double theta = theBarrelGeometry->getGeometry(id)->getPosition().theta(); + EcalLiteDTUPedestalsMap::const_iterator itped = theEcalTPPedestals->getMap().find(id); + + if (itped != theEcalTPPedestals->end()) { + peds = &(*itped); + + } else { + edm::LogError("EcalEBPhase2TPParamProducer") << " could not find EcalLiteDTUPedestal entry for " << id; + throw cms::Exception("could not find pedestals"); + } + + int shift, mult; + double calibCoeff = 1.; + bool ok; + for (unsigned int i = 0; i < ecalPh2::NGAINS; ++i) { + ok = 
computeLinearizerParam(theta, ecalph2::gains[ecalPh2::NGAINS - 1 - i], calibCoeff, shift, mult); + if (!ok) { + edm::LogError("EcalEBPhase2TPParamProducer") + << "unable to compute the parameters for SM=" << id.ism() << " xt=" << id.ic() << " " << id.rawId(); + throw cms::Exception("unable to compute the parameters"); + + } else { + int tmpPedByGain = (int)(peds->mean(i) + 0.5); + toCompressStream << std::hex << " 0x" << tmpPedByGain << " 0x" << mult << " 0x" << shift << " " << i2cSub_[i] + << std::endl; + } + } + } + tmpStringConv = toCompressStream.str(); + tmpStringOut = tmpStringConv.c_str(); + gzwrite(out_file_, tmpStringOut, std::strlen(tmpStringOut)); + toCompressStream.str(std::string()); +} + +std::vector EcalEBPhase2TPParamProducer::computeWeights(int type) { + std::vector sampleSet; + std::vector sampleDotSet; + std::vector clockSampleSet; + double scaleMatrixBy = 1.; + int lbinOfMaximum = binOfMaximum_; + + switch (binOfMaximum_) { + case 6: + break; + case 8: + break; + default: + lbinOfMaximum = 6; + break; + } + + switch (nSamplesToUse_) { + case 12: + clockSampleSet = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + break; + case 8: + switch (lbinOfMaximum) { + case 8: + clockSampleSet = {2, 3, 4, 5, 6, 7, 8, 9}; + break; + case 6: + clockSampleSet = {0, 1, 2, 3, 4, 5, 6, 7}; + break; + } + break; + + case 6: + switch (lbinOfMaximum) { + case 8: + clockSampleSet = {3, 4, 6, 7, 8, 9}; + break; + case 6: + clockSampleSet = {1, 2, 4, 5, 6, 7}; + break; + } + break; + + default: + clockSampleSet = {0, 1, 2, 3, 4, 5, 6, 7}; + break; + } + + getPulseSampleSet(*thePulse_, phaseShift_, sampleSet); + pulseDot_ = new TGraph(); + getNumericalDeriv(*thePulse_, *pulseDot_); + getPulseSampleSet(*pulseDot_, phaseShift_, sampleDotSet); + + unsigned int fMatColumns = useBXPlusOne_ ? 
6 : 4; + + TMatrix fMat(clockSampleSet.size(), fMatColumns); + fillFMat(clockSampleSet, useBXPlusOne_, sampleSet, sampleDotSet, fMat, lbinOfMaximum); + TMatrix gMat(fMatColumns, clockSampleSet.size()); + + getGMatrix(fMat, scaleMatrixBy, gMat); + + std::vector tmpWeightVec; + std::vector tmpTimeWeightVec; + unsigned int iClock = 0; + for (unsigned int iSample = 0; iSample < 12; iSample++) { + bool inSampleSet = false; + for (unsigned int clockSample = 0; clockSample < clockSampleSet.size(); clockSample++) { + if (iSample == clockSampleSet[clockSample]) { + inSampleSet = true; + iClock = clockSample; + break; + } + } + if (inSampleSet) { + if (type == 1) + tmpWeightVec.push_back(round(gMat(2, iClock) * multToInt_)); // amp weights + if (type == 2) + tmpWeightVec.push_back(round(gMat(3, iClock) * multToInt_)); // time weights + } else { + if (type == 1) + tmpWeightVec.push_back(0); // amp weights + if (type == 2) + tmpWeightVec.push_back(0); // time weights + } + } + + return tmpWeightVec; +} + +void EcalEBPhase2TPParamProducer::getNumericalDeriv(TGraph graph, TGraph& deriv) { + UInt_t numPoints = graph.GetN(); + if (numPoints != NPoints_) { + edm::LogWarning("EcalEBPhase2TPParamProducer") << "Error! Wrong amount of points in pulse graph! 
"; + } + Double_t xval; + Double_t yval; + Double_t xvalPOne; + Double_t yvalPOne; + + for (UInt_t p = 0; p < NPoints_ - 1; p++) { + graph.GetPoint(p, xval, yval); + graph.GetPoint(p + 1, xvalPOne, yvalPOne); + float midpoint = (xvalPOne + xval) / 2; + float rise = yvalPOne - yval; + float run = xvalPOne - xval; + deriv.SetPoint(deriv.GetN(), midpoint, rise / run); + } + deriv.SetName("pulse_prime"); +} + +void EcalEBPhase2TPParamProducer::fillFMat(std::vector clockSampleSet, + bool useThirdPulse, + std::vector sampleSet, + std::vector sampleDotSet, + TMatrix& fMat, + uint binOfMaximum) { + Int_t iShift = 8 - binOfMaximum; + for (UInt_t i = 0; i < clockSampleSet.size(); i++) { + Int_t tmpClockToSample = clockSampleSet[i] + iShift; + fMat(i, 0) = sampleSet[tmpClockToSample]; + fMat(i, 1) = sampleDotSet[tmpClockToSample]; + if (tmpClockToSample > 4) { + fMat(i, 2) = sampleSet[tmpClockToSample - 4]; + fMat(i, 3) = sampleDotSet[tmpClockToSample - 4]; + } + if (clockSampleSet[i] > 8 && useThirdPulse) { + fMat(i, 4) = sampleSet[tmpClockToSample - 8]; + fMat(i, 5) = sampleDotSet[tmpClockToSample - 8]; + } + } +} + +void EcalEBPhase2TPParamProducer::getGMatrix(TMatrix fMat, float scaleMatrixBy, TMatrix& gMat) { + TMatrix FT = fMat; + FT.T(); + TMatrix tmpFT = FT; + TMatrix FTDotF = TMatrix(tmpFT, TMatrix::kMult, fMat); + TMatrix InvFTDotF = FTDotF; + + //Possible for this bit to fail depending on the sample set and phase shift + InvFTDotF.Invert(); + + TMatrix tmpMat(InvFTDotF, TMatrix::kMult, FT); + gMat = tmpMat; + gMat *= scaleMatrixBy; +} + +void EcalEBPhase2TPParamProducer::getPulseSampleSet(TGraph pulseGraph, + float phaseShift, + std::vector& sampleSet) { + for (UInt_t i = 0; i < ecalPh2::sampleSize; i++) { + float t = (ecalPh2::Samp_Period * i) + phaseShift; + float y = pulseGraph.Eval(t + offset_) * norm_; + sampleSet.push_back(y); + } +} + +bool EcalEBPhase2TPParamProducer::computeLinearizerParam( + double theta, double gainRatio, double calibCoeff, int& shift, 
int& mult) { + bool result = false; + + static constexpr double linTopRange_ = 16383.; + // linTopRange_ 16383 = (2**14)-1 is setting the top of the range for the linearizer output + double factor = (linTopRange_ * (xtal_LSB_ * gainRatio * calibCoeff * sin(theta))) / et_sat_; + //first with shift_ = 0 + //add 0.5 (for rounding) and set to int + //Here we are getting mult with a max bit length of 8 + //and shift_ with a max bit length of 4 + mult = (int)(factor + 0.5); + for (shift = 0; shift < 15; shift++) { + if (mult >= 128 && mult < 256) { + result = true; + break; + } + factor *= 2; + mult = (int)(factor + 0.5); + } + + return result; +} + +// DEfine this module as a plug-in +DEFINE_FWK_MODULE(EcalEBPhase2TPParamProducer); diff --git a/CalibCalorimetry/EBPhase2TPGTools/python/ecalEBPhase2TPParamProducer_cfi.py b/CalibCalorimetry/EBPhase2TPGTools/python/ecalEBPhase2TPParamProducer_cfi.py new file mode 100644 index 0000000000000..1faa813593464 --- /dev/null +++ b/CalibCalorimetry/EBPhase2TPGTools/python/ecalEBPhase2TPParamProducer_cfi.py @@ -0,0 +1,24 @@ +import FWCore.ParameterSet.Config as cms + +EBPhase2TPGParamProducer = cms.EDAnalyzer("EcalEBPhase2TPParamProducer", +inputFile = cms.FileInPath('SimCalorimetry/EcalEBTrigPrimProducers/data/CMSSWPhaseIIPulseGraphAlt.root'), +outputFile = cms.untracked.string('../../../SimCalorimetry/EcalEBTrigPrimProducers/data/AmpTimeOnPeakXtalWeightsCMSSWPulse_8samples_peakOnSix_WithAndyFixes.txt.gz'), + nSamplesToUse = cms.uint32(8), + useBXPlusOne = cms.bool(False), + phaseShift = cms.double (2.581), + nWeightGroups = cms.uint32(61200), + Et_sat = cms.double(1998.36), + xtal_LSB = cms.double(0.0488), + binOfMaximum = cms.uint32(6) + + +## allowed values of nSamplesToUse: 12, 8, 6. nSamplesToUse=8 is the default +## allowed values of binOfMaximum: 6, 8. 
binOfMaximum=6 is the default +#### The DEFAULT is nSamplesToUse=8, binOfMaximum=6 ####### + +## If nSamplesToUse is 12 ==> useBXPlusOne is True +## If nSamplesToUse is 8 ==> useBXPlusOne is False +## If nSamplesToUse is 6 ==> useBXPlusOne is False + +) + diff --git a/CalibCalorimetry/EBPhase2TPGTools/test/runEBPhase2TPParamProducer.py b/CalibCalorimetry/EBPhase2TPGTools/test/runEBPhase2TPParamProducer.py new file mode 100644 index 0000000000000..eea149b2c2029 --- /dev/null +++ b/CalibCalorimetry/EBPhase2TPGTools/test/runEBPhase2TPParamProducer.py @@ -0,0 +1,91 @@ +import FWCore.ParameterSet.Config as cms +import CondTools.Ecal.db_credentials as auth +import FWCore.ParameterSet.VarParsing as VarParsing + + +from Configuration.Eras.Era_Phase2C17I13M9_cff import Phase2C17I13M9 +from Configuration.Eras.Modifier_phase2_ecal_devel_cff import phase2_ecal_devel + + +#process = cms.Process("ProdTPGParam") +process = cms.Process('DIGI',Phase2C17I13M9,phase2_ecal_devel) + +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('Configuration.EventContent.EventContent_cff') +process.load('SimGeneral.MixingModule.mixNoPU_cfi') +process.load('Configuration.Geometry.GeometryExtended2026D88Reco_cff') +process.load('Configuration.Geometry.GeometryExtended2026D88_cff') +process.load('Configuration.StandardSequences.MagneticField_cff') +process.load('Configuration.StandardSequences.Generator_cff') +process.load('IOMC.EventVertexGenerators.VtxSmearedHLLHC14TeV_cfi') +process.load('GeneratorInterface.Core.genFilterSummary_cff') +process.load('Configuration.StandardSequences.SimIdeal_cff') +process.load('Configuration.StandardSequences.Digi_cff') +process.load('Configuration.StandardSequences.EndOfProcess_cff') +#process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') TO BE FIXED 
+process.load('CalibCalorimetry.EBPhase2TPGTools.ecalEBPhase2TPParamProducer_cfi') +""" +options = VarParsing.VarParsing('tpg') + +options.register ('outFile', + 'testtest.txt', + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "Output file") + +options.parseArguments() +""" +# Calo geometry service model +#process.load("Configuration.StandardSequences.GeometryDB_cff") + +# ecal mapping +process.eegeom = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalMappingRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +# Get hardcoded conditions the same used for standard digitization before CMSSW_3_1_x +## process.load("CalibCalorimetry.EcalTrivialCondModules.EcalTrivialCondRetriever_cfi") +# or Get DB parameters +# process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff') +process.load("CondCore.CondDB.CondDB_cfi") + +process.CondDB.connect = 'frontier://FrontierProd/CMS_CONDITIONS' +process.CondDB.DBParameters.authenticationPath = '/nfshome0/popcondev/conddb' ###P5 stuff + +""" +process.PoolDBESSource = cms.ESSource("PoolDBESSource", + process.CondDB, + timetype = cms.untracked.string('runnumber'), + toGet = cms.VPSet( + cms.PSet( + record = cms.string('EcalPedestalsRcd'), + #tag = cms.string('EcalPedestals_v5_online') + #tag = cms.string('EcalPedestals_2009runs_hlt') ### obviously diff w.r.t previous + tag = cms.string('EcalPedestals_hlt'), ### modif-alex 22/02/2011 + ), + cms.PSet( + record = cms.string('EcalMappingElectronicsRcd'), + tag = cms.string('EcalMappingElectronics_EEMap_v1_mc') + ) + ) + ) +""" + +######################### +process.source = cms.Source("EmptySource", + ##firstRun = cms.untracked.uint32(100000000) ### need to use latest run to pick-up update values from DB + firstRun = cms.untracked.uint32(161310) +) + + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(1) +) + + + +process.p = cms.Path(process.EBPhase2TPGParamProducer) 
diff --git a/CalibCalorimetry/EcalPedestalOffsets/interface/TSinglePedEntry.h b/CalibCalorimetry/EcalPedestalOffsets/interface/TSinglePedEntry.h index 984fb71aae27c..b7a0c797666e7 100644 --- a/CalibCalorimetry/EcalPedestalOffsets/interface/TSinglePedEntry.h +++ b/CalibCalorimetry/EcalPedestalOffsets/interface/TSinglePedEntry.h @@ -15,12 +15,14 @@ class TSinglePedEntry { //! ctor TSinglePedEntry(); //! copy ctor - TSinglePedEntry(const TSinglePedEntry &orig); + TSinglePedEntry(const TSinglePedEntry& orig); + //! assignment op + TSinglePedEntry& operator=(const TSinglePedEntry& orig) = default; //! dtor ~TSinglePedEntry(); //! add a single value - void insert(const int &pedestal); + void insert(const int& pedestal); //! get the average of the inserted values double average() const; //! get the RMS of the inserted values diff --git a/CalibCalorimetry/HcalAlgos/interface/HcalLedAnalysis.h b/CalibCalorimetry/HcalAlgos/interface/HcalLedAnalysis.h index fe0f900cb1ee0..b54d6a3ef5bf9 100644 --- a/CalibCalorimetry/HcalAlgos/interface/HcalLedAnalysis.h +++ b/CalibCalorimetry/HcalAlgos/interface/HcalLedAnalysis.h @@ -83,7 +83,6 @@ class HcalLedAnalysis { std::string m_outputFileText; std::string m_outputFileX; std::ofstream m_outFile; - std::ofstream m_logFile; std::ofstream m_outputFileXML; int m_startTS; diff --git a/CalibCalorimetry/HcalAlgos/interface/HcalPedestalAnalysis.h b/CalibCalorimetry/HcalAlgos/interface/HcalPedestalAnalysis.h index 6556b2ef0454a..a4651e6d56143 100644 --- a/CalibCalorimetry/HcalAlgos/interface/HcalPedestalAnalysis.h +++ b/CalibCalorimetry/HcalAlgos/interface/HcalPedestalAnalysis.h @@ -107,7 +107,7 @@ class HcalPedestalAnalysis { std::string m_outputFileROOT; std::string m_outputFileMean; std::string m_outputFileWidth; - std::ofstream m_logFile; + int m_startTS; int m_endTS; int m_nevtsample; diff --git a/CalibCalorimetry/HcalAlgos/src/HcalLedAnalysis.cc b/CalibCalorimetry/HcalAlgos/src/HcalLedAnalysis.cc index fb2c2522ce32c..d14534442ca27 100644 
--- a/CalibCalorimetry/HcalAlgos/src/HcalLedAnalysis.cc +++ b/CalibCalorimetry/HcalAlgos/src/HcalLedAnalysis.cc @@ -1,8 +1,7 @@ - #include "CalibFormats/HcalObjects/interface/HcalDbService.h" #include "CondFormats/HcalObjects/interface/HcalQIECoder.h" #include "CondFormats/HcalObjects/interface/HcalPedestals.h" - +#include "FWCore/MessageLogger/interface/MessageLogger.h" #include "CalibCalorimetry/HcalAlgos/interface/HcalLedAnalysis.h" #include "TFile.h" #include @@ -23,12 +22,12 @@ HcalLedAnalysis::HcalLedAnalysis(const edm::ParameterSet& ps) { m_outputFileText = ps.getUntrackedParameter("outputFileText", ""); m_outputFileX = ps.getUntrackedParameter("outputFileXML", ""); if (!m_outputFileText.empty()) { - cout << "Hcal LED results will be saved to " << m_outputFileText.c_str() << endl; + edm::LogInfo("HcalLedAnalysis") << "Hcal LED results will be saved to " << m_outputFileText.c_str() << endl; m_outFile.open(m_outputFileText.c_str()); } m_outputFileROOT = ps.getUntrackedParameter("outputFileHist", ""); if (!m_outputFileROOT.empty()) { - cout << "Hcal LED histograms will be saved to " << m_outputFileROOT.c_str() << endl; + edm::LogInfo("HcalLedAnalysis") << "Hcal LED histograms will be saved to " << m_outputFileROOT.c_str() << endl; } m_nevtsample = ps.getUntrackedParameter("nevtsample", 9999999); @@ -49,7 +48,6 @@ HcalLedAnalysis::HcalLedAnalysis(const edm::ParameterSet& ps) { m_startTS = 0; m_endTS = ps.getUntrackedParameter("lastTS", 9); m_usecalib = ps.getUntrackedParameter("usecalib", false); - m_logFile.open("HcalLedAnalysis.log"); int runNum = ps.getUntrackedParameter("runNumber", 999999); @@ -555,7 +553,7 @@ void HcalLedAnalysis::LedDone() { // Write the histo file and close it // m_file->Write(); m_file->Close(); - cout << "Hcal histograms written to " << m_outputFileROOT.c_str() << endl; + edm::LogInfo("HcalLedAnalysis") << "Hcal histograms written to " << m_outputFileROOT.c_str() << endl; } 
//----------------------------------------------------------------------------- @@ -573,73 +571,65 @@ void HcalLedAnalysis::processLedEvent(const HBHEDigiCollection& hbhe, // Calib if (m_usecalib) { - try { - if (calib.empty()) - throw (int)calib.size(); - // this is effectively a loop over electronic channels - for (HcalCalibDigiCollection::const_iterator j = calib.begin(); j != calib.end(); ++j) { - const HcalCalibDataFrame digi = (const HcalCalibDataFrame)(*j); - HcalElectronicsId elecId = digi.elecId(); - HcalCalibDetId calibId = digi.id(); - ProcessCalibEvent(elecId.fiberChanId(), - calibId, - digi); //Shouldn't depend on anything in elecId but not sure how else to do it - } - } catch (int i) { - // m_logFile<< "Event with " << i<<" Calib Digis passed." << std::endl; + if (calib.empty()) { + edm::LogError("HcalLedAnalysis") << "Event with " << (int)calib.size() << " Calib Digis passed."; + return; + } + // this is effectively a loop over electronic channels + for (HcalCalibDigiCollection::const_iterator j = calib.begin(); j != calib.end(); ++j) { + const HcalCalibDataFrame digi = (const HcalCalibDataFrame)(*j); + HcalElectronicsId elecId = digi.elecId(); + HcalCalibDetId calibId = digi.id(); + ProcessCalibEvent(elecId.fiberChanId(), + calibId, + digi); //Shouldn't depend on anything in elecId but not sure how else to do it } } // HB + HE - try { - if (hbhe.empty()) - throw (int)hbhe.size(); - // this is effectively a loop over electronic channels - for (HBHEDigiCollection::const_iterator j = hbhe.begin(); j != hbhe.end(); ++j) { - const HBHEDataFrame digi = (const HBHEDataFrame)(*j); - for (int k = 0; k < (int)state.size(); k++) - state[k] = true; - // See if histos exist for this channel, and if not, create them - _meol = hbHists.LEDTRENDS.find(digi.id()); - if (_meol == hbHists.LEDTRENDS.end()) { - SetupLEDHists(0, digi.id(), hbHists.LEDTRENDS); - } - LedHBHEHists(digi.id(), digi, hbHists.LEDTRENDS, cond); + if (hbhe.empty()) { + 
edm::LogError("HcalLedAnalysis") << "Event with " << (int)hbhe.size() << " HBHE Digis passed."; + return; + } + // this is effectively a loop over electronic channels + for (HBHEDigiCollection::const_iterator j = hbhe.begin(); j != hbhe.end(); ++j) { + const HBHEDataFrame digi = (const HBHEDataFrame)(*j); + for (int k = 0; k < (int)state.size(); k++) + state[k] = true; + // See if histos exist for this channel, and if not, create them + _meol = hbHists.LEDTRENDS.find(digi.id()); + if (_meol == hbHists.LEDTRENDS.end()) { + SetupLEDHists(0, digi.id(), hbHists.LEDTRENDS); } - } catch (int i) { - // m_logFile<< "Event with " << i<<" HBHE Digis passed." << std::endl; + LedHBHEHists(digi.id(), digi, hbHists.LEDTRENDS, cond); } // HO - try { - if (ho.empty()) - throw (int)ho.size(); - for (HODigiCollection::const_iterator j = ho.begin(); j != ho.end(); ++j) { - const HODataFrame digi = (const HODataFrame)(*j); - _meol = hoHists.LEDTRENDS.find(digi.id()); - if (_meol == hoHists.LEDTRENDS.end()) { - SetupLEDHists(1, digi.id(), hoHists.LEDTRENDS); - } - LedHOHists(digi.id(), digi, hoHists.LEDTRENDS, cond); + if (ho.empty()) { + edm::LogError("HcalLedAnalysis") << "Event with " << (int)ho.size() << " HO Digis passed."; + return; + } + for (HODigiCollection::const_iterator j = ho.begin(); j != ho.end(); ++j) { + const HODataFrame digi = (const HODataFrame)(*j); + _meol = hoHists.LEDTRENDS.find(digi.id()); + if (_meol == hoHists.LEDTRENDS.end()) { + SetupLEDHists(1, digi.id(), hoHists.LEDTRENDS); } - } catch (int i) { - // m_logFile << "Event with " << i<<" HO Digis passed." 
<< std::endl; + LedHOHists(digi.id(), digi, hoHists.LEDTRENDS, cond); } // HF - try { - if (hf.empty()) - throw (int)hf.size(); - for (HFDigiCollection::const_iterator j = hf.begin(); j != hf.end(); ++j) { - const HFDataFrame digi = (const HFDataFrame)(*j); - _meol = hfHists.LEDTRENDS.find(digi.id()); - if (_meol == hfHists.LEDTRENDS.end()) { - SetupLEDHists(2, digi.id(), hfHists.LEDTRENDS); - } - LedHFHists(digi.id(), digi, hfHists.LEDTRENDS, cond); + if (hf.empty()) { + edm::LogError("HcalLedAnalysis") << "Event with " << (int)hf.size() << " HF Digis passed."; + return; + } + for (HFDigiCollection::const_iterator j = hf.begin(); j != hf.end(); ++j) { + const HFDataFrame digi = (const HFDataFrame)(*j); + _meol = hfHists.LEDTRENDS.find(digi.id()); + if (_meol == hfHists.LEDTRENDS.end()) { + SetupLEDHists(2, digi.id(), hfHists.LEDTRENDS); } - } catch (int i) { - // m_logFile << "Event with " << i<<" HF Digis passed." << std::endl; + LedHFHists(digi.id(), digi, hfHists.LEDTRENDS, cond); } // Call the function every m_nevtsample events diff --git a/CalibCalorimetry/HcalAlgos/src/HcalPedestalAnalysis.cc b/CalibCalorimetry/HcalAlgos/src/HcalPedestalAnalysis.cc index db37e1c639b11..e778bf3cd768d 100644 --- a/CalibCalorimetry/HcalAlgos/src/HcalPedestalAnalysis.cc +++ b/CalibCalorimetry/HcalAlgos/src/HcalPedestalAnalysis.cc @@ -4,6 +4,7 @@ #include "CondFormats/HcalObjects/interface/HcalPedestalWidths.h" #include "Geometry/CaloTopology/interface/HcalTopology.h" #include "CalibCalorimetry/HcalAlgos/interface/HcalPedestalAnalysis.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" #include "TFile.h" #include @@ -35,15 +36,15 @@ HcalPedestalAnalysis::HcalPedestalAnalysis(const edm::ParameterSet& ps) // user cfg parameters m_outputFileMean = ps.getUntrackedParameter("outputFileMeans", ""); if (!m_outputFileMean.empty()) { - cout << "Hcal pedestal means will be saved to " << m_outputFileMean.c_str() << endl; + edm::LogInfo("HcalPedestalAnalysis") << "Hcal pedestal 
means will be saved to " << m_outputFileMean.c_str(); } m_outputFileWidth = ps.getUntrackedParameter("outputFileWidths", ""); if (!m_outputFileWidth.empty()) { - cout << "Hcal pedestal widths will be saved to " << m_outputFileWidth.c_str() << endl; + edm::LogInfo("HcalPedestalAnalysis") << "Hcal pedestal widths will be saved to " << m_outputFileWidth.c_str(); } m_outputFileROOT = ps.getUntrackedParameter("outputFileHist", ""); if (!m_outputFileROOT.empty()) { - cout << "Hcal pedestal histograms will be saved to " << m_outputFileROOT.c_str() << endl; + edm::LogInfo("HcalPedestalAnalysis") << "Hcal pedestal histograms will be saved to " << m_outputFileROOT.c_str(); } m_nevtsample = ps.getUntrackedParameter("nevtsample", 0); // for compatibility with previous versions @@ -55,9 +56,9 @@ HcalPedestalAnalysis::HcalPedestalAnalysis(const edm::ParameterSet& ps) if (m_pedValflag < 0) m_pedValflag = 0; if (m_nevtsample > 0 && m_pedValflag > 0) { - cout << "WARNING - incompatible cfg options: nevtsample = " << m_nevtsample << ", pedValflag = " << m_pedValflag - << endl; - cout << "Setting pedValflag = 0" << endl; + edm::LogWarning("HcalPedestalAnalysis") + << "WARNING - incompatible cfg options: nevtsample = " << m_nevtsample << ", pedValflag = " << m_pedValflag; + edm::LogWarning("HcalPedestalAnalysis") << "Setting pedValflag = 0"; m_pedValflag = 0; } if (m_pedValflag > 1) @@ -144,93 +145,87 @@ void HcalPedestalAnalysis::processEvent(const HBHEDigiCollection& hbhe, // Get data for every CAPID. 
// HBHE - try { - if (hbhe.empty()) - throw (int)hbhe.size(); - for (HBHEDigiCollection::const_iterator j = hbhe.begin(); j != hbhe.end(); ++j) { - const HBHEDataFrame digi = (const HBHEDataFrame)(*j); - m_coder = cond.getHcalCoder(digi.id()); - m_shape = cond.getHcalShape(m_coder); - for (int k = 0; k < (int)state.size(); k++) - state[k] = true; - // here we loop over pairs of time slices, it is more convenient - // in order to extract the correlation matrix - for (int i = m_startTS; i < digi.size() && i <= m_endTS; i++) { - for (int flag = 0; flag < 4; flag++) { - if (i + flag < digi.size() && i + flag <= m_endTS) { - per2CapsHists(flag, 0, digi.id(), digi.sample(i), digi.sample(i + flag), hbHists.PEDTRENDS, cond); - } + if (hbhe.empty()) { + edm::LogError("HcalPedestalAnalysis") << "Event with " << (int)hbhe.size() << " HBHE Digis passed."; + return; + } + for (HBHEDigiCollection::const_iterator j = hbhe.begin(); j != hbhe.end(); ++j) { + const HBHEDataFrame digi = (const HBHEDataFrame)(*j); + m_coder = cond.getHcalCoder(digi.id()); + m_shape = cond.getHcalShape(m_coder); + for (int k = 0; k < (int)state.size(); k++) + state[k] = true; + // here we loop over pairs of time slices, it is more convenient + // in order to extract the correlation matrix + for (int i = m_startTS; i < digi.size() && i <= m_endTS; i++) { + for (int flag = 0; flag < 4; flag++) { + if (i + flag < digi.size() && i + flag <= m_endTS) { + per2CapsHists(flag, 0, digi.id(), digi.sample(i), digi.sample(i + flag), hbHists.PEDTRENDS, cond); } } - if (m_startTS == 0 && m_endTS > 4) { - AllChanHists(digi.id(), - digi.sample(0), - digi.sample(1), - digi.sample(2), - digi.sample(3), - digi.sample(4), - digi.sample(5), - hbHists.PEDTRENDS); - } } - } catch (int i) { - // m_logFile<< "Event with " << i<<" HBHE Digis passed." 
<< std::endl; + if (m_startTS == 0 && m_endTS > 4) { + AllChanHists(digi.id(), + digi.sample(0), + digi.sample(1), + digi.sample(2), + digi.sample(3), + digi.sample(4), + digi.sample(5), + hbHists.PEDTRENDS); + } } // HO - try { - if (ho.empty()) - throw (int)ho.size(); - for (HODigiCollection::const_iterator j = ho.begin(); j != ho.end(); ++j) { - const HODataFrame digi = (const HODataFrame)(*j); - m_coder = cond.getHcalCoder(digi.id()); - for (int i = m_startTS; i < digi.size() && i <= m_endTS; i++) { - for (int flag = 0; flag < 4; flag++) { - if (i + flag < digi.size() && i + flag <= m_endTS) { - per2CapsHists(flag, 1, digi.id(), digi.sample(i), digi.sample(i + flag), hoHists.PEDTRENDS, cond); - } + if (ho.empty()) { + edm::LogError("HcalPedestalAnalysis") << "Event with " << (int)ho.size() << " HO Digis passed."; + return; + } + for (HODigiCollection::const_iterator j = ho.begin(); j != ho.end(); ++j) { + const HODataFrame digi = (const HODataFrame)(*j); + m_coder = cond.getHcalCoder(digi.id()); + for (int i = m_startTS; i < digi.size() && i <= m_endTS; i++) { + for (int flag = 0; flag < 4; flag++) { + if (i + flag < digi.size() && i + flag <= m_endTS) { + per2CapsHists(flag, 1, digi.id(), digi.sample(i), digi.sample(i + flag), hoHists.PEDTRENDS, cond); } } - if (m_startTS == 0 && m_endTS > 4) { - AllChanHists(digi.id(), - digi.sample(0), - digi.sample(1), - digi.sample(2), - digi.sample(3), - digi.sample(4), - digi.sample(5), - hoHists.PEDTRENDS); - } } - } catch (int i) { - // m_logFile << "Event with " << i<<" HO Digis passed." 
<< std::endl; + if (m_startTS == 0 && m_endTS > 4) { + AllChanHists(digi.id(), + digi.sample(0), + digi.sample(1), + digi.sample(2), + digi.sample(3), + digi.sample(4), + digi.sample(5), + hoHists.PEDTRENDS); + } } // HF - try { - if (hf.empty()) - throw (int)hf.size(); - for (HFDigiCollection::const_iterator j = hf.begin(); j != hf.end(); ++j) { - const HFDataFrame digi = (const HFDataFrame)(*j); - m_coder = cond.getHcalCoder(digi.id()); - for (int i = m_startTS; i < digi.size() && i <= m_endTS; i++) { - for (int flag = 0; flag < 4; flag++) { - if (i + flag < digi.size() && i + flag <= m_endTS) { - per2CapsHists(flag, 2, digi.id(), digi.sample(i), digi.sample(i + flag), hfHists.PEDTRENDS, cond); - } + if (hf.empty()) { + edm::LogError("HcalPedestalAnalysis") << "Event with " << (int)hf.size() << " HF Digis passed."; + return; + } + for (HFDigiCollection::const_iterator j = hf.begin(); j != hf.end(); ++j) { + const HFDataFrame digi = (const HFDataFrame)(*j); + m_coder = cond.getHcalCoder(digi.id()); + for (int i = m_startTS; i < digi.size() && i <= m_endTS; i++) { + for (int flag = 0; flag < 4; flag++) { + if (i + flag < digi.size() && i + flag <= m_endTS) { + per2CapsHists(flag, 2, digi.id(), digi.sample(i), digi.sample(i + flag), hfHists.PEDTRENDS, cond); } } - if (m_startTS == 0 && m_endTS > 4) { - AllChanHists(digi.id(), - digi.sample(0), - digi.sample(1), - digi.sample(2), - digi.sample(3), - digi.sample(4), - digi.sample(5), - hfHists.PEDTRENDS); - } } - } catch (int i) { - // m_logFile << "Event with " << i<<" HF Digis passed." 
<< std::endl; + if (m_startTS == 0 && m_endTS > 4) { + AllChanHists(digi.id(), + digi.sample(0), + digi.sample(1), + digi.sample(2), + digi.sample(3), + digi.sample(4), + digi.sample(5), + hfHists.PEDTRENDS); + } } // Call the function every m_nevtsample events if (m_nevtsample > 0) { @@ -678,7 +673,7 @@ int HcalPedestalAnalysis::done(const HcalPedestals* fInputPedestals, hfHists.PEDMEAN->Write(); m_file->Close(); - cout << "Hcal histograms written to " << m_outputFileROOT.c_str() << endl; + edm::LogInfo("HcalPedestalAnalysis") << "Hcal histograms written to " << m_outputFileROOT.c_str(); return (int)m_AllPedsOK; } diff --git a/CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h b/CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h index 8bedf235344f0..96e11bc09818d 100644 --- a/CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h +++ b/CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h @@ -29,6 +29,23 @@ class SiStripHashedDetId { /** Copy constructor. */ SiStripHashedDetId(const SiStripHashedDetId &); + /** Assignment operator. */ + SiStripHashedDetId &operator=(const SiStripHashedDetId &other) { + if (this != &other) { // Self-assignment check + this->id_ = 0; + this->iter_ = other.begin(); + // auxilliary vector to store the list of raw IDs + std::vector raw_ids; + raw_ids.reserve(other.size()); + + // Copy elements from input vector to detIds_ vector + std::copy(other.begin(), other.end(), std::back_inserter(raw_ids)); + + this->init(raw_ids); + } + return *this; + } + /** Public default constructor. 
*/ SiStripHashedDetId(); @@ -56,6 +73,8 @@ class SiStripHashedDetId { inline const_iterator end() const; + inline const size_t size() const { return detIds_.size(); } + private: void init(const std::vector &); diff --git a/CalibFormats/SiStripObjects/src/SiStripHashedDetId.cc b/CalibFormats/SiStripObjects/src/SiStripHashedDetId.cc index 1e789185a7f1d..d8ab17408cd7c 100644 --- a/CalibFormats/SiStripObjects/src/SiStripHashedDetId.cc +++ b/CalibFormats/SiStripObjects/src/SiStripHashedDetId.cc @@ -36,8 +36,15 @@ SiStripHashedDetId::SiStripHashedDetId(const std::vector &det_ids) : detI SiStripHashedDetId::SiStripHashedDetId(const SiStripHashedDetId &input) : detIds_(), id_(0), iter_(detIds_.begin()) { LogTrace(mlCabling_) << "[SiStripHashedDetId::" << __func__ << "]" << " Constructing object..."; - detIds_.reserve(input.end() - input.begin()); - std::copy(input.begin(), input.end(), detIds_.begin()); + + // auxilliary vector to store the list of raw IDs + std::vector raw_ids; + raw_ids.reserve(input.size()); + + // Copy elements from input vector to detIds_ vector + std::copy(input.begin(), input.end(), std::back_inserter(raw_ids)); + + init(raw_ids); } // ----------------------------------------------------------------------------- diff --git a/CalibFormats/SiStripObjects/test/BuildFile.xml b/CalibFormats/SiStripObjects/test/BuildFile.xml index c8787ef0b7f29..eade084ae7e5d 100644 --- a/CalibFormats/SiStripObjects/test/BuildFile.xml +++ b/CalibFormats/SiStripObjects/test/BuildFile.xml @@ -15,4 +15,11 @@ + + + + + + + diff --git a/CalibFormats/SiStripObjects/test/test_catch2_SiStripHashedDetId.cc b/CalibFormats/SiStripObjects/test/test_catch2_SiStripHashedDetId.cc new file mode 100644 index 0000000000000..1b5cae7703782 --- /dev/null +++ b/CalibFormats/SiStripObjects/test/test_catch2_SiStripHashedDetId.cc @@ -0,0 +1,247 @@ +#include "CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h" +#include "CalibFormats/SiStripObjects/interface/SiStripDetInfo.h" +#include 
"CalibTracker/SiStripCommon/interface/SiStripDetInfoFileReader.h" +#include "FWCore/ParameterSet/interface/FileInPath.h" +#include "catch.hpp" + +#include +#include +#include +#include + +TEST_CASE("SiStripHashedDetId testing", "[SiStripHashedDetId]") { + //_____________________________________________________________ + SECTION("Check constructing SiStripHashedDetId from DetId list") { + const auto& detInfo = + SiStripDetInfoFileReader::read(edm::FileInPath(SiStripDetInfoFileReader::kDefaultFile).fullPath()); + const auto& detIds = detInfo.getAllDetIds(); + SiStripHashedDetId hash(detIds); + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Successfully created hash!" << std::endl; + REQUIRE(true); + } + + //_____________________________________________________________ + SECTION("Check SiStripHashedDetId copy constructor") { + const auto& detInfo = + SiStripDetInfoFileReader::read(edm::FileInPath(SiStripDetInfoFileReader::kDefaultFile).fullPath()); + const auto& dets = detInfo.getAllDetIds(); + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " dets.size(): " << dets.size() << std::endl; + + SiStripHashedDetId hash(dets); + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " hash.size(): " << hash.size() << std::endl; + + // Retrieve hashed indices + std::vector hashes; + hashes.clear(); + hashes.reserve(dets.size()); + for (const auto& idet : dets) { + hashes.push_back(hash.hashedIndex(idet)); + } + + std::sort(hashes.begin(), hashes.end()); + + SiStripHashedDetId hash2(hash); + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Successfully copied hash map!" 
<< std::endl; + + // Retrieve hashed indices + std::vector hashes2; + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " hashs2.size(): " << hash2.size() << std::endl; + + hashes2.clear(); + hashes2.reserve(dets.size()); + for (const auto& idet : dets) { + hashes2.push_back(hash2.hashedIndex(idet)); + } + + std::sort(hashes2.begin(), hashes2.end()); + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Successfully sorted second hash map!" << std::endl; + + // Convert vectors to sets for easy set operations + std::set set1(hashes.begin(), hashes.end()); + std::set set2(hashes2.begin(), hashes2.end()); + + std::vector diff1to2, diff2to1; + + // Find elements in vec1 that are not in vec2 + std::set_difference(set1.begin(), set1.end(), set2.begin(), set2.end(), std::inserter(diff1to2, diff1to2.begin())); + + // Find elements in vec2 that are not in vec1 + std::set_difference(set2.begin(), set2.end(), set1.begin(), set1.end(), std::inserter(diff2to1, diff2to1.begin())); + + // Output the differences + if (!diff1to2.empty()) { + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Elements in hash that are not in hash2: "; + for (const auto& elem : diff1to2) { + std::cout << elem << " "; + } + std::cout << std::endl; + } + + if (!diff2to1.empty()) { + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Elements in hash2 that are not in hash: "; + for (const auto& elem : diff2to1) { + std::cout << elem << " "; + } + std::cout << std::endl; + } + + REQUIRE(hashes == hashes2); + } + + //_____________________________________________________________ + SECTION("Check SiStripHashedDetId assignment operator") { + const auto& detInfo = + SiStripDetInfoFileReader::read(edm::FileInPath(SiStripDetInfoFileReader::kDefaultFile).fullPath()); + const auto& dets = detInfo.getAllDetIds(); + + SiStripHashedDetId hash(dets); + SiStripHashedDetId hash2; + + // Retrieve hashed indices + std::vector hashes; + hashes.clear(); + 
hashes.reserve(dets.size()); + for (const auto& idet : dets) { + hashes.push_back(hash.hashedIndex(idet)); + } + + std::sort(hashes.begin(), hashes.end()); + + // assign hash to hash2 + hash2 = hash; + + // Retrieve hashed indices + std::vector hashes2; + hashes2.clear(); + hashes2.reserve(dets.size()); + for (const auto& idet : dets) { + hashes2.push_back(hash2.hashedIndex(idet)); + } + + std::sort(hashes2.begin(), hashes2.end()); + + if (hashes == hashes2) { + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Assigned SiStripHashedDetId matches original one!" << std::endl; + } else { + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Assigned SiStripHashedDetId does not match the original one!" << std::endl; + } + + REQUIRE(hashes == hashes2); + } + + //_____________________________________________________________ + SECTION("Check manipulating SiStripHashedDetId") { + const auto& detInfo = + SiStripDetInfoFileReader::read(edm::FileInPath(SiStripDetInfoFileReader::kDefaultFile).fullPath()); + + const auto& unsortedDets = detInfo.getAllDetIds(); + + // unfortunately SiStripDetInfo::getAllDetIds() returns a const vector + // so in order to sory we're gonna need to copy it first + + std::vector dets; + dets.reserve(unsortedDets.size()); + std::copy(unsortedDets.begin(), unsortedDets.end(), std::back_inserter(dets)); + + // sort the vector of detIds (otherwise the test won't work!) 
+ std::sort(dets.begin(), dets.end()); + + SiStripHashedDetId hash(dets); + + // Retrieve hashed indices + std::vector hashes; + uint32_t istart = time(NULL); + hashes.clear(); + hashes.reserve(dets.size()); + for (const auto& idet : dets) { + hashes.push_back(hash.hashedIndex(idet)); + } + + // Some debug + std::stringstream ss; + ss << "[testSiStripHashedDetId::" << __func__ << "]"; + uint16_t cntr1 = 0; + for (const auto& ii : hashes) { + if (ii == sistrip::invalid32_) { + cntr1++; + ss << std::endl << " Invalid index " << ii; + continue; + } + uint32_t detid = hash.unhashIndex(ii); + std::vector::const_iterator iter = find(dets.begin(), dets.end(), detid); + if (iter == dets.end()) { + cntr1++; + ss << std::endl << " Did not find value " << detid << " at index " << ii - *(hashes.begin()) << " in vector!"; + } else if (ii != static_cast(iter - dets.begin())) { + cntr1++; + ss << std::endl + << " Found same value " << detid << " at different indices " << ii << " and " << iter - dets.begin(); + } + } + + if (cntr1) { + ss << std::endl << " Found " << cntr1 << " incompatible values!"; + } else { + ss << " Found no incompatible values!"; + } + std::cout << ss.str() << std::endl; + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Processed " << hashes.size() << " DetIds in " << (time(NULL) - istart) << " seconds" << std::endl; + + REQUIRE(cntr1 == 0); + + // Retrieve DetIds + std::vector detids; + uint32_t jstart = time(NULL); + // meaasurement! 
+ detids.clear(); + detids.reserve(dets.size()); + for (uint16_t idet = 0; idet < dets.size(); ++idet) { + detids.push_back(hash.unhashIndex(idet)); + } + + // Some debug + std::stringstream sss; + sss << "[testSiStripHashedDetId::" << __func__ << "]"; + uint16_t cntr2 = 0; + std::vector::const_iterator iii = detids.begin(); + for (; iii != detids.end(); ++iii) { + if (*iii != dets.at(iii - detids.begin())) { + cntr2++; + sss << std::endl + << " Diff values " << *iii << " and " << dets.at(iii - detids.begin()) << " found at index " + << iii - detids.begin() << " "; + } + } + if (cntr2) { + sss << std::endl << " Found " << cntr2 << " incompatible values!"; + } else { + sss << " Found no incompatible values!"; + } + std::cout << sss.str() << std::endl; + + std::cout << "[testSiStripHashedDetId::" << __func__ << "]" + << " Processed " << detids.size() << " hashed indices in " << (time(NULL) - jstart) << " seconds" + << std::endl; + + REQUIRE(cntr2 == 0); + + REQUIRE(true); + } +} diff --git a/CalibFormats/SiStripObjects/test/test_catch2_main.cc b/CalibFormats/SiStripObjects/test/test_catch2_main.cc new file mode 100644 index 0000000000000..e6d1d565b15c0 --- /dev/null +++ b/CalibFormats/SiStripObjects/test/test_catch2_main.cc @@ -0,0 +1,2 @@ +#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file +#include "catch.hpp" diff --git a/CalibMuon/DTCalibration/plugins/DTGeometryParserFromDDD.cc b/CalibMuon/DTCalibration/plugins/DTGeometryParserFromDDD.cc index ab9c57624aac6..62462e6c70c59 100644 --- a/CalibMuon/DTCalibration/plugins/DTGeometryParserFromDDD.cc +++ b/CalibMuon/DTCalibration/plugins/DTGeometryParserFromDDD.cc @@ -24,16 +24,14 @@ DTGeometryParserFromDDD::DTGeometryParserFromDDD( } catch (const cms::Exception& e) { std::cerr << "DTGeometryParserFromDDD::build() : DDD Exception: something went wrong during XML parsing!" << std::endl - << " Message: " << e << std::endl - << " Terminating execution ... 
" << std::endl; + << " Message: " << e << std::endl; throw; } catch (const exception& e) { std::cerr << "DTGeometryParserFromDDD::build() : an unexpected exception occured: " << e.what() << std::endl; throw; } catch (...) { - std::cerr << "DTGeometryParserFromDDD::build() : An unexpected exception occured!" << std::endl - << " Terminating execution ... " << std::endl; - std::unexpected(); + std::cerr << "DTGeometryParserFromDDD::build() : An unexpected exception occured!" << std::endl; + throw; } } diff --git a/CalibPPS/ESProducers/plugins/CTPPSBeamParametersFromLHCInfoESSource.cc b/CalibPPS/ESProducers/plugins/CTPPSBeamParametersFromLHCInfoESSource.cc index 27b33c4eeada6..1b921e643c666 100644 --- a/CalibPPS/ESProducers/plugins/CTPPSBeamParametersFromLHCInfoESSource.cc +++ b/CalibPPS/ESProducers/plugins/CTPPSBeamParametersFromLHCInfoESSource.cc @@ -13,7 +13,7 @@ #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "CondFormats/DataRecord/interface/LHCInfoRcd.h" +#include "CondTools/RunInfo/interface/LHCInfoCombined.h" #include "CondFormats/DataRecord/interface/CTPPSBeamParametersRcd.h" #include "CondFormats/RunInfo/interface/LHCInfo.h" @@ -31,7 +31,10 @@ class CTPPSBeamParametersFromLHCInfoESSource : public edm::ESProducer { static void fillDescriptions(edm::ConfigurationDescriptions&); private: - const edm::ESGetToken lhcInfoToken_; + edm::ESGetToken lhcInfoToken_; + edm::ESGetToken lhcInfoPerLSToken_; + edm::ESGetToken lhcInfoPerFillToken_; + const bool useNewLHCInfo_; CTPPSBeamParameters defaultParameters_; }; @@ -39,8 +42,12 @@ class CTPPSBeamParametersFromLHCInfoESSource : public edm::ESProducer { //---------------------------------------------------------------------------------------------------- CTPPSBeamParametersFromLHCInfoESSource::CTPPSBeamParametersFromLHCInfoESSource(const edm::ParameterSet& iConfig) - : lhcInfoToken_( - setWhatProduced(this).consumes(edm::ESInputTag("", 
iConfig.getParameter("lhcInfoLabel")))) { + : useNewLHCInfo_(iConfig.getParameter("useNewLHCInfo")) { + auto cc = setWhatProduced(this); + lhcInfoToken_ = cc.consumes(edm::ESInputTag("", iConfig.getParameter("lhcInfoLabel"))); + lhcInfoPerLSToken_ = cc.consumes(edm::ESInputTag("", iConfig.getParameter("lhcInfoPerLSLabel"))); + lhcInfoPerFillToken_ = cc.consumes(edm::ESInputTag("", iConfig.getParameter("lhcInfoPerFillLabel"))); + defaultParameters_.setBeamDivergenceX45(iConfig.getParameter("beamDivX45")); defaultParameters_.setBeamDivergenceY45(iConfig.getParameter("beamDivX56")); defaultParameters_.setBeamDivergenceX56(iConfig.getParameter("beamDivY45")); @@ -62,13 +69,16 @@ CTPPSBeamParametersFromLHCInfoESSource::CTPPSBeamParametersFromLHCInfoESSource(c std::unique_ptr CTPPSBeamParametersFromLHCInfoESSource::produce( const CTPPSBeamParametersRcd& iRecord) { - LHCInfo const& lhcInfo = iRecord.get(lhcInfoToken_); + auto lhcInfoCombined = + LHCInfoCombined::createLHCInfoCombined>( + iRecord, lhcInfoPerLSToken_, lhcInfoPerFillToken_, lhcInfoToken_, useNewLHCInfo_); auto bp = std::make_unique(defaultParameters_); - const auto beamMom = lhcInfo.energy(); - const auto betaStar = lhcInfo.betaStar() * 1E2; // conversion m --> cm - const auto xangle = lhcInfo.crossingAngle() * 1E-6; // conversion mu rad --> rad + const auto beamMom = lhcInfoCombined.energy; + const auto betaStar = lhcInfoCombined.betaStarX * 1E2; // conversion m --> cm + const auto xangle = lhcInfoCombined.crossingAngle() * 1E-6; // conversion mu rad --> rad bp->setBeamMom45(beamMom); bp->setBeamMom56(beamMom); @@ -92,6 +102,9 @@ void CTPPSBeamParametersFromLHCInfoESSource::fillDescriptions(edm::Configuration edm::ParameterSetDescription desc; desc.add("lhcInfoLabel", ""); + desc.add("lhcInfoPerLSLabel", ""); + desc.add("lhcInfoPerFillLabel", ""); + desc.add("useNewLHCInfo", false); // beam divergence (rad) desc.add("beamDivX45", 0.1); @@ -112,7 +125,7 @@ void 
CTPPSBeamParametersFromLHCInfoESSource::fillDescriptions(edm::Configuration desc.add("vtxStddevY", 2.e-2); desc.add("vtxStddevZ", 2.e-2); - descriptions.add("ctppsBeamParametersFromLHCInfoESSource", desc); + descriptions.add("ctppsBeamParametersFromLHCInfoESSourceDefault", desc); } //---------------------------------------------------------------------------------------------------- diff --git a/CalibPPS/ESProducers/plugins/CTPPSInterpolatedOpticalFunctionsESSource.cc b/CalibPPS/ESProducers/plugins/CTPPSInterpolatedOpticalFunctionsESSource.cc index e4d0f26bb3c5d..fb25ff49ebee1 100644 --- a/CalibPPS/ESProducers/plugins/CTPPSInterpolatedOpticalFunctionsESSource.cc +++ b/CalibPPS/ESProducers/plugins/CTPPSInterpolatedOpticalFunctionsESSource.cc @@ -62,7 +62,7 @@ void CTPPSInterpolatedOpticalFunctionsESSource::fillDescriptions(edm::Configurat desc.add("opticsLabel", "")->setComment("label of the optics records"); desc.add("useNewLHCInfo", false)->setComment("flag whether to use new LHCInfoPer* records or old LHCInfo"); - descriptions.add("ctppsInterpolatedOpticalFunctionsESSource", desc); + descriptions.add("ctppsInterpolatedOpticalFunctionsESSourceDefault", desc); } //---------------------------------------------------------------------------------------------------- diff --git a/CalibPPS/ESProducers/python/ctppsBeamParametersFromLHCInfoESSource_cfi.py b/CalibPPS/ESProducers/python/ctppsBeamParametersFromLHCInfoESSource_cfi.py new file mode 100644 index 0000000000000..b03973c02cc2a --- /dev/null +++ b/CalibPPS/ESProducers/python/ctppsBeamParametersFromLHCInfoESSource_cfi.py @@ -0,0 +1,8 @@ +from CalibPPS.ESProducers.ctppsBeamParametersFromLHCInfoESSourceDefault_cfi import ctppsBeamParametersFromLHCInfoESSourceDefault as _ctppsBeamParametersFromLHCInfoESSourceDefault +ctppsBeamParametersFromLHCInfoESSource = _ctppsBeamParametersFromLHCInfoESSourceDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common 
+run3_common.toModify(ctppsBeamParametersFromLHCInfoESSource, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsBeamParametersFromLHCInfoESSource, useNewLHCInfo = False) diff --git a/CalibPPS/ESProducers/python/ctppsInterpolatedOpticalFunctionsESSource_cff.py b/CalibPPS/ESProducers/python/ctppsInterpolatedOpticalFunctionsESSource_cff.py deleted file mode 100644 index e6cc78ccdd3f9..0000000000000 --- a/CalibPPS/ESProducers/python/ctppsInterpolatedOpticalFunctionsESSource_cff.py +++ /dev/null @@ -1,3 +0,0 @@ -from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cfi import * -from Configuration.Eras.Modifier_run3_common_cff import run3_common -run3_common.toModify(ctppsInterpolatedOpticalFunctionsESSource, useNewLHCInfo = True) \ No newline at end of file diff --git a/CalibPPS/ESProducers/python/ctppsInterpolatedOpticalFunctionsESSource_cfi.py b/CalibPPS/ESProducers/python/ctppsInterpolatedOpticalFunctionsESSource_cfi.py new file mode 100644 index 0000000000000..25426226c1be7 --- /dev/null +++ b/CalibPPS/ESProducers/python/ctppsInterpolatedOpticalFunctionsESSource_cfi.py @@ -0,0 +1,8 @@ +from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSourceDefault_cfi import ctppsInterpolatedOpticalFunctionsESSourceDefault as _ctppsInterpolatedOpticalFunctionsESSourceDefault +ctppsInterpolatedOpticalFunctionsESSource = _ctppsInterpolatedOpticalFunctionsESSourceDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(ctppsInterpolatedOpticalFunctionsESSource, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsInterpolatedOpticalFunctionsESSource, useNewLHCInfo = False) diff --git a/CalibPPS/ESProducers/python/ctppsOpticalFunctions_cff.py b/CalibPPS/ESProducers/python/ctppsOpticalFunctions_cff.py index 00017a8690f84..7a123f6792d0d 100644 --- 
a/CalibPPS/ESProducers/python/ctppsOpticalFunctions_cff.py +++ b/CalibPPS/ESProducers/python/ctppsOpticalFunctions_cff.py @@ -25,4 +25,4 @@ #ctppsOpticalFunctionsESSource.configuration.append(config_2016_preTS2) # optics interpolation between crossing angles -from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cff import * +from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cfi import * diff --git a/CalibPPS/ESProducers/python/ctppsOpticalFunctions_non_DB_cff.py b/CalibPPS/ESProducers/python/ctppsOpticalFunctions_non_DB_cff.py index 72f1a07f4bbdd..946d7b3b68e1a 100644 --- a/CalibPPS/ESProducers/python/ctppsOpticalFunctions_non_DB_cff.py +++ b/CalibPPS/ESProducers/python/ctppsOpticalFunctions_non_DB_cff.py @@ -136,4 +136,4 @@ ctppsOpticalFunctionsESSource.configuration.append(optics_2022) # optics interpolation between crossing angles -from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cff import * +from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cfi import * diff --git a/CalibTracker/Records/interface/SiPixelGainCalibrationForHLTSoARcd.h b/CalibTracker/Records/interface/SiPixelGainCalibrationForHLTSoARcd.h new file mode 100644 index 0000000000000..f0f2e5f5103ab --- /dev/null +++ b/CalibTracker/Records/interface/SiPixelGainCalibrationForHLTSoARcd.h @@ -0,0 +1,14 @@ +#ifndef CalibTracker_Records_SiPixelGainCalibrationForHLTSoARcd_h +#define CalibTracker_Records_SiPixelGainCalibrationForHLTSoARcd_h + +#include "CondFormats/DataRecord/interface/SiPixelGainCalibrationForHLTRcd.h" +#include "FWCore/Framework/interface/DependentRecordImplementation.h" +#include "FWCore/Framework/interface/EventSetupRecordImplementation.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" + +class SiPixelGainCalibrationForHLTSoARcd + : public edm::eventsetup::DependentRecordImplementation< + SiPixelGainCalibrationForHLTSoARcd, + edm::mpl::Vector> {}; + +#endif // 
CalibTracker_Records_SiPixelGainCalibrationForHLTSoARcd_h diff --git a/CalibTracker/Records/interface/SiPixelMappingSoARecord.h b/CalibTracker/Records/interface/SiPixelMappingSoARecord.h new file mode 100644 index 0000000000000..d8c31754cd8d9 --- /dev/null +++ b/CalibTracker/Records/interface/SiPixelMappingSoARecord.h @@ -0,0 +1,17 @@ +#ifndef CalibTracker_Records_interface_SiPixelMappingSoARecord_h +#define CalibTracker_Records_interface_SiPixelMappingSoARecord_h + +#include "CondFormats/DataRecord/interface/SiPixelFedCablingMapRcd.h" +#include "CondFormats/DataRecord/interface/SiPixelGainCalibrationForHLTRcd.h" +#include "CondFormats/DataRecord/interface/SiPixelQualityRcd.h" +#include "FWCore/Framework/interface/DependentRecordImplementation.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" + +class SiPixelMappingSoARecord + : public edm::eventsetup::DependentRecordImplementation> {}; + +#endif // CalibTracker_Records_interface_SiPixelMappingSoARecord_h diff --git a/CalibTracker/Records/src/SiPixelGainCalibrationForHLTSoARcd.cc b/CalibTracker/Records/src/SiPixelGainCalibrationForHLTSoARcd.cc new file mode 100644 index 0000000000000..6634cee007301 --- /dev/null +++ b/CalibTracker/Records/src/SiPixelGainCalibrationForHLTSoARcd.cc @@ -0,0 +1,5 @@ +#include "CalibTracker/Records/interface/SiPixelGainCalibrationForHLTSoARcd.h" +#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" +#include "FWCore/Utilities/interface/typelookup.h" + +EVENTSETUP_RECORD_REG(SiPixelGainCalibrationForHLTSoARcd); diff --git a/CalibTracker/Records/src/SiPixelMappingSoARcd.cc b/CalibTracker/Records/src/SiPixelMappingSoARcd.cc new file mode 100644 index 0000000000000..fea2c978c1539 --- /dev/null +++ b/CalibTracker/Records/src/SiPixelMappingSoARcd.cc @@ -0,0 +1,5 @@ +#include "CalibTracker/Records/interface/SiPixelMappingSoARecord.h" +#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" +#include 
"FWCore/Utilities/interface/typelookup.h" + +EVENTSETUP_RECORD_REG(SiPixelMappingSoARecord); diff --git a/CalibTracker/SiPixelESProducers/plugins/BuildFile.xml b/CalibTracker/SiPixelESProducers/plugins/BuildFile.xml index 05446593b6229..8de546ff8856b 100644 --- a/CalibTracker/SiPixelESProducers/plugins/BuildFile.xml +++ b/CalibTracker/SiPixelESProducers/plugins/BuildFile.xml @@ -1,4 +1,3 @@ - @@ -11,6 +10,14 @@ + + + + + + + + diff --git a/CalibTracker/SiPixelESProducers/plugins/alpaka/SiPixelCablingSoAESProducer.cc b/CalibTracker/SiPixelESProducers/plugins/alpaka/SiPixelCablingSoAESProducer.cc new file mode 100644 index 0000000000000..37f4bc6bd5945 --- /dev/null +++ b/CalibTracker/SiPixelESProducers/plugins/alpaka/SiPixelCablingSoAESProducer.cc @@ -0,0 +1,140 @@ +#include "CalibTracker/Records/interface/SiPixelMappingSoARecord.h" +#include "CondFormats/DataRecord/interface/SiPixelFedCablingMapRcd.h" +#include "CondFormats/DataRecord/interface/SiPixelQualityRcd.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingMap.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingTree.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelMappingHost.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelQuality.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" 
+#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + + class SiPixelCablingSoAESProducer : public ESProducer { + public: + SiPixelCablingSoAESProducer(edm::ParameterSet const& iConfig) + : ESProducer(iConfig), useQuality_(iConfig.getParameter("UseQualityInfo")) { + auto cc = setWhatProduced(this); + cablingMapToken_ = cc.consumes(edm::ESInputTag{"", iConfig.getParameter("CablingMapLabel")}); + if (useQuality_) { + qualityToken_ = cc.consumes(); + } + geometryToken_ = cc.consumes(); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("CablingMapLabel", "")->setComment("CablingMap label"); + desc.add("UseQualityInfo", false); + descriptions.addWithDefaultLabel(desc); + } + + std::optional produce(const SiPixelMappingSoARecord& iRecord) { + auto cablingMap = iRecord.getTransientHandle(cablingMapToken_); + const SiPixelQuality* quality = nullptr; + if (useQuality_) { + auto qualityInfo = iRecord.getTransientHandle(qualityToken_); + quality = qualityInfo.product(); + } + + auto geom = iRecord.getTransientHandle(geometryToken_); + SiPixelMappingHost product(pixelgpudetails::MAX_SIZE, cms::alpakatools::host()); + std::vector const& fedIds = cablingMap->fedIds(); + std::unique_ptr const& cabling = cablingMap->cablingTree(); + + unsigned int startFed = fedIds.front(); + unsigned int endFed = fedIds.back(); + + sipixelobjects::CablingPathToDetUnit path; + int index = 1; + + auto mapView = product.view(); + + mapView.hasQuality() = useQuality_; + for (unsigned int fed = startFed; fed <= endFed; fed++) { + for (unsigned int link = 1; link <= pixelgpudetails::MAX_LINK; link++) { + for (unsigned int roc = 1; roc <= pixelgpudetails::MAX_ROC; roc++) { + path = {fed, link, roc}; + const sipixelobjects::PixelROC* pixelRoc = cabling->findItem(path); + mapView[index].fed() = fed; + mapView[index].link() = link; 
+ mapView[index].roc() = roc; + if (pixelRoc != nullptr) { + mapView[index].rawId() = pixelRoc->rawId(); + mapView[index].rocInDet() = pixelRoc->idInDetUnit(); + mapView[index].modToUnpDefault() = false; + if (quality != nullptr) + mapView[index].badRocs() = quality->IsRocBad(pixelRoc->rawId(), pixelRoc->idInDetUnit()); + else + mapView[index].badRocs() = false; + } else { // store some dummy number + mapView[index].rawId() = pixelClustering::invalidModuleId; + mapView[index].rocInDet() = pixelClustering::invalidModuleId; + mapView[index].badRocs() = true; + mapView[index].modToUnpDefault() = true; + } + index++; + } + } + } // end of FED loop + // Given FedId, Link and idinLnk; use the following formula + // to get the rawId and idinDU + // index = (FedID-1200) * MAX_LINK* MAX_ROC + (Link-1)* MAX_ROC + idinLnk; + // where, MAX_LINK = 48, MAX_ROC = 8 + // FedID varies between 1200 to 1338 (In total 108 FED's) + // Link varies between 1 to 48 + // idinLnk varies between 1 to 8 + + auto trackerGeom = iRecord.getTransientHandle(geometryToken_); + + for (int i = 1; i < index; i++) { + if (mapView[i].rawId() == pixelClustering::invalidModuleId) { + mapView[i].moduleId() = pixelClustering::invalidModuleId; + } else { + auto gdet = trackerGeom->idToDetUnit(mapView[i].rawId()); + if (!gdet) { + LogDebug("SiPixelCablingSoAESProducer") << " Not found: " << mapView[i].rawId() << std::endl; + continue; + } + mapView[i].moduleId() = gdet->index(); + } + LogDebug("SiPixelCablingSoAESProducer") + << "----------------------------------------------------------------------------" << std::endl; + LogDebug("SiPixelCablingSoAESProducer") << i << std::setw(20) << mapView[i].fed() << std::setw(20) + << mapView[i].link() << std::setw(20) << mapView[i].roc() << std::endl; + LogDebug("SiPixelCablingSoAESProducer") + << i << std::setw(20) << mapView[i].rawId() << std::setw(20) << mapView[i].rocInDet() << std::setw(20) + << mapView[i].moduleId() << std::endl; + 
LogDebug("SiPixelCablingSoAESProducer") + << i << std::setw(20) << mapView[i].badRocs() << std::setw(20) << std::endl; + LogDebug("SiPixelCablingSoAESProducer") + << "----------------------------------------------------------------------------" << std::endl; + } + + mapView.size() = index - 1; + + return product; + } + + private: + edm::ESGetToken cablingMapToken_; + edm::ESGetToken qualityToken_; + edm::ESGetToken geometryToken_; + const bool useQuality_; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(SiPixelCablingSoAESProducer); diff --git a/CalibTracker/SiPixelESProducers/plugins/alpaka/SiPixelGainCalibrationForHLTSoAESProducer.cc b/CalibTracker/SiPixelESProducers/plugins/alpaka/SiPixelGainCalibrationForHLTSoAESProducer.cc new file mode 100644 index 0000000000000..935d141793a40 --- /dev/null +++ b/CalibTracker/SiPixelESProducers/plugins/alpaka/SiPixelGainCalibrationForHLTSoAESProducer.cc @@ -0,0 +1,128 @@ +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTHost.h" +#include "CalibTracker/Records/interface/SiPixelGainCalibrationForHLTSoARcd.h" +#include "CondFormats/DataRecord/interface/SiPixelGainCalibrationForHLTRcd.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLT.h" +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "FWCore/Framework/interface/ESProducer.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/ModuleFactory.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/CommonDetUnit/interface/GeomDetType.h" + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include 
"HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +#include + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + class SiPixelGainCalibrationForHLTSoAESProducer : public ESProducer { + public: + explicit SiPixelGainCalibrationForHLTSoAESProducer(const edm::ParameterSet& iConfig); + std::unique_ptr produce(const SiPixelGainCalibrationForHLTSoARcd& iRecord); + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + edm::ESGetToken gainsToken_; + edm::ESGetToken geometryToken_; + }; + + SiPixelGainCalibrationForHLTSoAESProducer::SiPixelGainCalibrationForHLTSoAESProducer(const edm::ParameterSet& iConfig) + : ESProducer(iConfig) { + auto cc = setWhatProduced(this); + gainsToken_ = cc.consumes(); + geometryToken_ = cc.consumes(); + } + + void SiPixelGainCalibrationForHLTSoAESProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + descriptions.addWithDefaultLabel(desc); + } + + std::unique_ptr SiPixelGainCalibrationForHLTSoAESProducer::produce( + const SiPixelGainCalibrationForHLTSoARcd& iRecord) { + auto const& gains = iRecord.get(gainsToken_); + auto const& geom = iRecord.get(geometryToken_); + + auto product = std::make_unique(gains.data().size(), cms::alpakatools::host()); + + // bizzarre logic (looking for fist strip-det) don't ask + auto const& dus = geom.detUnits(); + unsigned int n_detectors = dus.size(); + for (unsigned int i = 1; i < 7; ++i) { + const auto offset = geom.offsetDU(GeomDetEnumerators::tkDetEnum[i]); + if (offset != dus.size() && dus[offset]->type().isTrackerStrip()) { + if (n_detectors > offset) + n_detectors = offset; + } + } + + LogDebug("SiPixelGainCalibrationForHLTSoA") + << "caching calibs for " << n_detectors << " pixel detectors of size " << 
gains.data().size() << '\n' + << "sizes " << sizeof(char) << ' ' << sizeof(uint8_t) << ' ' << sizeof(siPixelGainsSoA::DecodingStructure); + + for (size_t i = 0; i < gains.data().size(); i = i + 2) { + product->view().v_pedestals()[i / 2].gain = gains.data()[i]; + product->view().v_pedestals()[i / 2].ped = gains.data()[i + 1]; + } + + //std::copy here + // do not read back from the (possibly write-combined) memory buffer + auto minPed = gains.getPedLow(); + auto maxPed = gains.getPedHigh(); + auto minGain = gains.getGainLow(); + auto maxGain = gains.getGainHigh(); + auto nBinsToUseForEncoding = 253; + + // we will simplify later (not everything is needed....) + product->view().minPed() = minPed; + product->view().maxPed() = maxPed; + product->view().minGain() = minGain; + product->view().maxGain() = maxGain; + + product->view().numberOfRowsAveragedOver() = 80; + product->view().nBinsToUseForEncoding() = nBinsToUseForEncoding; + product->view().deadFlag() = 255; + product->view().noisyFlag() = 254; + + product->view().pedPrecision() = static_cast(maxPed - minPed) / nBinsToUseForEncoding; + product->view().gainPrecision() = static_cast(maxGain - minGain) / nBinsToUseForEncoding; + + LogDebug("SiPixelGainCalibrationForHLTSoA") + << "precisions g " << product->view().pedPrecision() << ' ' << product->view().gainPrecision(); + + // fill the index map + auto const& ind = gains.getIndexes(); + LogDebug("SiPixelGainCalibrationForHLTSoA") << ind.size() << " " << n_detectors; + + for (auto i = 0U; i < n_detectors; ++i) { + auto p = std::lower_bound( + ind.begin(), ind.end(), dus[i]->geographicalId().rawId(), SiPixelGainCalibrationForHLT::StrictWeakOrdering()); + assert(p != ind.end() && p->detid == dus[i]->geographicalId()); + assert(p->iend <= gains.data().size()); + assert(p->iend >= p->ibegin); + assert(0 == p->ibegin % 2); + assert(0 == p->iend % 2); + assert(p->ibegin != p->iend); + assert(p->ncols > 0); + + product->view().modStarts()[i] = p->ibegin; + 
product->view().modEnds()[i] = p->iend; + product->view().modCols()[i] = p->ncols; + + if (ind[i].detid != dus[i]->geographicalId()) + LogDebug("SiPixelGainCalibrationForHLTSoA") << ind[i].detid << "!=" << dus[i]->geographicalId(); + } + + return product; + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(SiPixelGainCalibrationForHLTSoAESProducer); diff --git a/CalibTracker/SiStripCommon/plugins/SiStripGainCalibTableProducer.cc b/CalibTracker/SiStripCommon/plugins/SiStripGainCalibTableProducer.cc new file mode 100644 index 0000000000000..8a56dcfc9e1cd --- /dev/null +++ b/CalibTracker/SiStripCommon/plugins/SiStripGainCalibTableProducer.cc @@ -0,0 +1,164 @@ +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" + +#include "DataFormats/GeometrySurface/interface/TrapezoidalPlaneBounds.h" +#include "DataFormats/GeometrySurface/interface/RectangularPlaneBounds.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "Geometry/TrackerGeometryBuilder/interface/StripGeomDetUnit.h" +#include "Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" + +#include "CalibFormats/SiStripObjects/interface/SiStripGain.h" +#include "CalibTracker/Records/interface/SiStripGainRcd.h" + +#include "CalibTracker/SiStripCommon/interface/SiStripOnTrackClusterTableProducerBase.h" + +class SiStripGainCalibTableProducer : public SiStripOnTrackClusterTableProducerBase { +public: + explicit SiStripGainCalibTableProducer(const edm::ParameterSet& params) + : SiStripOnTrackClusterTableProducerBase(params), m_tkGeomToken{esConsumes<>()}, m_gainToken{esConsumes<>()} {} + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("name", "cluster"); + desc.add("doc", ""); + desc.add("extension", false); + desc.add("Tracks", edm::InputTag{"generalTracks"}); + 
descriptions.add("siStripGainCalibTable", desc); + } + + void fillTable(const std::vector& clusters, + const edm::View& tracks, + nanoaod::FlatTable* table, + const edm::EventSetup& iSetup) final; + +private: + const edm::ESGetToken m_tkGeomToken; + const edm::ESGetToken m_gainToken; + + std::map m_thicknessMap; + double thickness(DetId id, const TrackerGeometry* tGeom); +}; + +namespace { + bool isFarFromBorder(const TrajectoryStateOnSurface& trajState, uint32_t detId, const TrackerGeometry* tGeom) { + const auto gdu = tGeom->idToDetUnit(detId); + if ((!dynamic_cast(gdu)) && (!dynamic_cast(gdu))) { + edm::LogWarning("SiStripGainCalibTableProducer") + << "DetId " << detId << " does not seem to belong to the tracker"; + return false; + } + const auto plane = gdu->surface(); + const auto trapBounds = dynamic_cast(&plane.bounds()); + const auto rectBounds = dynamic_cast(&plane.bounds()); + + static constexpr double distFromBorder = 1.0; + double halfLength = 0.; + if (trapBounds) { + halfLength = trapBounds->parameters()[3]; + } else if (rectBounds) { + halfLength = .5 * gdu->surface().bounds().length(); + } else { + return false; + } + + const auto pos = trajState.localPosition(); + const auto posError = trajState.localError().positionError(); + if (std::abs(pos.y()) + posError.yy() >= (halfLength - distFromBorder)) + return false; + + return true; + } +} // namespace + +double SiStripGainCalibTableProducer::thickness(DetId id, const TrackerGeometry* tGeom) { + const auto it = m_thicknessMap.find(id); + if (m_thicknessMap.end() != it) { + return it->second; + } else { + double detThickness = 1.; + const auto gdu = tGeom->idToDetUnit(id); + const auto isPixel = (dynamic_cast(gdu) != nullptr); + const auto isStrip = (dynamic_cast(gdu) != nullptr); + if (!isPixel && !isStrip) { + edm::LogWarning("SiStripGainCalibTableProducer") + << "DetId " << id.rawId() << " doesn't seem to belong to the Tracker"; + } else { + detThickness = gdu->surface().bounds().thickness(); + } + 
m_thicknessMap[id] = detThickness; + return detThickness; + } +} + +void SiStripGainCalibTableProducer::fillTable(const std::vector& clusters, + const edm::View& tracks, + nanoaod::FlatTable* table, + const edm::EventSetup& iSetup) { + edm::ESHandle tGeom = iSetup.getHandle(m_tkGeomToken); + edm::ESHandle stripGains = iSetup.getHandle(m_gainToken); + + std::vector c_localdirx; + std::vector c_localdiry; + std::vector c_localdirz; + std::vector c_firststrip; + std::vector c_nstrips; + std::vector c_saturation; + std::vector c_overlapping; + std::vector c_farfromedge; + std::vector c_charge; + std::vector c_path; + // NOTE only very few types are supported by NanoAOD, but more could be added (to discuss with XPOG / core software) + // NOTE removed amplitude vector, I don't think it was used anywhere + std::vector c_gainused; // NOTE was double + std::vector c_gainusedTick; // NOTE was double + for (const auto clus : clusters) { + const auto& ampls = clus.cluster->amplitudes(); + const int firstStrip = clus.cluster->firstStrip(); + const int nStrips = ampls.size(); + double prevGain = -1; + double prevGainTick = -1; + if (stripGains.isValid()) { + prevGain = stripGains->getApvGain(firstStrip / 128, stripGains->getRange(clus.det, 1), 1); + prevGainTick = stripGains->getApvGain(firstStrip / 128, stripGains->getRange(clus.det, 0), 1); + } + const unsigned int charge = clus.cluster->charge(); + const bool saturation = std::any_of(ampls.begin(), ampls.end(), [](uint8_t amp) { return amp >= 254; }); + + const bool overlapping = (((firstStrip % 128) == 0) || ((firstStrip / 128) != ((firstStrip + nStrips) / 128)) || + (((firstStrip + nStrips) % 128) == 127)); + const auto& trajState = clus.measurement.updatedState(); + const auto trackDir = trajState.localDirection(); + const auto cosine = trackDir.z() / trackDir.mag(); + const auto path = (10. 
* thickness(clus.det, tGeom.product())) / std::abs(cosine); + const auto farFromEdge = isFarFromBorder(trajState, clus.det, tGeom.product()); + c_localdirx.push_back(trackDir.x()); + c_localdiry.push_back(trackDir.y()); + c_localdirz.push_back(trackDir.z()); + c_firststrip.push_back(firstStrip); + c_nstrips.push_back(nStrips); + c_saturation.push_back(saturation); + c_overlapping.push_back(overlapping); + c_farfromedge.push_back(farFromEdge); + c_charge.push_back(charge); + c_path.push_back(path); + c_gainused.push_back(prevGain); + c_gainusedTick.push_back(prevGainTick); + } + // addColumn(table, "localdirx", c_localdirx, ""); + // addColumn(table, "localdiry", c_localdiry, ""); + // addColumn(table, "localdirz", c_localdirz, ""); + // addColumn(table, "firststrip", c_firststrip, ""); + // addColumn(table, "nstrips", c_nstrips, ""); + addColumn(table, "saturation", c_saturation, ""); + addColumn(table, "overlapping", c_overlapping, ""); + addColumn(table, "farfromedge", c_farfromedge, ""); + addColumn(table, "charge", c_charge, ""); + // addColumn(table, "path", c_path, ""); + // ExtendedCalibTree: also charge/path + addColumn(table, "gainused", c_gainused, ""); + addColumn(table, "gainusedTick", c_gainusedTick, ""); +} + +#include "FWCore/PluginManager/interface/ModuleDef.h" +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(SiStripGainCalibTableProducer); diff --git a/CalibTracker/SiStripCommon/plugins/TkInstLumiTableProducer.cc b/CalibTracker/SiStripCommon/plugins/TkInstLumiTableProducer.cc new file mode 100644 index 0000000000000..a422e1309ece3 --- /dev/null +++ b/CalibTracker/SiStripCommon/plugins/TkInstLumiTableProducer.cc @@ -0,0 +1,73 @@ +#include "DataFormats/NanoAOD/interface/FlatTable.h" +#include "DataFormats/OnlineMetaData/interface/OnlineLuminosityRecord.h" +#include "DataFormats/Scalers/interface/LumiScalers.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" 
+#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" + +class TkInstLumiTableProducer : public edm::stream::EDProducer<> { +public: + explicit TkInstLumiTableProducer(const edm::ParameterSet& params) + : m_name(params.getParameter("name")), + m_doc(params.existsAs("doc") ? params.getParameter("doc") : ""), + m_extension(params.existsAs("extension") ? params.getParameter("extension") : false), + m_scalerToken(consumes(params.getParameter("lumiScalers"))), + m_metaDataToken(consumes(params.getParameter("metadata"))) { + produces(); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("name", ""); + desc.add("doc", ""); + desc.add("extension", false); + desc.add("lumiScalers", edm::InputTag("scalersRawToDigi")); + desc.add("metadata", edm::InputTag("onlineMetaDataDigis")); + descriptions.add("tkInstLumiTable", desc); + } + + void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; + +private: + const std::string m_name; + const std::string m_doc; + bool m_extension; + + const edm::EDGetTokenT m_scalerToken; + const edm::EDGetTokenT m_metaDataToken; +}; + +void TkInstLumiTableProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + auto out = std::make_unique(1, m_name, true, m_extension); + + out->addColumnValue("bx", iEvent.bunchCrossing(), "Bunch-crossing ID"); + + float instLumi{0.}, pu{0.}; + edm::Handle lumiScalers = iEvent.getHandle(m_scalerToken); + edm::Handle metaData = iEvent.getHandle(m_metaDataToken); + + if (lumiScalers.isValid() && !lumiScalers->empty()) { + if (lumiScalers->begin() != lumiScalers->end()) { + instLumi = lumiScalers->begin()->instantLumi(); + pu = lumiScalers->begin()->pileup(); + } + } else if (metaData.isValid()) { + instLumi = 
metaData->instLumi(); + pu = metaData->avgPileUp(); + } else { + edm::LogInfo("TkInstLumiTableProducer") + << "Luminosity related collections not found in the event; will write dummy values"; + } + + out->addColumnValue("instLumi", instLumi, "Instantaneous luminosity"); + out->addColumnValue("PU", pu, "Pileup"); + + iEvent.put(std::move(out)); +} + +#include "FWCore/PluginManager/interface/ModuleDef.h" +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(TkInstLumiTableProducer); diff --git a/CalibTracker/SiStripCommon/test/testCalibTree_nano.py b/CalibTracker/SiStripCommon/test/testCalibTree_nano.py new file mode 100644 index 0000000000000..ffdf36102538f --- /dev/null +++ b/CalibTracker/SiStripCommon/test/testCalibTree_nano.py @@ -0,0 +1,99 @@ +from __future__ import print_function + +## adapted from produceCalibrationTree_template_cfg.py + +import FWCore.ParameterSet.Config as cms +##from CalibTracker.SiStripCommon.shallowTree_test_template import * ## TODO get rid of this one + +process = cms.Process('CALIB') +process.load('Configuration/StandardSequences/MagneticField_cff') +process.load('Configuration.Geometry.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, "auto:run3_data_PromptAnalysis") + +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('Configuration.StandardSequences.Services_cff') + +process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1)) + +process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring("/store/express/Run2023F/StreamExpress/ALCARECO/SiStripCalMinBias-Express-v1/000/373/710/00000/e2df2f78-b95a-4f33-ae22-add59aa2903f.root")) + +process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) ) +process.MessageLogger.cerr.FwkReport.reportEvery = 10000 + +inTracks = 
cms.InputTag("ALCARECOSiStripCalMinBias") + +process.load('CalibTracker.SiStripCommon.prescaleEvent_cfi') +process.load('CalibTracker.Configuration.Filter_Refit_cff') +## use CalibrationTracks (for clusters) and CalibrationTracksRefit (for tracks) +process.CalibrationTracks.src = inTracks + +tracksForCalib = cms.InputTag("CalibrationTracksRefit") + +process.prescaleEvent.prescale = 1 + +process.TkCalSeq = cms.Sequence(process.prescaleEvent*process.MeasurementTrackerEvent*process.trackFilterRefit) + +process.load("PhysicsTools.NanoAOD.nano_cff") +process.load("PhysicsTools.NanoAOD.NanoAODEDMEventContent_cff") + +## as a test: it should be possible to add tracks fully at configuration level (+ declaring the plugin) +from PhysicsTools.NanoAOD.common_cff import * +## this is equivalent to ShallowTrackProducer as configured for the gain calibration +process.tracksTable = cms.EDProducer("SimpleTrackFlatTableProducer", + src=tracksForCalib, + cut=cms.string(""), + name=cms.string("track"), + doc=cms.string("SiStripCalMinBias ALCARECO tracks"), + singleton=cms.bool(False), + extension=cms.bool(False), + variables=cms.PSet( + #chi2=Var("chi2()", float), + #ndof=Var("ndof()", int), + chi2ndof=Var("chi2()/ndof", float), + #charge=Var("charge()", float), + momentum=Var("p()", float), + pt=Var("pt()", float), + #pterr=Var("ptError()", float), + hitsvalid=Var("numberOfValidHits()", int), ## unsigned? + #hitslost=Var("numberOfLostHits()", int), ## unsigned? 
+ #theta=Var("theta()", float), + #thetaerr=Var("thetaError()", float), + phi=Var("phi()", float), + #phierr=Var("phiError()", float), + eta=Var("eta()", float), + #etaerr=Var("etaError()", float), + #dxy=Var("dxy()", float), + #dxyerr=Var("dxyError()", float), + #dsz=Var("dsz()", float), + #dszerr=Var("dszError()", float), + #qoverp=Var("qoverp()", float), + #qoverperr=Var("qoverpError()", float), + #vx=Var("vx()", float), + #vy=Var("vy()", float), + #vz=Var("vz()", float), + algo=Var("algo()", int) + ) + ) +process.load("CalibTracker.SiStripCommon.tkInstLumiTable_cfi") +process.tkInstLumiTable.extension = True +process.load("CalibTracker.SiStripCommon.siStripPositionCorrectionsTable_cfi") +process.load("CalibTracker.SiStripCommon.siStripGainCalibTable_cfi") +process.siStripPositionCorrectionsTable.Tracks = tracksForCalib +process.siStripGainCalibTable.Tracks = tracksForCalib + +process.nanoCTPath = cms.Path(process.TkCalSeq * + process.nanoMetadata * + process.tkInstLumiTable * + process.tracksTable * + process.siStripPositionCorrectionsTable) #* + #process.siStripGainCalibTable) + +process.out = cms.OutputModule("NanoAODOutputModule", + fileName=cms.untracked.string("CalibTreeMC_nano.root"), + outputCommands=process.NANOAODEventContent.outputCommands) + +process.end = cms.EndPath(process.out) diff --git a/CalibTracker/SiStripCommon/test/testCalibTree_nano_G2.py b/CalibTracker/SiStripCommon/test/testCalibTree_nano_G2.py new file mode 100644 index 0000000000000..b532ee28b5dff --- /dev/null +++ b/CalibTracker/SiStripCommon/test/testCalibTree_nano_G2.py @@ -0,0 +1,77 @@ +from __future__ import print_function + +## adapted from produceCalibrationTree_template_cfg.py + +import FWCore.ParameterSet.Config as cms +##from CalibTracker.SiStripCommon.shallowTree_test_template import * ## TODO get rid of this one + +process = cms.Process('CALIB') +process.load('Configuration/StandardSequences/MagneticField_cff') +process.load('Configuration.Geometry.GeometryRecoDB_cff') 
+process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, "auto:run3_data_PromptAnalysis") + +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('Configuration.StandardSequences.Services_cff') + +process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1)) + +process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring("/store/express/Run2023F/StreamExpress/ALCARECO/SiStripCalMinBias-Express-v1/000/373/710/00000/e2df2f78-b95a-4f33-ae22-add59aa2903f.root")) + +process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) ) +process.MessageLogger.cerr.FwkReport.reportEvery = 10000 + +inTracks = cms.InputTag("ALCARECOSiStripCalMinBias") + +process.load('CalibTracker.SiStripCommon.prescaleEvent_cfi') +process.load('CalibTracker.Configuration.Filter_Refit_cff') +## use CalibrationTracks (for clusters) and CalibrationTracksRefit (for tracks) +process.CalibrationTracks.src = inTracks + +tracksForCalib = cms.InputTag("CalibrationTracksRefit") + +process.prescaleEvent.prescale = 1 + +process.TkCalSeq = cms.Sequence(process.prescaleEvent*process.MeasurementTrackerEvent*process.trackFilterRefit) + +process.load("PhysicsTools.NanoAOD.nano_cff") +process.load("PhysicsTools.NanoAOD.NanoAODEDMEventContent_cff") + +## as a test: it should be possible to add tracks fully at configuration level (+ declaring the plugin) +from PhysicsTools.NanoAOD.common_cff import * +## this is equivalent to ShallowTrackProducer as configured for the gain calibration +process.tracksTable = cms.EDProducer("SimpleTrackFlatTableProducer", + src=tracksForCalib, + cut=cms.string(""), + name=cms.string("track"), + doc=cms.string("SiStripCalMinBias ALCARECO tracks"), + singleton=cms.bool(False), + extension=cms.bool(False), + variables=cms.PSet( + chi2ndof=Var("chi2()/ndof", float), + pt=Var("pt()", float), + 
hitsvalid=Var("numberOfValidHits()", int), ## unsigned? + phi=Var("phi()", float), + eta=Var("eta()", float), + ) + ) +process.load("CalibTracker.SiStripCommon.tkInstLumiTable_cfi") +process.tkInstLumiTable.extension = True +process.load("CalibTracker.SiStripCommon.siStripGainCalibTable_cfi") +process.siStripGainCalibTable.Tracks = tracksForCalib + +process.nanoCTPath = cms.Path(process.TkCalSeq* + process.nanoMetadata*process.tkInstLumiTable + *process.tracksTable + *process.siStripGainCalibTable + ) + +process.out = cms.OutputModule("NanoAODOutputModule", + fileName=cms.untracked.string("CalibTreeMC_nano_G2.root"), + outputCommands=process.NANOAODEventContent.outputCommands + ) + +process.end = cms.EndPath(process.out) diff --git a/CalibTracker/SiStripLorentzAngle/interface/SiStripLorentzAngleCalibrationStruct.h b/CalibTracker/SiStripLorentzAngle/interface/SiStripLorentzAngleCalibrationStruct.h index 6d6f74f58db98..bb99e599409c2 100644 --- a/CalibTracker/SiStripLorentzAngle/interface/SiStripLorentzAngleCalibrationStruct.h +++ b/CalibTracker/SiStripLorentzAngle/interface/SiStripLorentzAngleCalibrationStruct.h @@ -7,6 +7,7 @@ // user includes #include "DQMServices/Core/interface/DQMStore.h" +#include "CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h" struct SiStripLorentzAngleCalibrationHistograms { public: @@ -28,9 +29,11 @@ struct SiStripLorentzAngleCalibrationHistograms { std::map p_; // These are vectors since std:map::find is expensive - // we're going to profi of the dense indexing offered by + // we're going to profit of the dense indexing offered by // SiStripHashedDetId and index the histogram position // with the natural booking order + SiStripHashedDetId hash_; + std::vector h2_ct_w_m_; std::vector h2_ct_var2_m_; std::vector h2_ct_var3_m_; diff --git a/CalibTracker/SiStripLorentzAngle/plugins/SiStripLorentzAnglePCLMonitor.cc b/CalibTracker/SiStripLorentzAngle/plugins/SiStripLorentzAnglePCLMonitor.cc index 56603db4c95c2..22d983621fa52 100644 --- 
a/CalibTracker/SiStripLorentzAngle/plugins/SiStripLorentzAnglePCLMonitor.cc +++ b/CalibTracker/SiStripLorentzAngle/plugins/SiStripLorentzAnglePCLMonitor.cc @@ -22,7 +22,6 @@ #include // user include files -#include "CalibFormats/SiStripObjects/interface/SiStripHashedDetId.h" #include "CalibTracker/Records/interface/SiStripDependentRecords.h" #include "CalibTracker/SiStripCommon/interface/ShallowTools.h" #include "CalibTracker/SiStripLorentzAngle/interface/SiStripLorentzAngleCalibrationHelpers.h" @@ -74,7 +73,6 @@ class SiStripLorentzAnglePCLMonitor : public DQMEDAnalyzer { // ------------ member data ------------ SiStripClusterInfo m_clusterInfo; SiStripLorentzAngleCalibrationHistograms iHists_; - SiStripHashedDetId m_hash; // for magnetic field conversion static constexpr float teslaToInverseGeV_ = 2.99792458e-3f; @@ -183,17 +181,22 @@ void SiStripLorentzAnglePCLMonitor::dqmBeginRun(edm::Run const& run, edm::EventS // Sorted DetId list gives max performance, anything else is worse std::sort(c_rawid.begin(), c_rawid.end()); - // initialized the hash map - m_hash = SiStripHashedDetId(c_rawid); + // initialize the hash map + // in case it's not already initialized + if (iHists_.hash_.size() == 0) { + iHists_.hash_ = SiStripHashedDetId(c_rawid); + } //reserve the size of the vector - iHists_.h2_ct_w_m_.reserve(c_rawid.size()); - iHists_.h2_ct_var2_m_.reserve(c_rawid.size()); - iHists_.h2_ct_var3_m_.reserve(c_rawid.size()); + if (saveHistosMods_) { + iHists_.h2_ct_w_m_.reserve(c_rawid.size()); + iHists_.h2_ct_var2_m_.reserve(c_rawid.size()); + iHists_.h2_ct_var3_m_.reserve(c_rawid.size()); - iHists_.h2_t_w_m_.reserve(c_rawid.size()); - iHists_.h2_t_var2_m_.reserve(c_rawid.size()); - iHists_.h2_t_var3_m_.reserve(c_rawid.size()); + iHists_.h2_t_w_m_.reserve(c_rawid.size()); + iHists_.h2_t_var2_m_.reserve(c_rawid.size()); + iHists_.h2_t_var3_m_.reserve(c_rawid.size()); + } } std::string SiStripLorentzAnglePCLMonitor::moduleLocationType(const uint32_t& mod, const 
TrackerTopology* tTopo) { @@ -308,7 +311,13 @@ void SiStripLorentzAnglePCLMonitor::analyze(const edm::Event& iEvent, const edm: if (locationtype.empty()) return; - const auto& hashedIndex = m_hash.hashedIndex(mod); + // retrive the hashed index + const auto& hashedIndex = iHists_.hash_.hashedIndex(mod); + + if (saveHistosMods_) { + LogDebug("SiStripLorentzAnglePCLMonitor") << "module ID: " << mod << " hashedIndex: " << hashedIndex; + iHists_.h1_["occupancyPerIndex"]->Fill(hashedIndex); + } TVector3 localdir(c_localdirx, c_localdiry, c_localdirz); int sign = iHists_.orientation_[mod]; @@ -324,6 +333,12 @@ void SiStripLorentzAnglePCLMonitor::analyze(const edm::Event& iEvent, const edm: iHists_.h2_[Form("%s_tanthcosphtrk_nstrip", locationtype.c_str())]->Fill(sign * cosphi * tantheta, c_nstrips); iHists_.h2_[Form("%s_thetatrk_nstrip", locationtype.c_str())]->Fill(sign * theta * cosphi, c_nstrips); + if (saveHistosMods_) { + iHists_.h1_[Form("%s_%d_nstrips", locationtype.c_str(), mod)]->Fill(c_nstrips); + iHists_.h1_[Form("%s_%d_tanthetatrk", locationtype.c_str(), mod)]->Fill(sign * tantheta); + iHists_.h1_[Form("%s_%d_cosphitrk", locationtype.c_str(), mod)]->Fill(cosphi); + } + // variance for width == 2 if (c_nstrips == 2) { iHists_.h1_[Form("%s_variance_w2", locationtype.c_str())]->Fill(c_variance); @@ -332,6 +347,8 @@ void SiStripLorentzAnglePCLMonitor::analyze(const edm::Event& iEvent, const edm: // not in PCL if (saveHistosMods_) { + LogDebug("SiStripLorentzAnglePCLMonitor") << iHists_.h2_ct_var2_m_[hashedIndex]->getName(); + iHists_.h1_[Form("%s_%d_variance_w2", locationtype.c_str(), mod)]->Fill(c_variance); iHists_.h2_ct_var2_m_[hashedIndex]->Fill(sign * cosphi * tantheta, c_variance); iHists_.h2_t_var2_m_[hashedIndex]->Fill(sign * cosphi * theta, c_variance); } @@ -344,10 +361,12 @@ void SiStripLorentzAnglePCLMonitor::analyze(const edm::Event& iEvent, const edm: // not in PCL if (saveHistosMods_) { + iHists_.h1_[Form("%s_%d_variance_w3", locationtype.c_str(), 
mod)]->Fill(c_variance); iHists_.h2_ct_var3_m_[hashedIndex]->Fill(sign * cosphi * tantheta, c_variance); iHists_.h2_t_var3_m_[hashedIndex]->Fill(sign * cosphi * theta, c_variance); } } + // not in PCL if (saveHistosMods_) { iHists_.h2_ct_w_m_[hashedIndex]->Fill(sign * cosphi * tantheta, c_nstrips); @@ -392,6 +411,14 @@ void SiStripLorentzAnglePCLMonitor::bookHistograms(DQMStore::IBooker& ibook, "track_etaxchi2_2d", "track #chi^{2}/ndf vs track #eta;track #eta;track #chi^{2};tracks", 60, -3, 3, 100, 0, 5); // clang-format on + if (saveHistosMods_) { + iHists_.h1_["occupancyPerIndex"] = ibook.book1D("ClusterOccupancyPerHashedIndex", + "cluster occupancy;hashed index;# clusters per module", + iHists_.hash_.size(), + -0.5, + iHists_.hash_.size() - 0.5); + } + // fill in the module types iHists_.nlayers_["TIB"] = 4; iHists_.nlayers_["TOB"] = 6; @@ -451,6 +478,7 @@ void SiStripLorentzAnglePCLMonitor::bookHistograms(DQMStore::IBooker& ibook, if (saveHistosMods_) { ibook.setCurrentFolder(folderToBook + "/modules"); for (const auto& [mod, locationType] : iHists_.moduleLocationType_) { + ibook.setCurrentFolder(folderToBook + "/modules" + Form("/%s", locationType.c_str())); // histograms for each module iHists_.h1_[Form("%s_%d_nstrips", locationType.c_str(), mod)] = ibook.book1D(Form("%s_%d_nstrips", locationType.c_str(), mod), "", 10, 0, 10); @@ -465,9 +493,12 @@ void SiStripLorentzAnglePCLMonitor::bookHistograms(DQMStore::IBooker& ibook, } int counter{0}; - SiStripHashedDetId::const_iterator iter = m_hash.begin(); - for (; iter != m_hash.end(); ++iter) { + SiStripHashedDetId::const_iterator iter = iHists_.hash_.begin(); + for (; iter != iHists_.hash_.end(); ++iter) { + LogDebug("SiStripLorentzAnglePCLMonitor") + << "detId: " << (*iter) << " hashed index: " << iHists_.hash_.hashedIndex((*iter)); const auto& locationType = iHists_.moduleLocationType_[(*iter)]; + ibook.setCurrentFolder(folderToBook + "/modules" + Form("/%s", locationType.c_str())); 
iHists_.h2_ct_w_m_.push_back( ibook.book2D(Form("ct_w_m_%s_%d", locationType.c_str(), *iter), "", 90, -0.9, 0.9, 10, 0, 10)); iHists_.h2_t_w_m_.push_back( diff --git a/CalibTracker/SiStripLorentzAngle/test/BuildFile.xml b/CalibTracker/SiStripLorentzAngle/test/BuildFile.xml new file mode 100644 index 0000000000000..b14cdc229877d --- /dev/null +++ b/CalibTracker/SiStripLorentzAngle/test/BuildFile.xml @@ -0,0 +1 @@ + diff --git a/CalibTracker/SiStripLorentzAngle/test/step_PromptCalibProdSiStripLA_cfg.py b/CalibTracker/SiStripLorentzAngle/test/step_PromptCalibProdSiStripLA_cfg.py index 821a59181417a..bb949a84c6362 100644 --- a/CalibTracker/SiStripLorentzAngle/test/step_PromptCalibProdSiStripLA_cfg.py +++ b/CalibTracker/SiStripLorentzAngle/test/step_PromptCalibProdSiStripLA_cfg.py @@ -13,6 +13,7 @@ process.load('Configuration.StandardSequences.Services_cff') process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.cerr.FwkReport.reportEvery = 100 # limit the output for the unit test process.load('Configuration.EventContent.EventContentCosmics_cff') process.load('Configuration.StandardSequences.GeometryRecoDB_cff') process.load('Configuration.StandardSequences.MagneticField_cff') @@ -21,13 +22,13 @@ process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') process.maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(100000), + input = cms.untracked.int32(1000), # 1000000 output = cms.optional.untracked.allowed(cms.int32,cms.PSet) ) # Input source process.source = cms.Source("PoolSource", - fileNames = cms.untracked.vstring('/store/data/Commissioning2023/Cosmics/ALCARECO/SiStripCalCosmics-PromptReco-v1/000/364/141/00000/062e670e-40e3-4950-b0bb-dd354844d16f.root'), + fileNames = cms.untracked.vstring('/store/data/Commissioning2023/Cosmics/ALCARECO/SiStripCalCosmics-PromptReco-v1/000/364/174/00000/59a465b4-6e25-4ea0-8fe3-2319bdea7fcb.root'), secondaryFileNames = 
cms.untracked.vstring() ) @@ -104,11 +105,12 @@ from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask associatePatAlgosToolsTask(process) -#Setup FWK for multithreaded +# Setup FWK for multithreaded process.options.numberOfThreads = 4 process.options.numberOfStreams = 0 - +# Save the per-histogram modules in order to test the SiStripHashedDetId +process.ALCARECOSiStripLACalib.saveHistoMods = cms.bool(True) # Customisation from command line diff --git a/CalibTracker/SiStripLorentzAngle/test/testPromptCalibProdSiStripLA.sh b/CalibTracker/SiStripLorentzAngle/test/testPromptCalibProdSiStripLA.sh new file mode 100755 index 0000000000000..53721c333e56b --- /dev/null +++ b/CalibTracker/SiStripLorentzAngle/test/testPromptCalibProdSiStripLA.sh @@ -0,0 +1,10 @@ +#!/bin/sh +function die { echo $1: status $2 ; exit $2; } + +# test worker +printf "TESTING SiStrip Lorentz Angle Worker ...\n\n" +cmsRun ${SCRAM_TEST_PATH}/step_PromptCalibProdSiStripLA_cfg.py || die "Failure running step_PromptCalibProdSiStripLA_cfg.py" $? + +# test harvester +printf "TESTING SiStrip Lorentz Angle Harvester ...\n\n" +cmsRun ${SCRAM_TEST_PATH}/step_PromptCalibProdSiStripLA_ALCAHARVEST_cfg.py || die "Failure running step_PromptCalibProdSiStripLA_ALCAHARVEST_cfg.py" $? 
diff --git a/CalibTracker/SiStripQuality/plugins/SiStripBadModuleByHandBuilder.cc b/CalibTracker/SiStripQuality/plugins/SiStripBadModuleByHandBuilder.cc index f01d0f3486712..2b57e88602c03 100644 --- a/CalibTracker/SiStripQuality/plugins/SiStripBadModuleByHandBuilder.cc +++ b/CalibTracker/SiStripQuality/plugins/SiStripBadModuleByHandBuilder.cc @@ -10,7 +10,6 @@ #include #include #include -#include class SiStripBadModuleByHandBuilder : public ConditionDBWriter { public: diff --git a/Calibration/EcalCalibAlgos/python/EcalPhiSymRecoSequence_cff.py b/Calibration/EcalCalibAlgos/python/EcalPhiSymRecoSequence_cff.py index d171115387a04..7aa1598a63789 100644 --- a/Calibration/EcalCalibAlgos/python/EcalPhiSymRecoSequence_cff.py +++ b/Calibration/EcalCalibAlgos/python/EcalPhiSymRecoSequence_cff.py @@ -155,8 +155,8 @@ def customise(process): """ # Change input collection for the /AlCaPhiSym/*/RAW stream dataformat - process.ecalMultiFitUncalibRecHit.cpu.EBdigiCollection = cms.InputTag("hltEcalPhiSymFilter", "phiSymEcalDigisEB") - process.ecalMultiFitUncalibRecHit.cpu.EEdigiCollection = cms.InputTag("hltEcalPhiSymFilter", "phiSymEcalDigisEE") + process.ecalMultiFitUncalibRecHitCPU.EBdigiCollection = "hltEcalPhiSymFilter:phiSymEcalDigisEB" + process.ecalMultiFitUncalibRecHitCPU.EEdigiCollection = "hltEcalPhiSymFilter:phiSymEcalDigisEE" process.ecalRecHit.cpu.killDeadChannels = cms.bool( False ) process.ecalRecHit.cpu.recoverEBVFE = cms.bool( False ) process.ecalRecHit.cpu.recoverEEVFE = cms.bool( False ) diff --git a/Calibration/HcalAlCaRecoProducers/plugins/AlCaHBHEMuonProducer.cc b/Calibration/HcalAlCaRecoProducers/plugins/AlCaHBHEMuonProducer.cc index 095a17166d565..ce7b2ebb3dab2 100644 --- a/Calibration/HcalAlCaRecoProducers/plugins/AlCaHBHEMuonProducer.cc +++ b/Calibration/HcalAlCaRecoProducers/plugins/AlCaHBHEMuonProducer.cc @@ -60,12 +60,10 @@ class AlCaHBHEMuonProducer : public edm::stream::EDProducer("BeamSpotLabel")), labelVtx_(iConfig.getParameter("VertexLabel")), @@ 
-235,15 +232,6 @@ void AlCaHBHEMuonProducer::fillDescriptions(edm::ConfigurationDescriptions& desc descriptions.add("alcaHBHEMuonProducer", desc); } -void AlCaHBHEMuonProducer::beginRun(edm::Run const& iRun, edm::EventSetup const& iSetup) { - edm::LogVerbatim("HcalHBHEMuon") << "Run[" << nRun_ << "] " << iRun.run(); -} - -void AlCaHBHEMuonProducer::endRun(edm::Run const& iRun, edm::EventSetup const&) { - ++nRun_; - edm::LogVerbatim("HcalHBHEMuon") << "endRun[" << nRun_ << "] " << iRun.run(); -} - bool AlCaHBHEMuonProducer::select(const reco::MuonCollection& muons) { bool ok(false); for (unsigned int k = 0; k < muons.size(); ++k) { diff --git a/Calibration/HcalCalibAlgos/macros/CalibCorr.C b/Calibration/HcalCalibAlgos/macros/CalibCorr.C index a2b6866911042..ba233bc3993dc 100644 --- a/Calibration/HcalCalibAlgos/macros/CalibCorr.C +++ b/Calibration/HcalCalibAlgos/macros/CalibCorr.C @@ -112,28 +112,32 @@ unsigned int truncateId(unsigned int detId, int truncateFlag, bool debug = false if (debug) { std::cout << "Truncate 1 " << std::hex << detId << " " << id << std::dec << " Flag " << truncateFlag << std::endl; } + int truncate0 = ((truncateFlag / 1) % 10); + int truncate1 = ((truncateFlag / 10) % 10); int subdet, depth, zside, ieta, iphi; unpackDetId(detId, subdet, zside, ieta, iphi, depth); - if (truncateFlag == 1) { + if (truncate1 == 1) + zside = 1; + if (truncate0 == 1) { //Ignore depth index of ieta values of 15 and 16 of HB if ((subdet == 1) && (ieta > 14)) depth = 1; - } else if (truncateFlag == 2) { + } else if (truncate0 == 2) { //Ignore depth index of all ieta values depth = 1; - } else if (truncateFlag == 3) { + } else if (truncate0 == 3) { //Ignore depth index for depth > 1 in HE if ((subdet == 2) && (depth > 1)) depth = 2; else depth = 1; - } else if (truncateFlag == 4) { + } else if (truncate0 == 4) { //Ignore depth index for depth > 1 in HB if ((subdet == 1) && (depth > 1)) depth = 2; else depth = 1; - } else if (truncateFlag == 5) { + } else if 
(truncate0 == 5) { //Ignore depth index for depth > 1 in HB and HE if (depth > 1) depth = 2; @@ -174,16 +178,17 @@ unsigned int repackId(int subdet, int ieta, int iphi, int depth) { bool ifHB(int ieta, int depth) { return ((std::abs(ieta) < 16) || ((std::abs(ieta) == 16) && (depth != 4))); } int truncateDepth(int ieta, int depth, int truncateFlag) { + int truncate0 = ((truncateFlag / 1) % 10); int d(depth); - if (truncateFlag == 5) { + if (truncate0 == 5) { d = (depth == 1) ? 1 : 2; - } else if (truncateFlag == 4) { + } else if (truncate0 == 4) { d = ifHB(ieta, depth) ? ((depth == 1) ? 1 : 2) : depth; - } else if (truncateFlag == 3) { + } else if (truncate0 == 3) { d = (!ifHB(ieta, depth)) ? ((depth == 1) ? 1 : 2) : depth; - } else if (truncateFlag == 2) { + } else if (truncate0 == 2) { d = 1; - } else if (truncateFlag == 1) { + } else if (truncate0 == 1) { d = ((std::abs(ieta) == 15) || (std::abs(ieta) == 16)) ? 1 : depth; } return d; diff --git a/Calibration/HcalCalibAlgos/macros/CalibFitPlots.C b/Calibration/HcalCalibAlgos/macros/CalibFitPlots.C index a5d910b1de907..1ce81432a8706 100644 --- a/Calibration/HcalCalibAlgos/macros/CalibFitPlots.C +++ b/Calibration/HcalCalibAlgos/macros/CalibFitPlots.C @@ -21,15 +21,15 @@ // Defaults: append=true, iname=2 // // For plotting stored histograms from FitHist's -// PlotHist(infile, prefix, text, modePlot, kopt, lumi, ener, dataMC, +// PlotHist(infile, prefix, text, modePlot, kopt, lumi, ener, isRealData, // drawStatBox, save); -// Defaults: modePlot=4, kopt=100, lumi=0, ener=13, dataMC=false, +// Defaults: modePlot=4, kopt=100, lumi=0, ener=13, isRealData=false, // drawStatBox=true, save=0 // // For plotting histograms corresponding to individual ieta's -// PlotHistEta(infile, prefix, text, iene, numb, ieta, lumi, ener, dataMC, +// PlotHistEta(infile, prefix, text, iene, numb, ieta, lumi, ener, isRealData, // drawStatBox, save); -// Defaults iene=3, numb=50, ieta=0, lumi=0, ener=13.0, dataMC=false, +// Defaults iene=3, 
numb=50, ieta=0, lumi=0, ener=13.0, isRealData=false, // drawStatBox=true, save=0 // // For plotting several histograms in the same plot @@ -64,9 +64,10 @@ // Defaults: save=0 // // For plotting correction factors -// PlotHistCorrFactor(infile, text, prefixF, scale, nmin, dataMC, +// PlotHistCorrFactor(infile, text, prefixF, scale, nmin, isRealData, // drawStatBox, iformat, save); -// Defaults: dataMC=true, drwaStatBox=false, nmin=100, iformat=0, save=0 +// Defaults: isRealData=true, drwaStatBox=false, nmin=100, iformat=0, +// save=0 // // For plotting (fractional) asymmetry in the correction factors // @@ -78,8 +79,8 @@ // // PlotHistCorrFactors(infile1, text1, infile2, text2, infile3, text3, // infile4, text4, infile5, text5, prefixF, ratio, -// drawStatBox, nmin, dataMC, year, iformat, save) -// Defaults: ratio=false, drawStatBox=true, nmin=100, dataMC=false, +// drawStatBox, nmin, isRealData, year, iformat, save) +// Defaults: ratio=false, drawStatBox=true, nmin=100, isRealData=false, // year=2018, iformat=0, save=0 // // For plotting correction factors including systematics @@ -118,10 +119,10 @@ // drawStatBox = true, save = 0 // // For plotting histograms created by CalibPlotProperties -// PlotPropertyHist(infile, prefix, text, etaMax, lumi, ener, dataMC, +// PlotPropertyHist(infile, prefix, text, etaMax, lumi, ener, isRealData, // drawStatBox, save) // Defaults etaMax = 25 (draws for eta = 1 .. 
etaMax), lumi = 0, -// ener = 13.0, dataMC = false, drawStatBox = true, save = 0 +// ener = 13.0, isRealData = false, drawStatBox = true, save = 0 // // For plotting mean response and resolution as a function of // particle momentum @@ -136,8 +137,8 @@ // Width of response and uts error for the 4 regions // // For plotting depth dependent correction factors from muon study -// PlotDepthCorrFactor(infile, text, prefix, dataMC, drawStatBox, save) -// Defaults prefix = "", dataMC = true, drawStatBox = true, save = 0 +// PlotDepthCorrFactor(infile, text, prefix, isRealData, drawStatBox, save) +// Defaults prefix = "", isRealData = true, drawStatBox = true, save = 0 // Format for the input file: ieta and correcrion factor with its // uncertainty for each depth // @@ -145,12 +146,16 @@ // give by infileX for 2 depths (depth1, depth2) as a function of // ieta obaned from 2 sources of data (defined by text1 and text2) // PlotHistCorrRatio(infile1, text1, infile2, text2, depth1, depth2, prefix, -// text0, etaMax, doFit, dataMC, year, iformat, save) -// Defaults etaMax = -1, doFit = true, dataMC = true, year = 2022, -// iformat = 0, save = 0 +// text0, etaMin, etaMax, doFit, isRealData, year, iformat, +// save) +// Defaults etaMin = -1, etaMax = -1, doFit = true, isRealData = true, +// year = 2022, iformat = 0, save = 0 // text0 is a general description common to both sets of corr factors -// etaMax > 0 will take ieta range from -etaMax to +etaMax; otherwise -// determine from data files; doFit determines if a Pol0 fit is to be done +// etaMin < 0 and etaMax > 0 will take ieta range from -etaMax to +etaMax; +// etaMin > 0 will select ieta's where |ieta| is greater than etaMin +// with the plot either between -etaMax to etaMax if etaMax > 0 otherwise +// determined from data files; +// doFit determines if a Pol0 fit is to be done // // where: // infile (std::string) = Name of the input ROOT file @@ -224,6 +229,7 @@ #include #include #include +#include #include #include 
#include @@ -1225,7 +1231,7 @@ void PlotHist(const char* infile, int kopt = 100, double lumi = 0, double ener = 13.0, - bool dataMC = false, + bool isRealData = false, bool drawStatBox = true, int save = 0) { std::string name0[6] = {"ratio00", "ratio10", "ratio20", "ratio30", "ratio40", "ratio50"}; @@ -1329,7 +1335,7 @@ void PlotHist(const char* infile, } else { if (mode == 5) hist->GetYaxis()->SetRangeUser(0.1, 0.50); - else if (dataMC) + else if (isRealData) hist->GetYaxis()->SetRangeUser(0.5, 1.50); else hist->GetYaxis()->SetRangeUser(0.8, 1.20); @@ -1406,12 +1412,12 @@ void PlotHist(const char* infile, } txt1->AddText(txt); txt1->Draw("same"); - double xmax = (dataMC) ? 0.33 : 0.44; + double xmax = (isRealData) ? 0.33 : 0.44; ymi = (lumi > 0.1) ? 0.91 : 0.84; ymx = ymi + 0.05; TPaveText* txt2 = new TPaveText(0.11, ymi, xmax, ymx, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt, "CMS Preliminary"); else sprintf(txt, "CMS Simulation Preliminary"); @@ -1438,7 +1444,7 @@ void PlotHistEta(const char* infile, int ieta = 0, double lumi = 0, double ener = 13.0, - bool dataMC = false, + bool isRealData = false, bool drawStatBox = true, int save = 0) { std::string name0 = "ratio"; @@ -1522,12 +1528,12 @@ void PlotHistEta(const char* infile, } txt1->AddText(txt); txt1->Draw("same"); - double xmax = (dataMC) ? 0.33 : 0.44; + double xmax = (isRealData) ? 0.33 : 0.44; ymi = (lumi > 0.1) ? 
0.91 : 0.84; ymx = ymi + 0.05; TPaveText* txt2 = new TPaveText(0.11, ymi, xmax, ymx, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt, "CMS Preliminary"); else sprintf(txt, "CMS Simulation Preliminary"); @@ -2160,7 +2166,7 @@ void PlotHistCorrFactor(char* infile, std::string prefixF = "", double scale = 1.0, int nmin = 100, - bool dataMC = false, + bool isRealData = false, bool drawStatBox = true, int iformat = 0, int save = 0) { @@ -2266,10 +2272,10 @@ void PlotHistCorrFactor(char* infile, pad->Update(); } char txt1[30]; - double xmax = (dataMC) ? 0.33 : 0.44; + double xmax = (isRealData) ? 0.33 : 0.44; TPaveText* txt2 = new TPaveText(0.11, 0.85, xmax, 0.89, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt1, "CMS Preliminary"); else sprintf(txt1, "CMS Simulation Preliminary"); @@ -2393,7 +2399,7 @@ void PlotHistCorrFactors(char* infile1, bool ratio = false, bool drawStatBox = true, int nmin = 100, - bool dataMC = false, + bool isRealData = false, int year = 2018, int iformat = 0, int save = 0) { @@ -2595,7 +2601,7 @@ void PlotHistCorrFactors(char* infile1, TPaveText* txt0 = new TPaveText(0.12, 0.84, 0.49, 0.89, "blNDC"); txt0->SetFillColor(0); char txt[40]; - if (dataMC) + if (isRealData) sprintf(txt, "CMS Preliminary (%d)", year); else sprintf(txt, "CMS Simulation Preliminary (%d)", year); @@ -3430,7 +3436,7 @@ void PlotPropertyHist(const char* infile, int etaMax = 25, double lumi = 0, double ener = 13.0, - bool dataMC = false, + bool isRealData = false, bool drawStatBox = true, int save = 0) { std::string name0[3] = {"energyE2", "energyH2", "energyP2"}; @@ -3514,12 +3520,12 @@ void PlotPropertyHist(const char* infile, } txt1->AddText(txt); txt1->Draw("same"); - double xmax = (dataMC) ? 0.24 : 0.35; + double xmax = (isRealData) ? 
0.24 : 0.35; ymi = 0.91; ymx = ymi + 0.05; TPaveText* txt2 = new TPaveText(0.02, ymi, xmax, ymx, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt, "CMS Preliminary"); else sprintf(txt, "CMS Simulation Preliminary"); @@ -3603,12 +3609,12 @@ void PlotPropertyHist(const char* infile, } txt1->AddText(txt); txt1->Draw("same"); - double xmax = (dataMC) ? 0.24 : 0.35; + double xmax = (isRealData) ? 0.24 : 0.35; ymi = 0.91; ymx = ymi + 0.05; TPaveText* txt2 = new TPaveText(0.02, ymi, xmax, ymx, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt, "CMS Preliminary"); else sprintf(txt, "CMS Simulation Preliminary"); @@ -3763,7 +3769,7 @@ void PlotMeanError(const std::string infilest, int reg = 3, bool resol = false, void PlotDepthCorrFactor(char* infile, std::string text, std::string prefix = "", - bool dataMC = true, + bool isRealData = true, bool drawStatBox = true, int save = 0) { std::map cfacs; @@ -3910,10 +3916,10 @@ void PlotDepthCorrFactor(char* infile, pad->Update(); } char txt1[30]; - double xmax = (dataMC) ? 0.33 : 0.44; + double xmax = (isRealData) ? 
0.33 : 0.44; TPaveText* txt2 = new TPaveText(0.11, 0.85, xmax, 0.89, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt1, "CMS Preliminary"); else sprintf(txt1, "CMS Simulation Preliminary"); @@ -3930,7 +3936,7 @@ void PlotDepthCorrFactor(char* infile, } } -void DrawHistPhiSymmetry(TH1D* hist0, bool dataMC, bool drawStatBox, bool save) { +void DrawHistPhiSymmetry(TH1D* hist0, bool isRealData, bool drawStatBox, bool save) { char name[30], namep[30], txt1[30]; TH1D* hist = (TH1D*)(hist0->Clone()); sprintf(namep, "c_%s", hist->GetName()); @@ -3959,7 +3965,7 @@ void DrawHistPhiSymmetry(TH1D* hist0, bool dataMC, bool drawStatBox, bool save) } TPaveText* txt2 = new TPaveText(0.11, 0.85, 0.44, 0.89, "blNDC"); txt2->SetFillColor(0); - if (dataMC) + if (isRealData) sprintf(txt1, "CMS Preliminary"); else sprintf(txt1, "CMS Simulation Preliminary"); @@ -3974,7 +3980,7 @@ void DrawHistPhiSymmetry(TH1D* hist0, bool dataMC, bool drawStatBox, bool save) } void PlotPhiSymmetryResults( - char* infile, bool dataMC = true, bool drawStatBox = true, bool debug = false, bool save = false) { + char* infile, bool isRealData = true, bool drawStatBox = true, bool debug = false, bool save = false) { const int maxDepthHB(4), maxDepthHE(7); const double cfacMin(0.70), cfacMax(1.5); const int nbin = (100.0 * (cfacMax - cfacMin)); @@ -4069,12 +4075,12 @@ void PlotPhiSymmetryResults( // HB first for (unsigned int k = 0; k < histHB.size(); ++k) { - DrawHistPhiSymmetry(histHB[k], dataMC, drawStatBox, save); + DrawHistPhiSymmetry(histHB[k], isRealData, drawStatBox, save); } // Then HE for (unsigned int k = 0; k < histHE.size(); ++k) { - DrawHistPhiSymmetry(histHE[k], dataMC, drawStatBox, save); + DrawHistPhiSymmetry(histHE[k], isRealData, drawStatBox, save); } } @@ -4086,9 +4092,10 @@ void PlotHistCorrRatio(char* infile1, int depth2, std::string prefixF, std::string text0, + int etaMin = -1, int etaMax = -1, bool doFit = true, - bool dataMC = true, + bool isRealData = true, 
int year = 2022, int iformat = 0, int save = 0) { @@ -4142,7 +4149,8 @@ void PlotHistCorrRatio(char* infile1, int npt(0); for (std::map::const_iterator itr = cfacs[ih].begin(); itr != cfacs[ih].end(); ++itr) { int ieta = (itr->second).ieta; - if ((ieta >= etamin) && (ieta <= etamax) && ((itr->second).depth == depth1)) { + bool seleta = (etaMin > 0) ? (std::abs(ieta) > etaMin) : true; + if ((ieta >= etamin) && (ieta <= etamax) && seleta && ((itr->second).depth == depth1)) { ++npt; int bin = ieta - etamin + 1; for (std::map::const_iterator ktr = cfacs[ih].begin(); ktr != cfacs[ih].end(); ++ktr) { @@ -4222,7 +4230,7 @@ void PlotHistCorrRatio(char* infile1, TPaveText* txt0 = new TPaveText(0.12, 0.91, 0.49, 0.96, "blNDC"); txt0->SetFillColor(0); char txt[40]; - if (dataMC) + if (isRealData) sprintf(txt, "CMS Preliminary (%d)", year); else sprintf(txt, "CMS Simulation Preliminary (%d)", year); diff --git a/Calibration/HcalCalibAlgos/macros/CalibMonitor.C b/Calibration/HcalCalibAlgos/macros/CalibMonitor.C index 170d1831bf10f..a8dfba3fac4ea 100644 --- a/Calibration/HcalCalibAlgos/macros/CalibMonitor.C +++ b/Calibration/HcalCalibAlgos/macros/CalibMonitor.C @@ -3,9 +3,9 @@ // .L CalibMonitor.C+g // CalibMonitor c1(fname, dirname, dupFileName, comFileName, outFileName, // prefix, corrFileName, rcorFileName, puCorr, flag, numb, -// dataMC, truncateFlag, useGen, scale, useScale, etalo, etahi, -// runlo, runhi, phimin, phimax, zside, nvxlo, nvxhi, rbxFile, -// exclude, etamax); +// isRealData, truncateFlag, useGen, scale, useScale, etalo, +// etahi, runlo, runhi, phimin, phimax, zside, nvxlo, nvxhi, +// rbxFile, exclude, etamax); // c1.Loop(nmax, debug); // c1.savePlot(histFileName,append,all); // @@ -70,14 +70,19 @@ // o = 0/1/2 for tight / loose / flexible // selection). 
Default = 1031 // numb (int) = number of eta bins (50 for -25:25) -// dataMC (bool) = true/false for data/MC (default true) -// truncateFlag (int) = Flag to treat different depths differently (0) -// both depths of ieta 15, 16 of HB as depth 1 (1) -// all depths as depth 1 (2), all depths in HE -// with values > 1 as depth 2 (3), all depths in -// HB with values > 1 as depth 2 (4), all depths -// in HB and HE with values > 1 as depth 2 (5) -// (default = 0) +// isRealData (bool) = true/false for data/MC (default true) +// truncateFlag (int) = A two digit flag (dr) with the default value 0. +// The digit *r* is used to treat depth values: +// (0) treat each depth independently; (1) all +// depths of ieta 15, 16 of HB as depth 1; (2) +// all depths in HB and HE as depth 1; (3) all +// depths in HE with values > 1 as depth 2; (4) +// all depths in HB with values > 1 as depth 2; +// (5) all depths in HB and HE with values > 1 +// as depth 2. +// The digit *d* is used if zside is to be +// ignored (1) or not (0) +// (Default 0) // useGen (bool) = true/false to use generator level momentum // or reconstruction level momentum // (default = false) @@ -300,7 +305,7 @@ private: CalibDuplicate *cDuplicate_; const std::string fname_, dirnm_, prefix_, outFileName_; const int corrPU_, flag_, numb_; - const bool dataMC_, useGen_; + const bool isRealData_, useGen_; const int truncateFlag_; const int etalo_, etahi_; int runlo_, runhi_; @@ -319,6 +324,7 @@ private: std::vector h_etaX[npbin]; std::vector h_etaR[npbin], h_nvxR[npbin], h_dL1R[npbin]; std::vector h_pp[npbin]; + std::vector h_p; }; CalibMonitor::CalibMonitor(const char *fname, @@ -332,7 +338,7 @@ CalibMonitor::CalibMonitor(const char *fname, int puCorr, int flag, int numb, - bool dataMC, + bool isRealData, int truncate, bool useGen, double scale, @@ -360,7 +366,7 @@ CalibMonitor::CalibMonitor(const char *fname, corrPU_(puCorr), flag_(flag), numb_(numb), - dataMC_(dataMC), + isRealData_(isRealData), useGen_(useGen), 
truncateFlag_(truncate), etalo_(etalo), @@ -822,6 +828,12 @@ void CalibMonitor::Init(TChain *tree, const char *comFileName, const char *outFi h_rbx[j - 1]->Sumw2(); } } + for (unsigned int j = 1; j < npbin; ++j) { + sprintf(name, "%sp%d", prefix_.c_str(), j); + sprintf(title, "Momentum (GeV) of selected track (p = %d:%d GeV)", ipbin[j], ipbin[j + 1]); + h_p.push_back(new TH1D(name, title, 100, ipbin[j], ipbin[j + 1])); + h_p[j - 1]->Sumw2(); + } } Bool_t CalibMonitor::Notify() { @@ -1320,6 +1332,11 @@ void CalibMonitor::Loop(Long64_t nmax, bool debug) { } } if (rat > rcut) { + if (debug) + std::cout << "kp " << kp << " " << h_p[kp - 1]->GetName() << " p " << pmom << " wt " << t_EventWeight + << std::endl; + if (kp > 0) + h_p[kp - 1]->Fill(pmom, t_EventWeight); if (p4060) ++kount50[15]; if (kp == 0) { @@ -1369,7 +1386,7 @@ void CalibMonitor::Loop(Long64_t nmax, bool debug) { runSum[t_Run] = knt; } } - if ((!dataMC_) || (t_mindR1 > 0.5) || (t_DataType == 1)) { + if ((!isRealData_) || (t_mindR1 > 0.5) || (t_DataType == 1)) { if (p4060) ++kount50[16]; if (kp == 0) { @@ -1695,6 +1712,12 @@ void CalibMonitor::savePlot(const std::string &theName, bool append, bool all) { } } } + for (unsigned int k = 0; k < h_p.size(); ++k) { + if (h_p[k] != 0) { + TH1D *h1 = (TH1D *)h_p[k]->Clone(); + h1->Write(); + } + } std::cout << "All done" << std::endl; theFile->Close(); } diff --git a/Calibration/HcalCalibAlgos/macros/CalibPlotProperties.C b/Calibration/HcalCalibAlgos/macros/CalibPlotProperties.C index a4352d5323ef9..ebbbb32ebf235 100644 --- a/Calibration/HcalCalibAlgos/macros/CalibPlotProperties.C +++ b/Calibration/HcalCalibAlgos/macros/CalibPlotProperties.C @@ -2,10 +2,10 @@ // Usage: // .L CalibPlotProperties.C+g // CalibPlotProperties c1(fname, dirname, dupFileName, prefix, corrFileName, -// rcorFileName, puCorr, flag, dataMC, truncateFlag, -// useGen, scale, useScale, etalo, etahi, runlo, runhi, -// phimin, phimax, zside, nvxlo, nvxhi, rbxFile, -// exclude, etamax); +// 
rcorFileName, puCorr, flag, isRealData, +// truncateFlag, useGen, scale, useScale, etalo, etahi, +// runlo, runhi, phimin, phimax, zside, nvxlo, nvxhi, +// rbxFile, exclude, etamax); // c1.Loop(nentries); // c1.savePlot(histFileName, append, all, debug); // @@ -16,10 +16,18 @@ // // This will plot the heistograms and save the canvases // +// PlotPHist(hisFileName, prefix, pLow, pHigh, isRealData, save) +// +// This will plot histograms of momenta spectra of types between +// pLow and pHigh and save the canvases +// // .L CalibPlotProperties.C+g // CalibSplit c1(fname, dirname, outFileName, pmin, pmax, debug); // c1.Loop(nentries); // +// This will split the tree and keep for tacks with momenta between +// pmin nd pm +// // where: // // fname (const char*) = file name of the input ROOT tree @@ -60,7 +68,7 @@ // d =0/1 flag to create basic set of histograms; // o =0/1/2 for tight / loose / flexible // selection). Default = 101111 -// dataMC (bool) = true/false for data/MC (default true) +// isRealData (bool) = true/false for data/MC (default true) // truncateFlag (int) = Flag to treat different depths differently (0) // both depths of ieta 15, 16 of HB as depth 1 (1) // all depths as depth 1 (2), all depths in HE @@ -266,7 +274,7 @@ public: const char *rcorFileName = "", int puCorr = -8, int flag = 101111, - bool dataMC = true, + bool isRealData = true, int truncateFlag = 0, bool useGen = false, double scale = 1.0, @@ -304,7 +312,7 @@ private: CalibDuplicate *cDuplicate_; const std::string fname_, dirnm_, prefix_, outFileName_; const int corrPU_, flag_; - const bool dataMC_, useGen_; + const bool isRealData_, useGen_; const int truncateFlag_; const int etalo_, etahi_; int runlo_, runhi_; @@ -339,7 +347,7 @@ CalibPlotProperties::CalibPlotProperties(const char *fname, const char *rcorFileName, int puCorr, int flag, - bool dataMC, + bool isRealData, int truncate, bool useGen, double scl, @@ -365,7 +373,7 @@ CalibPlotProperties::CalibPlotProperties(const char *fname, 
prefix_(prefix), corrPU_(puCorr), flag_(flag), - dataMC_(dataMC), + isRealData_(isRealData), useGen_(useGen), truncateFlag_(truncate), etalo_(etalo), @@ -984,7 +992,7 @@ void CalibPlotProperties::Loop(Long64_t nentries) { if (plotHists_) { if ((std::fabs(rat - 1) < 0.15) && (kp == kp50) && ((std::abs(t_ieta) < 15) || (std::abs(t_ieta) > 17))) { - float weight = (dataMC_ ? t_EventWeight : t_EventWeight * puweight(t_nVtx)); + float weight = (isRealData_ ? t_EventWeight : t_EventWeight * puweight(t_nVtx)); h_etaE->Fill(t_ieta, eHcal, weight); sel += weight; std::vector bv(7, 0.0f), ev(7, 0.0f); @@ -1256,7 +1264,7 @@ void CalibPlotProperties::correctEnergy(double &eHcal) { } } -void PlotThisHist(TH1D *hist, const std::string &text, int save) { +void PlotThisHist(TH1D *hist, const std::string &text, bool isRealData, int save) { char namep[120]; sprintf(namep, "c_%s", hist->GetName()); TCanvas *pad = new TCanvas(namep, namep, 700, 500); @@ -1278,7 +1286,10 @@ void PlotThisHist(TH1D *hist, const std::string &text, int save) { TPaveText *txt0 = new TPaveText(0.12, 0.91, 0.49, 0.96, "blNDC"); txt0->SetFillColor(0); char txt[100]; - sprintf(txt, "CMS Simulation Preliminary"); + if (isRealData) + sprintf(txt, "CMS Preliminary"); + else + sprintf(txt, "CMS Simulation Preliminary"); txt0->AddText(txt); txt0->Draw("same"); TPaveText *txt1 = new TPaveText(0.51, 0.91, 0.90, 0.96, "blNDC"); @@ -1316,6 +1327,7 @@ void PlotHist(const char *hisFileName, gStyle->SetOptTitle(0); gStyle->SetOptStat(1110); + bool isRealData = false; bool plotBasic = (((flagC / 1) % 10) > 0); bool plotEnergy = (((flagC / 10) % 10) > 0); bool plotHists = (((flagC / 100) % 10) > 0); @@ -1328,17 +1340,17 @@ void PlotHist(const char *hisFileName, hist = (TH1D *)(file->FindObjectAny("hnvtx")); if (hist != nullptr) { hist->GetXaxis()->SetTitle("Number of vertices (selected entries)"); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } hist = (TH1D 
*)(file->FindObjectAny("hnvtxEv")); if (hist != nullptr) { hist->GetXaxis()->SetTitle("Number of vertices (selected events)"); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } hist = (TH1D *)(file->FindObjectAny("hnvtxTk")); if (hist != nullptr) { hist->GetXaxis()->SetTitle("Number of vertices (selected tracks)"); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } for (int k = 0; k < CalibPlots::ntitles; ++k) { sprintf(name, "%sp%d", prefix.c_str(), k); @@ -1346,14 +1358,14 @@ void PlotHist(const char *hisFileName, if (hist != nullptr) { sprintf(title, "Momentum for %s (GeV)", CalibPlots::getTitle(k).c_str()); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%seta%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "#eta for %s", CalibPlots::getTitle(k).c_str()); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } } for (int k = 0; k < CalibPlots::npbin; ++k) { @@ -1366,7 +1378,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%seta1%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1377,7 +1389,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%seta2%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1388,7 +1400,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } 
sprintf(name, "%seta3%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1399,7 +1411,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%seta4%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1410,21 +1422,21 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%sdl1%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "Distance from L1 (p = %d:%d GeV)", CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%svtx%d", prefix.c_str(), k); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "N_{Vertex} (p = %d:%d GeV)", CalibPlots::getP(k), CalibPlots::getP(k + 1)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } } } @@ -1452,7 +1464,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k + 1), j); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%senergyP%d%d", prefix.c_str(), k, j); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1473,7 +1485,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k + 1), j); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%senergyE%d%d", prefix.c_str(), k, j); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1494,7 +1506,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getP(k + 1), j); 
hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%senergyER%d%d", prefix.c_str(), k, j); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1518,7 +1530,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getEta(j - 1), CalibPlots::getEta(j)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%senergyP%d", prefix.c_str(), j); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1532,7 +1544,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getEta(j - 1), CalibPlots::getEta(j)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "%senergyE%d", prefix.c_str(), j); hist = (TH1D *)(file->FindObjectAny(name)); @@ -1546,7 +1558,7 @@ void PlotHist(const char *hisFileName, CalibPlots::getEta(j - 1), CalibPlots::getEta(j)); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } } } @@ -1558,42 +1570,42 @@ void PlotHist(const char *hisFileName, if (hist != nullptr) { sprintf(title, "Total RecHit energy in depth %d (Barrel)", i + 1); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "b_recedepth%d", i); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "RecHit energy in depth %d (Barrel)", i + 1); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "b_nrecdepth%d", i); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "#RecHits in depth %d (Barrel)", i + 1); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "e_edepth%d", i); hist = (TH1D *)(file->FindObjectAny(name)); if 
(hist != nullptr) { sprintf(title, "Total RecHit energy in depth %d (Endcap)", i + 1); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "e_recedepth%d", i); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "RecHit energy in depth %d (Endcap)", i + 1); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } sprintf(name, "e_nrecdepth%d", i); hist = (TH1D *)(file->FindObjectAny(name)); if (hist != nullptr) { sprintf(title, "#RecHits in depth %d (Endcap)", i + 1); hist->GetXaxis()->SetTitle(title); - PlotThisHist(hist, text, save); + PlotThisHist(hist, text, isRealData, save); } } TH2F *h_etaE = (TH2F *)(file->FindObjectAny("heta")); @@ -1633,6 +1645,36 @@ void PlotHist(const char *hisFileName, } } +void PlotPHist(const char *hisFileName, + const std::string &prefix = "", + const std::string &text = "", + int pLow = 1, + int pHigh = 5, + bool isRealData = true, + int save = 0) { + gStyle->SetCanvasBorderMode(0); + gStyle->SetCanvasColor(kWhite); + gStyle->SetPadColor(kWhite); + gStyle->SetFillColor(kWhite); + gStyle->SetOptTitle(0); + gStyle->SetOptStat(1110); + + TFile *file = new TFile(hisFileName); + char name[100]; + TH1D *hist; + if (file != nullptr) { + for (int ip = pLow; ip <= pHigh; ++ip) { + sprintf(name, "%sp%d", prefix.c_str(), ip); + hist = (TH1D *)(file->FindObjectAny(name)); + if (hist != nullptr) { + hist->GetXaxis()->SetTitle(hist->GetTitle()); + hist->GetYaxis()->SetTitle("Tracks"); + PlotThisHist(hist, text, isRealData, save); + } + } + } +} + class CalibSplit { public: TChain *fChain; //!pointer to the analyzed TTree or TChain diff --git a/Calibration/HcalCalibAlgos/macros/CalibTree.C b/Calibration/HcalCalibAlgos/macros/CalibTree.C index aa2254332394a..2ee92923632c2 100644 --- a/Calibration/HcalCalibAlgos/macros/CalibTree.C +++ b/Calibration/HcalCalibAlgos/macros/CalibTree.C 
@@ -50,12 +50,17 @@ // applied: 0 no check; 1 only to events with // datatype not equal to 1; 2 to all (1) // l1Cut (double) = Cut value for the closeness parameter (0.5) -// truncateFlag (int) = Flag to treat different depths differently (0) -// both depths of ieta 15, 16 of HB as depth 1 (1) -// all depths as depth 1 (2), all depths in HE -// with values > 1 as depth 2 (3), all depths in -// HB with values > 1 as depth 2 (4), all depths -// in HB and HE with values > 1 as depth 2 (5) +// truncateFlag (int) = A two digit flag (dr) with the default value 0. +// The digit *r* is used to treat depth values: +// (0) treat each depth independently; (1) all +// depths of ieta 15, 16 of HB as depth 1; (2) +// all depths in HB and HE as depth 1; (3) all +// depths in HE with values > 1 as depth 2; (4) +// all depths in HB with values > 1 as depth 2; +// (5) all depths in HB and HE with values > 1 +// as depth 2. +// The digit *d* is used if zside is to be +// ignored (1) or not (0) // (Default 0) // maxIter (int) = number of iterations (30) // drForm (int) = type of threshold/dupFileName/rcorFileName (hdr) diff --git a/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonAnalyzer.cc b/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonAnalyzer.cc index 99817c62e13bd..e3e99896655f3 100644 --- a/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonAnalyzer.cc +++ b/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonAnalyzer.cc @@ -1,10 +1,6 @@ -#include -#include #include #include -#include #include -#include "TPRegexp.h" // user include files #include "FWCore/Framework/interface/Frameworkfwd.h" diff --git a/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonHighEtaAnalyzer.cc b/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonHighEtaAnalyzer.cc index 0205f6726b287..07c2dcfb76986 100644 --- a/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonHighEtaAnalyzer.cc +++ b/Calibration/HcalCalibAlgos/plugins/HcalHBHEMuonHighEtaAnalyzer.cc @@ -1,10 +1,6 @@ -#include -#include #include #include -#include 
#include -#include "TPRegexp.h" // user include files #include "FWCore/Framework/interface/Frameworkfwd.h" diff --git a/Calibration/HcalCalibAlgos/plugins/HcalHBHENewMuonAnalyzer.cc b/Calibration/HcalCalibAlgos/plugins/HcalHBHENewMuonAnalyzer.cc index b001352d98228..54387826b0e4c 100644 --- a/Calibration/HcalCalibAlgos/plugins/HcalHBHENewMuonAnalyzer.cc +++ b/Calibration/HcalCalibAlgos/plugins/HcalHBHENewMuonAnalyzer.cc @@ -1,10 +1,6 @@ -#include -#include #include #include -#include #include -#include "TPRegexp.h" // user include files #include "FWCore/Framework/interface/Frameworkfwd.h" diff --git a/Calibration/HcalCalibAlgos/plugins/HcalIsoTrackAnalyzer.cc b/Calibration/HcalCalibAlgos/plugins/HcalIsoTrackAnalyzer.cc index e5fcb542eb507..01376cb873727 100644 --- a/Calibration/HcalCalibAlgos/plugins/HcalIsoTrackAnalyzer.cc +++ b/Calibration/HcalCalibAlgos/plugins/HcalIsoTrackAnalyzer.cc @@ -1,19 +1,9 @@ // system include files -#include -#include -#include #include #include // Root objects -#include "TROOT.h" -#include "TSystem.h" -#include "TFile.h" -#include "TProfile.h" -#include "TDirectory.h" #include "TTree.h" -#include "TLorentzVector.h" -#include "TInterpreter.h" #include "DataFormats/HcalCalibObjects/interface/HcalIsoTrkCalibVariables.h" #include "DataFormats/HcalCalibObjects/interface/HcalIsoTrkEventVariables.h" diff --git a/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkAnalyzer.cc b/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkAnalyzer.cc index ed0c30fa0668c..4420c90393a8a 100644 --- a/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkAnalyzer.cc +++ b/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkAnalyzer.cc @@ -1,19 +1,11 @@ // system include files -#include #include -#include #include #include // Root objects -#include "TROOT.h" -#include "TSystem.h" -#include "TFile.h" -#include "TProfile.h" -#include "TDirectory.h" -#include "TTree.h" #include "TLorentzVector.h" -#include "TInterpreter.h" +#include "TTree.h" #include 
"CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" #include "CondFormats/EcalObjects/interface/EcalPFRecHitThresholds.h" diff --git a/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkSimAnalyzer.cc b/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkSimAnalyzer.cc index 03ce9e0a43f3c..62cd215dbdc8e 100644 --- a/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkSimAnalyzer.cc +++ b/Calibration/HcalCalibAlgos/plugins/HcalIsoTrkSimAnalyzer.cc @@ -1,19 +1,11 @@ // system include files -#include #include -#include #include #include // Root objects -#include "TROOT.h" -#include "TSystem.h" -#include "TFile.h" -#include "TProfile.h" -#include "TDirectory.h" #include "TTree.h" #include "TLorentzVector.h" -#include "TInterpreter.h" #include "CondFormats/EcalObjects/interface/EcalPFRecHitThresholds.h" #include "CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" diff --git a/Calibration/PPSAlCaRecoProducer/test/test_express_PPSAlCaReco_output.py b/Calibration/PPSAlCaRecoProducer/test/test_express_PPSAlCaReco_output.py index 5b4aedc191531..df89229ede911 100644 --- a/Calibration/PPSAlCaRecoProducer/test/test_express_PPSAlCaReco_output.py +++ b/Calibration/PPSAlCaRecoProducer/test/test_express_PPSAlCaReco_output.py @@ -6,7 +6,7 @@ process = cms.Process( 'TEST',ctpps_2018) # LHCInfo plotter -process.load('Validation.CTPPS.ctppsLHCInfoPlotter_cff') +process.load('Validation.CTPPS.ctppsLHCInfoPlotter_cfi') process.ctppsLHCInfoPlotter.outputFile = "alcareco_lhc_info_express.root" # Load geometry from DB diff --git a/Calibration/PPSAlCaRecoProducer/test/test_prompt_PPSAlCaReco_output.py b/Calibration/PPSAlCaRecoProducer/test/test_prompt_PPSAlCaReco_output.py index 2b6a2db3240a6..27cb8bb23aeb1 100644 --- a/Calibration/PPSAlCaRecoProducer/test/test_prompt_PPSAlCaReco_output.py +++ b/Calibration/PPSAlCaRecoProducer/test/test_prompt_PPSAlCaReco_output.py @@ -6,7 +6,7 @@ process = cms.Process( 'TEST',ctpps_2018) # LHCInfo plotter 
-process.load('Validation.CTPPS.ctppsLHCInfoPlotter_cff') +process.load('Validation.CTPPS.ctppsLHCInfoPlotter_cfi') process.ctppsLHCInfoPlotter.outputFile = "alcareco_lhc_info_prompt.root" # Load geometry from DB diff --git a/Calibration/TkAlCaRecoProducers/test/parseFwkJobReport.py b/Calibration/TkAlCaRecoProducers/test/parseFwkJobReport.py index 38d858aa33e9a..a809da3fb3fb6 100644 --- a/Calibration/TkAlCaRecoProducers/test/parseFwkJobReport.py +++ b/Calibration/TkAlCaRecoProducers/test/parseFwkJobReport.py @@ -18,7 +18,8 @@ 'SiStripBadStripRcdHitEff_pcl', 'SiStripLA_pcl', 'SiPixelAli_pcl', # Alignment - 'SiPixelAliHG_pcl'] + 'SiPixelAliHG_pcl', + 'SiPixelAliHGCombined_pcl'] TARGET_DQM_FILES=1 TARGET_DQM_FILENAME='./DQM_V0001_R000325022__Express__PCLTest__ALCAPROMPT.root' TARGET_DB_FILES=len(TARGET_LIST_OF_TAGS) diff --git a/Calibration/TkAlCaRecoProducers/test/testPCLAlCaHarvesting.py b/Calibration/TkAlCaRecoProducers/test/testPCLAlCaHarvesting.py index 67ef31a7adfe8..7d4d2b22f8afb 100644 --- a/Calibration/TkAlCaRecoProducers/test/testPCLAlCaHarvesting.py +++ b/Calibration/TkAlCaRecoProducers/test/testPCLAlCaHarvesting.py @@ -83,6 +83,7 @@ def findRunStopTime(run_number): process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiStripHitEff_dbOutput) process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiPixelAli_dbOutput) process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiPixelAliHG_dbOutput) +process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiPixelAliHGCombined_dbOutput) process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiPixelLA_dbOutput) process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiPixelLAMCS_dbOutput) process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiStripLA_dbOutput) @@ -106,6 +107,7 @@ def findRunStopTime(run_number): process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiStripHitEff_metadata) 
process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiPixelAli_metadata) process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiPixelAliHG_metadata) +process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiPixelAliHGCombined_metadata) process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiPixelLA_metadata) process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiPixelLAMCS_metadata) process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiStripLA_metadata) @@ -164,6 +166,7 @@ def findRunStopTime(run_number): process.SiStripHitEff, process.SiPixelAli, process.SiPixelAliHG, + process.SiPixelAliHGCombined, process.SiPixelLA, process.SiPixelLAMCS, process.SiStripLA, diff --git a/CommonTools/RecoAlgos/plugins/BooleanFlagFilter.cc b/CommonTools/RecoAlgos/plugins/BooleanFlagFilter.cc index 1242df76c266b..7cae7f72190e6 100644 --- a/CommonTools/RecoAlgos/plugins/BooleanFlagFilter.cc +++ b/CommonTools/RecoAlgos/plugins/BooleanFlagFilter.cc @@ -35,17 +35,9 @@ class BooleanFlagFilter : public edm::global::EDFilter<> { public: explicit BooleanFlagFilter(const edm::ParameterSet&); - ~BooleanFlagFilter() override; private: - //virtual void beginJob() override; bool filter(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; - //virtual void endJob() override; - - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetTokenT inputToken_; @@ -69,11 +61,6 @@ BooleanFlagFilter::BooleanFlagFilter(const edm::ParameterSet& iConfig) { reverse_ = iConfig.getParameter("reverseDecision"); } -BooleanFlagFilter::~BooleanFlagFilter() { - // do anything here 
that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -96,52 +83,5 @@ bool BooleanFlagFilter::filter(edm::StreamID, edm::Event& iEvent, const edm::Eve return result; } -// ------------ method called once each job just before starting event loop ------------ -/* -void -BooleanFlagFilter::beginJob() -{ -} -*/ - -// ------------ method called once each job just after ending the event loop ------------ -/* -void -BooleanFlagFilter::endJob() { -} -*/ - -// ------------ method called when starting to processes a run ------------ -/* -void -BooleanFlagFilter::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -BooleanFlagFilter::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -BooleanFlagFilter::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -BooleanFlagFilter::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - //define this as a plug-in DEFINE_FWK_MODULE(BooleanFlagFilter); diff --git a/CommonTools/UtilAlgos/interface/Merger.h b/CommonTools/UtilAlgos/interface/Merger.h index 63c72cc1fe6fd..054baa5943a7f 100644 --- a/CommonTools/UtilAlgos/interface/Merger.h +++ b/CommonTools/UtilAlgos/interface/Merger.h @@ -19,6 +19,8 @@ */ #include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/Event.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Utilities/interface/transform.h" #include "FWCore/Utilities/interface/InputTag.h" @@ 
-34,19 +36,20 @@ class Merger : public edm::global::EDProducer<> { explicit Merger(const edm::ParameterSet&); /// destructor ~Merger() override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: /// process an event void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; /// vector of strings - typedef std::vector > vtoken; + typedef std::vector> vtoken; /// labels of the collections to be merged vtoken srcToken_; }; template Merger::Merger(const edm::ParameterSet& par) - : srcToken_(edm::vector_transform(par.template getParameter >("src"), + : srcToken_(edm::vector_transform(par.template getParameter>("src"), [this](edm::InputTag const& tag) { return consumes(tag); })) { produces(); } @@ -69,4 +72,15 @@ void Merger::produce(edm::StreamID, evt.put(std::move(coll)); } +template +void Merger::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add>("src", + { + edm::InputTag("collection1"), + edm::InputTag("collection2"), + }); + descriptions.addWithDefaultLabel(desc); +} + #endif diff --git a/CommonTools/Utils/interface/PdgIdSelector.h b/CommonTools/Utils/interface/PdgIdSelector.h index 2d6db6699a984..9b2e16896f516 100644 --- a/CommonTools/Utils/interface/PdgIdSelector.h +++ b/CommonTools/Utils/interface/PdgIdSelector.h @@ -17,6 +17,7 @@ struct PdgIdSelector { end_ = pdgId_.end(); } PdgIdSelector(const PdgIdSelector& o) : pdgId_(o.pdgId_), begin_(pdgId_.begin()), end_(pdgId_.end()) {} + PdgIdSelector& operator=(const PdgIdSelector& o) = default; PdgIdSelector& operator==(const PdgIdSelector& o) { *this = o; return *this; diff --git a/CommonTools/Utils/interface/StatusSelector.h b/CommonTools/Utils/interface/StatusSelector.h index f5fb76acb6c11..4d320dac52128 100644 --- a/CommonTools/Utils/interface/StatusSelector.h +++ b/CommonTools/Utils/interface/StatusSelector.h @@ -17,6 +17,7 @@ struct StatusSelector { end_ = status_.end(); } 
StatusSelector(const StatusSelector& o) : status_(o.status_), begin_(status_.begin()), end_(status_.end()) {} + StatusSelector& operator=(const StatusSelector& o) = default; StatusSelector& operator==(const StatusSelector& o) { *this = o; return *this; diff --git a/CondCore/AlignmentPlugins/interface/AlignmentPayloadInspectorHelper.h b/CondCore/AlignmentPlugins/interface/AlignmentPayloadInspectorHelper.h index b7dfc237ea682..425546e04a2df 100644 --- a/CondCore/AlignmentPlugins/interface/AlignmentPayloadInspectorHelper.h +++ b/CondCore/AlignmentPlugins/interface/AlignmentPayloadInspectorHelper.h @@ -15,8 +15,9 @@ #include "Alignment/CommonAlignment/interface/Utilities.h" #include "CondFormats/Alignment/interface/Alignments.h" #include "DataFormats/GeometryVector/interface/GlobalPoint.h" -#include "DataFormats/Math/interface/deltaPhi.h" // for deltaPhi #include "DataFormats/Math/interface/Rounding.h" // for rounding +#include "DataFormats/Math/interface/deltaPhi.h" // for deltaPhi +#include "DataFormats/SiStripDetId/interface/StripSubdetector.h" #include "DataFormats/TrackerCommon/interface/TrackerTopology.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" diff --git a/CondCore/CTPPSPlugins/interface/DAQMappingPayloadInspectorHelper.h b/CondCore/CTPPSPlugins/interface/DAQMappingPayloadInspectorHelper.h new file mode 100644 index 0000000000000..4642d1fbccf9a --- /dev/null +++ b/CondCore/CTPPSPlugins/interface/DAQMappingPayloadInspectorHelper.h @@ -0,0 +1,91 @@ +#ifndef CONDCORE_CTPPSPLUGINS_PPSDAQMAPPINGPAYLOADINSPECTORHELPER_H +#define CONDCORE_CTPPSPLUGINS_PPSDAQMAPPINGPAYLOADINSPECTORHELPER_H + +// User includes +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "CondCore/Utilities/interface/PayloadInspectorModule.h" +#include "CondCore/Utilities/interface/PayloadInspector.h" +#include "CondCore/CondDB/interface/Time.h" +#include "CondFormats/PPSObjects/interface/TotemDAQMapping.h" +#include 
"DataFormats/CTPPSDetId/interface/CTPPSDetId.h" + +// system includes +#include +#include + +// ROOT includes +#include "TCanvas.h" +#include "TStyle.h" +#include "TH2F.h" +#include "TLatex.h" +#include "TGraph.h" + +namespace DAQMappingPI { + inline std::string resolveDetIDForDAQMapping(int detIDNumber) { + static const std::map mapping = {{CTPPSDetId::SubDetector::sdTrackingStrip, "Strip"}, + {CTPPSDetId::SubDetector::sdTrackingPixel, "Pixel"}, + {CTPPSDetId::SubDetector::sdTimingDiamond, "Diamond"}, + {CTPPSDetId::SubDetector::sdTimingFastSilicon, "FastSilicon"}, + {CTPPSDetId::SubDetector::sdTotemT2, "TotemT2"}}; + + auto it = mapping.find(detIDNumber); + if (it != mapping.end()) { + return it->second; + } else { + return "not defined"; + } + } +} // namespace DAQMappingPI + +template +class DAQMappingPayloadInfo + : public cond::payloadInspector::PlotImage { +public: + DAQMappingPayloadInfo() + : cond::payloadInspector::PlotImage( + "DAQMappingPayloadInfo text") {} + + bool fill() override { + auto tag = cond::payloadInspector::PlotBase::getTag<0>(); + auto tagname = tag.name; + auto iov = tag.iovs.back(); + auto m_payload = this->fetchPayload(std::get<1>(iov)); + + if (m_payload != nullptr) { + std::stringstream payloadInfo, lineCountStream; + int subDet = CTPPSDetId(m_payload->VFATMapping.begin()->second.symbolicID.symbolicID).subdetId(); + payloadInfo << "TAG: " << tagname << ", the mapping for: " << DAQMappingPI::resolveDetIDForDAQMapping(subDet) + << std::endl; + payloadInfo << *m_payload; + lineCountStream << *m_payload; + std::string line; + + //created to dynamically set canvas height + int lineCounter = 0; + while (std::getline(lineCountStream, line)) { + lineCounter++; + } + + TCanvas canvas("canvas", "Canvas", 800, 20 * lineCounter); + + TLatex latex; + latex.SetNDC(); + latex.SetTextSize(0.015); + double yPos = 0.95; + + while (std::getline(payloadInfo, line)) { + yPos -= 0.015; + latex.DrawLatex(0.1, yPos, line.c_str()); + } + + std::string 
fileName(this->m_imageFileName); + canvas.SaveAs(fileName.c_str()); + + return true; + } else { + return false; + } + } +}; + +#endif diff --git a/CondCore/CTPPSPlugins/interface/PPSTimingCalibrationPayloadInspectorHelper.h b/CondCore/CTPPSPlugins/interface/PPSTimingCalibrationPayloadInspectorHelper.h index a839603e89bf8..a1196ab356b2b 100644 --- a/CondCore/CTPPSPlugins/interface/PPSTimingCalibrationPayloadInspectorHelper.h +++ b/CondCore/CTPPSPlugins/interface/PPSTimingCalibrationPayloadInspectorHelper.h @@ -19,167 +19,69 @@ #include "TLatex.h" #include "TGraph.h" -class PPSTimingCalibrationPI { -public: +namespace PPSTimingCalibrationPI { enum parameter { parameter0 = 0, parameter1 = 1, parameter2 = 2, parameter3 = 3 }; - enum conditions_db { db0 = 0, db1 = 1 }; - - enum conditions_plane { plane0 = 0, plane1 = 1, plane2 = 2, plane3 = 3 }; - - enum conditions_channel { - channel0 = 0, - channel1 = 1, - channel2 = 2, - channel3 = 3, - channel4 = 4, - channel5 = 5, - channel6 = 6, - channel7 = 7, - channel8 = 8, - channel9 = 9, - channel10 = 10, - channel11 = 11 - - }; - - static std::string getStringFromParamEnum(const parameter& parameter) { - switch (parameter) { - case 0: - return "parameter 0"; - case 1: - return "parameter 1"; - case 2: - return "parameter 2"; - case 3: - return "parameter 3"; - - default: - return "not here"; - } - } + inline std::string getStringFromParamEnum(const parameter& parameter) { + const std::map parameters = {{parameter0, "parameter 0"}, + {parameter1, "parameter 1"}, + {parameter2, "parameter 2"}, + {parameter3, "parameter 3"}}; - static std::string getStringFromDbEnum(const conditions_db& db) { - switch (db) { - case 0: - return "db = 0"; - case 1: - return "db = 1"; - - default: - return "not here"; - } - } - - static std::string getStringFromPlaneEnum(const conditions_plane& plane) { - switch (plane) { - case 0: - return "plane = 0"; - case 1: - return "plane = 1"; - case 2: - return "plane = 2"; - case 3: - return "plane = 3"; 
- - default: - return "not here"; + auto it = parameters.find(parameter); + if (it != parameters.end()) { + return it->second; + } else { + return "no param"; } } - static std::string getStringFromChannelEnum(const conditions_channel& channel) { - switch (channel) { - case 0: - return "channel = 0"; - case 1: - return "channel = 1"; - case 2: - return "channel = 2"; - case 3: - return "channel = 3"; - case 4: - return "channel = 4"; - case 5: - return "channel = 5"; - case 6: - return "channel = 6"; - case 7: - return "channel = 7"; - case 8: - return "channel = 8"; - case 9: - return "channel = 9"; - case 10: - return "channel = 10"; - case 11: - return "channel = 11"; - - default: - return "not here"; - } - } -}; + const std::string ARM = "db (0,1)"; + const std::string STATION = "station (1,2)"; + const std::string PLANE = "plane (0-3)"; + const std::string CHANNEL = "channel (0-11)"; +} // namespace PPSTimingCalibrationPI /************************************************ History plots *************************************************/ -template +template class ParametersPerRun : public cond::payloadInspector::HistoryPlot { public: ParametersPerRun() : cond::payloadInspector::HistoryPlot( - PPSTimingCalibrationPI::getStringFromParamEnum(param) + " " + - PPSTimingCalibrationPI::getStringFromDbEnum(db) + " " + - PPSTimingCalibrationPI::getStringFromPlaneEnum(plane) + " " + - PPSTimingCalibrationPI::getStringFromChannelEnum(channel) + " vs. Runs", - PPSTimingCalibrationPI::getStringFromParamEnum(param)) {} + "Parameter " + PPSTimingCalibrationPI::getStringFromParamEnum(param) + " vs. 
Runs", + PPSTimingCalibrationPI::getStringFromParamEnum(param)) { + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::ARM); + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::STATION); + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::PLANE); + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::CHANNEL); + } - float getFromPayload(PayloadType& payload) override { return payload.parameters(db, 1, plane, channel)[param]; } -}; + float getFromPayload(PayloadType& payload) override { + auto paramValues = cond::payloadInspector::PlotBase::inputParamValues(); + auto db = paramValues.find(PPSTimingCalibrationPI::ARM)->second; + auto station = paramValues.find(PPSTimingCalibrationPI::STATION)->second; + auto plane = paramValues.find(PPSTimingCalibrationPI::PLANE)->second; + auto channel = paramValues.find(PPSTimingCalibrationPI::CHANNEL)->second; -/************************************************ - X-Y correlation plots -*************************************************/ -template -class PpPCorrelation : public cond::payloadInspector::ScatterPlot { -public: - PpPCorrelation() - : cond::payloadInspector::ScatterPlot( - "TimingCalibration " + PPSTimingCalibrationPI::getStringFromParamEnum(param1) + " vs. 
" + - PPSTimingCalibrationPI::getStringFromParamEnum(param2) + " on " + - PPSTimingCalibrationPI::getStringFromDbEnum(db) + " " + - PPSTimingCalibrationPI::getStringFromPlaneEnum(plane) + " " + - PPSTimingCalibrationPI::getStringFromChannelEnum(channel), - PPSTimingCalibrationPI::getStringFromParamEnum(param1), - PPSTimingCalibrationPI::getStringFromParamEnum(param2)) {} - - std::tuple getFromPayload(PayloadType& payload) override { - return std::make_tuple(payload.parameters(db, 1, plane, channel)[param1], - payload.parameters(db, 1, plane, channel)[param2]); + return payload.parameters(std::stoi(db), std::stoi(station), std::stoi(plane), std::stoi(channel))[param]; } }; /************************************************ - Other plots + Image plots *************************************************/ -template +template class ParametersPerChannel : public cond::payloadInspector::PlotImage { public: ParametersPerChannel() : cond::payloadInspector::PlotImage( - "PPSTimingCalibration parameters per channel") {} + "PPSTimingCalibration parameters per channel") { + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::ARM); + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::STATION); + cond::payloadInspector::PlotBase::addInputParam(PPSTimingCalibrationPI::PLANE); + } bool fill() override { auto tag = cond::payloadInspector::PlotBase::getTag<0>(); @@ -187,6 +89,11 @@ class ParametersPerChannel : public cond::payloadInspector::PlotImagefetchPayload(std::get<1>(iov)); + auto paramValues = cond::payloadInspector::PlotBase::inputParamValues(); + auto db = paramValues.find(PPSTimingCalibrationPI::ARM)->second; + auto station = paramValues.find(PPSTimingCalibrationPI::STATION)->second; + auto plane = paramValues.find(PPSTimingCalibrationPI::PLANE)->second; + if (m_payload != nullptr) { TCanvas canvas( "PPSTimingCalibration parameters per channel", "PPSTimingCalibration parameters per channel", 1400, 1000); @@ -196,14 +103,14 @@ class 
ParametersPerChannel : public cond::payloadInspector::PlotImageparameters(db, 1, plane, i)[param]; + y[i] = m_payload->parameters(std::stoi(db), std::stoi(station), std::stoi(plane), i)[param]; x[i] = i; } TGraph* graph = new TGraph(n, x, y); - graph->SetTitle(("PPSTimingCalibration " + PPSTimingCalibrationPI::getStringFromDbEnum(db) + " " + - PPSTimingCalibrationPI::getStringFromPlaneEnum(plane) + " " + - PPSTimingCalibrationPI::getStringFromParamEnum(param) + " per channel; channel; parameter") + graph->SetTitle(("PPSTimingCalibration db = " + db + ", " + "station = " + station + ", " + "plane = " + plane + + ", " + PPSTimingCalibrationPI::getStringFromParamEnum(param) + " PER channel; channel; " + + PPSTimingCalibrationPI::getStringFromParamEnum(param)) .c_str()); graph->SetMarkerColor(2); graph->SetMarkerSize(1.5); @@ -223,4 +130,4 @@ class ParametersPerChannel : public cond::payloadInspector::PlotImage - \ No newline at end of file + + + + + + diff --git a/CondCore/CTPPSPlugins/plugins/DAQMapping_PayloadInspector.cc b/CondCore/CTPPSPlugins/plugins/DAQMapping_PayloadInspector.cc new file mode 100644 index 0000000000000..4bcf88c1f3691 --- /dev/null +++ b/CondCore/CTPPSPlugins/plugins/DAQMapping_PayloadInspector.cc @@ -0,0 +1,18 @@ +/**************************************************************************** + * + * This is a part of PPS PI software. 
+ * + ****************************************************************************/ + +#include "CondCore/Utilities/interface/PayloadInspectorModule.h" +#include "CondCore/Utilities/interface/PayloadInspector.h" +#include "CondCore/CondDB/interface/Time.h" +#include "CondCore/CondDB/interface/PayloadReader.h" +#include "CondFormats/PPSObjects/interface/TotemDAQMapping.h" +#include "CondCore/CTPPSPlugins/interface/DAQMappingPayloadInspectorHelper.h" + +namespace { + typedef DAQMappingPayloadInfo DAQMappingPayloadInfo_Text; +} + +PAYLOAD_INSPECTOR_MODULE(TotemDAQMapping) { PAYLOAD_INSPECTOR_CLASS(DAQMappingPayloadInfo_Text); } diff --git a/CondCore/CTPPSPlugins/plugins/PPSTimingCalibration_PayloadInspector.cc b/CondCore/CTPPSPlugins/plugins/PPSTimingCalibration_PayloadInspector.cc index 7dc1de4800696..e02afa525e697 100644 --- a/CondCore/CTPPSPlugins/plugins/PPSTimingCalibration_PayloadInspector.cc +++ b/CondCore/CTPPSPlugins/plugins/PPSTimingCalibration_PayloadInspector.cc @@ -1,3 +1,4 @@ + #include "CondCore/Utilities/interface/PayloadInspectorModule.h" #include "CondCore/Utilities/interface/PayloadInspector.h" #include "CondCore/CondDB/interface/Time.h" @@ -18,3520 +19,38 @@ namespace { History plots *************************************************/ - //db=0, plane=0, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param3; - - //db=0, plane=0, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param2; - - 
typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param3; - - //db=0, plane=0, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param3; - - //db=0, plane=0, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param3; - - //db=0, plane=0, channel=4 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param3; - - //db=0, plane=0, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param3; - - //db=0, plane=0, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param1; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param3; - - //db=0, plane=0, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param3; - - //db=0, plane=0, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param3; - - //db=0, plane=0, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param3; - - //db=0, plane=0, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param3; - - //db=0, plane=0, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param0; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param3; - - //db=0, plane=1, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param3; - - //db=0, plane=1, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param3; - - //db=0, plane=1, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param3; - - //db=0, plane=1, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param3; - - //db=0, plane=1, channel=4 - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param3; - - //db=0, plane=1, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param3; - - //db=0, plane=1, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param3; - - //db=0, plane=1, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param3; - - //db=0, plane=1, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param3; - - 
//db=0, plane=1, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param3; - - //db=0, plane=1, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param3; - - //db=0, plane=1, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param3; - - //db=0, plane=2, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param3; - - //db=0, plane=2, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param1; + using PPSTimingCalibration_history_htdc_calibration_param0 = + ParametersPerRun; + using 
PPSTimingCalibration_history_htdc_calibration_param1 = + ParametersPerRun; + using PPSTimingCalibration_history_htdc_calibration_param2 = + ParametersPerRun; + using PPSTimingCalibration_history_htdc_calibration_param3 = + ParametersPerRun; - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param3; - - //db=0, plane=2, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param3; - - //db=0, plane=2, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param3; - - //db=0, plane=2, channel=4 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param3; - - //db=0, plane=2, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param2; - - typedef 
ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param3; - - //db=0, plane=2, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param3; - - //db=0, plane=2, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param3; - - //db=0, plane=2, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param3; - - //db=0, plane=2, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param3; - - //db=0, plane=2, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param1; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param3; - - //db=0, plane=2, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param3; - - //db=0, plane=3, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param3; - - //db=0, plane=3, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param3; - - //db=0, plane=3, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param3; - - //db=0, plane=3, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param0; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param3; - - //db=0, plane=3, channel=4 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param3; - - //db=0, plane=3, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param3; - - //db=0, plane=3, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param3; - - //db=0, plane=3, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param3; - - //db=0, plane=3, channel=8 - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param3; - - //db=0, plane=3, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param3; - - //db=0, plane=3, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param3; - - //db=0, plane=3, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param3; - - //db=1, plane=0, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param2; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param3; - - //db=1, plane=0, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param3; - - //db=1, plane=0, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param1; + /************************************************ + Image plots + *************************************************/ - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param2; + using PPSTimingCalibration_htdc_calibration_param0_per_channels = + ParametersPerChannel; + using PPSTimingCalibration_htdc_calibration_param1_per_channels = + ParametersPerChannel; + using PPSTimingCalibration_htdc_calibration_param2_per_channels = + ParametersPerChannel; + using PPSTimingCalibration_htdc_calibration_param3_per_channels = + ParametersPerChannel; - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param3; - - //db=1, plane=0, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param3; - - //db=1, plane=0, channel=4 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param0; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param3; - - //db=1, plane=0, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param3; - - //db=1, plane=0, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param3; - - //db=1, plane=0, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param3; - - //db=1, plane=0, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param3; - - //db=1, plane=0, channel=9 - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param3; - - //db=1, plane=0, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param3; - - //db=1, plane=0, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param3; - - //db=1, plane=1, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param3; - - //db=1, plane=1, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param2; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param3; - - //db=1, plane=1, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param3; - - //db=1, plane=1, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param3; - - //db=1, plane=1, channel=4 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param3; - - //db=1, plane=1, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param3; - - //db=1, plane=1, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param1; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param3; - - //db=1, plane=1, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param3; - - //db=1, plane=1, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param3; - - //db=1, plane=1, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param3; - - //db=1, plane=1, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param3; - - //db=1, plane=1, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param0; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param3; - - //db=1, plane=2, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param3; - - //db=1, plane=2, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param3; - - //db=1, plane=2, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param3; - - //db=1, plane=2, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param3; - - //db=1, plane=2, channel=4 - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param3; - - //db=1, plane=2, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param3; - - //db=1, plane=2, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param3; - - //db=1, plane=2, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param3; - - //db=1, plane=2, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param3; - - 
//db=1, plane=2, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param3; - - //db=1, plane=2, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param3; - - //db=1, plane=2, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param3; - - //db=1, plane=3, channel=0 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param3; - - //db=1, plane=3, channel=1 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param2; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param3; - - //db=1, plane=3, channel=2 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param3; - - //db=1, plane=3, channel=3 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param3; - - //db=1, plane=3, channel=4 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param3; - - //db=1, plane=3, channel=5 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param3; - - //db=1, plane=3, channel=6 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param1; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param3; - - //db=1, plane=3, channel=7 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param3; - - //db=1, plane=3, channel=8 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param3; - - //db=1, plane=3, channel=9 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param3; - - //db=1, plane=3, channel=10 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param0; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param3; - - //db=1, plane=3, channel=11 - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param0; - - typedef ParametersPerRun - 
PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param1; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param2; - - typedef ParametersPerRun - PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param3; - - /************************************************ - X-Y correlation plots - *************************************************/ - - /************************************************ - Image plots - *************************************************/ - - //db=0, plane=0 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl0param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl0param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl0param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl0param3_per_channels; - - //db=0, plane=1 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl1param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl1param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl1param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl1param3_per_channels; - - //db=0, plane=2 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl2param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl2param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl2param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl2param3_per_channels; - - //db=0, plane=3 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl3param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl3param1_per_channels; - - 
typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl3param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db0pl3param3_per_channels; - - //db=1, plane=0 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl0param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl0param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl0param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl0param3_per_channels; - - //db=1, plane=1 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl1param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl1param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl1param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl1param3_per_channels; - - //db=1, plane=2 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl2param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl2param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl2param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl2param3_per_channels; - - //db=1, plane=3 - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl3param0_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl3param1_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl3param2_per_channels; - - typedef ParametersPerChannel - PPSTimingCalibration_htdc_calibration_db1pl3param3_per_channels; - -} // namespace +} // namespace PAYLOAD_INSPECTOR_MODULE(PPSTimingCalibration) { - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param1); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param2); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl0ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch3_param3); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param1); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl1ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param2); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param0); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl2ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param1); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch6_param3); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db0pl3ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param0); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param2); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch10_param3); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl0ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param0); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param2); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl1ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch2_param3); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param1); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl2ch11_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch0_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param2); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch1_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch2_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch3_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch4_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch5_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch6_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param0); 
- PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch7_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch8_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch9_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch10_param3); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param0); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param1); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param2); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_db1pl3ch11_param3); - - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl0param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl0param1_per_channels); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl0param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl0param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl1param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl1param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl1param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl1param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl2param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl2param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl2param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl2param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl3param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl3param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl3param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db0pl3param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl0param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl0param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl0param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl0param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl1param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl1param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl1param2_per_channels); - 
PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl1param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl2param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl2param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl2param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl2param3_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl3param0_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl3param1_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl3param2_per_channels); - PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_db1pl3param3_per_channels); -} + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_param0) + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_param1) + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_param2) + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_history_htdc_calibration_param3) + + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_param0_per_channels) + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_param1_per_channels) + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_param2_per_channels) + PAYLOAD_INSPECTOR_CLASS(PPSTimingCalibration_htdc_calibration_param3_per_channels) +} \ No newline at end of file diff --git a/CondCore/CTPPSPlugins/test/testDAQMapping.sh b/CondCore/CTPPSPlugins/test/testDAQMapping.sh new file mode 100755 index 0000000000000..b187d3e0c1603 --- /dev/null +++ b/CondCore/CTPPSPlugins/test/testDAQMapping.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +if [ "$1" == "run" ] +then + mkdir -p $CMSSW_BASE/src/CondCore/CTPPSPlugins/test/results + if [ -f *.png ]; then + rm *.png + fi + + echo "Testing DAQMapping info" + + getPayloadData.py \ + --plugin 
pluginTotemDAQMapping_PayloadInspector \ + --plot plot_DAQMappingPayloadInfo_Text \ + --tag PPSDAQMapping_TimingDiamond_v1 \ + --time_type Run \ + --iovs '{"start_iov": "283820", "end_iov": "283820"}' \ + --db Prod \ + --test + + mv *.png $CMSSW_BASE/src/CondCore/CTPPSPlugins/test/results/DAQMapping_TextInfo.png + +elif [ "$1" == "clear" ] +then + rm -rf $CMSSW_BASE/src/CondCore/CTPPSPlugins/test/results + +else + echo "Wrong option! (available options: run/clear)" +fi diff --git a/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.cpp b/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.cpp index 9db167a2852d2..8ecbd37e06336 100644 --- a/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.cpp +++ b/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.cpp @@ -30,12 +30,19 @@ int main(int argc, char** argv) { edm::LogPrint("testPPSCalibrationPI") << "## Exercising TimingCalibration plots "; - ParametersPerChannel - test; + ParametersPerChannel test; + py::dict inputs; + inputs["db (0,1)"] = "0"; + inputs["station (1,2)"] = "1"; + inputs["plane (0-3)"] = "0"; + test.setInputParamValues(inputs); test.process(connectionString, PI::mk_input(tag, start, end)); edm::LogPrint("testparametersPerChannel") << test.data(); + + inputs.clear(); +#if PY_MAJOR_VERSION >= 3 + Py_INCREF(inputs.ptr()); +#endif + Py_Finalize(); } diff --git a/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.sh b/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.sh index 278afede43461..c512c449c6b23 100755 --- a/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.sh +++ b/CondCore/CTPPSPlugins/test/testPPSTimingCalibration.sh @@ -10,70 +10,54 @@ then echo "Testing history plots" - for db in 0 1 + for db in 0 #1 do - for pl in 0 1 2 3 + for station in 1 #2 do - for ch in 0 1 2 3 4 5 6 7 8 9 10 11 + for pl in 0 #1 2 3 do - for param in 0 1 2 3 + for ch in 0 #1 2 3 4 5 6 7 8 9 10 11 do - python3 CondCore/Utilities/scripts/getPayloadData.py \ - --plugin pluginPPSTimingCalibration_PayloadInspector \ - 
--plot plot_PPSTimingCalibration_history_htdc_calibration_db${db}pl${pl}ch${ch}_param${param} \ - --tag CTPPPSTimingCalibration_HPTDC_byPCL_v0_prompt \ - --time_type Run \ - --iovs '{"start_iov": "355892", "end_iov": "357079"}' \ - --db Prod \ - --test 2> CondCore/CTPPSPlugins/test/results/data_history__db${db}pl${pl}ch${ch}_param${param}.json + for param in 0 #1 2 3 + do + python3 CondCore/Utilities/scripts/getPayloadData.py \ + --plugin pluginPPSTimingCalibration_PayloadInspector \ + --plot plot_PPSTimingCalibration_history_htdc_calibration_param${param} \ + --input_params '{"db (0,1)":"'${db}'","station (1,2)":"'${station}'", "plane (0-3)":"'${pl}'", "channel (0-11)":"'${ch}'"}' \ + --tag CTPPPSTimingCalibration_HPTDC_byPCL_v0_prompt \ + --time_type Run \ + --iovs '{"start_iov": "357079", "end_iov": "357079"}' \ + --db Prod \ + --test 2> CondCore/CTPPSPlugins/test/results/data_history__db${db}st${station}pl${pl}ch${ch}_param${param}.json + done done done done done - - echo "Testing parameters plots" - - for param1 in 0 1 2 3 - do - for param2 in 1 2 3 - do - if [ "$param1" -lt "$param2" ] - then - getPayloadData.py \ - --plugin pluginPPSTimingCalibration_PayloadInspector \ - --plot plot_PPSTimingCalibration_htdc_calibration_params${param1}${param2} \ - --tag CTPPPSTimingCalibration_HPTDC_byPCL_v0_prompt \ - --time_type Run \ - --iovs '{"start_iov": "355892", "end_iov": "357079"}' \ - --db Prod \ - --test 2> CondCore/CTPPSPlugins/test/results/data_params_${param1}${param2}.json - fi - done - done - python3 CondCore/CTPPSPlugins/test/graph_check.py - echo "Testing channel plots" - - - for db in 0 1 + for db in 0 #1 do - for pl in 0 1 2 3 + for station in 1 #2 do - for param in 0 1 2 3 + for pl in 0 #1 2 3 do - getPayloadData.py \ - --plugin pluginPPSTimingCalibration_PayloadInspector \ - --plot plot_PPSTimingCalibration_htdc_calibration_db${db}pl${pl}param${param}_per_channels \ - --tag CTPPPSTimingCalibration_HPTDC_byPCL_v0_prompt \ - --time_type Run \ - --iovs 
'{"start_iov": "356489", "end_iov": "356489"}' \ - --db Prod \ - --test - mv *.png CondCore/CTPPSPlugins/test/results/plot_PPSTimingCalibration_db${db}pl${pl}param${param}_per_channels.png - done - done + for param in 0 #1 2 3 + do + getPayloadData.py \ + --plugin pluginPPSTimingCalibration_PayloadInspector \ + --plot plot_PPSTimingCalibration_htdc_calibration_param${param}_per_channels \ + --input_params '{"db (0,1)":"'${db}'","station (1,2)":"'${station}'", "plane (0-3)":"'${pl}'"}' \ + --tag CTPPPSTimingCalibration_HPTDC_byPCL_v1_prompt \ + --time_type Run \ + --iovs '{"start_iov": "370092", "end_iov": "370092"}' \ + --db Prod \ + --test + mv *.png CondCore/CTPPSPlugins/test/results/plot_PPSTimingCalibration_db${db}pl${pl}param${param}_per_channels.png + done + done + done done diff --git a/CondCore/CondHDF5ESSource/plugins/HDF5ProductResolver.cc b/CondCore/CondHDF5ESSource/plugins/HDF5ProductResolver.cc index d26254297ec39..104fe9c3292c7 100644 --- a/CondCore/CondHDF5ESSource/plugins/HDF5ProductResolver.cc +++ b/CondCore/CondHDF5ESSource/plugins/HDF5ProductResolver.cc @@ -78,8 +78,10 @@ void HDF5ProductResolver::prefetchAsyncImpl(edm::WaitingTaskHolder iTask, [this, iov = iRecord.validityInterval(), iParent, &iRecord](auto& iGroup, auto iActivity) { queue_->push(iGroup, [this, &iGroup, act = std::move(iActivity), iov, iParent, &iRecord] { CMS_SA_ALLOW try { - edm::ESModuleCallingContext context( - providerDescription(), edm::ESModuleCallingContext::State::kRunning, iParent); + edm::ESModuleCallingContext context(providerDescription(), + reinterpret_cast(this), + edm::ESModuleCallingContext::State::kRunning, + iParent); iRecord.activityRegistry()->preESModuleSignal_.emit(iRecord.key(), context); struct EndGuard { EndGuard(edm::eventsetup::EventSetupRecordImpl const& iRecord, diff --git a/CondCore/ESSources/plugins/CondDBESSource.cc b/CondCore/ESSources/plugins/CondDBESSource.cc index dbbdd4627f2be..f01630f9a84b5 100644 --- 
a/CondCore/ESSources/plugins/CondDBESSource.cc +++ b/CondCore/ESSources/plugins/CondDBESSource.cc @@ -30,6 +30,10 @@ #include +#include + +using json = nlohmann::json; + namespace { /* utility ot build the name of the plugin corresponding to a given record se ESSources @@ -85,6 +89,27 @@ namespace { out << " " << id.since << " - " << id.till << " : " << id.payloadId << std::endl; } + void dumpInfoJson(json& jsonData, const std::string& recName, cond::ProductResolverWrapperBase const& proxy) { + json recordData; + recordData["label"] = proxy.label(); + recordData["connectionString"] = proxy.connString(); + recordData["tag"] = proxy.tag(); + + // Code to fill the JSON structure + + recordData["timeLookupPayloadIds"] = json::array(); + const auto& pids = *proxy.requests(); + for (const auto& id : pids) { + json payloadIdData; + payloadIdData["since"] = id.since; + payloadIdData["till"] = id.till; + payloadIdData["payloadId"] = id.payloadId; + recordData["timeLookupPayloadIds"].push_back(payloadIdData); + } + + jsonData[recName].push_back(recordData); + } + } // namespace /* @@ -96,7 +121,8 @@ namespace { * toGet: list of record label tag connection-string to add/overwrite the content of the global-tag */ CondDBESSource::CondDBESSource(const edm::ParameterSet& iConfig) - : m_connection(), + : m_jsonDumpFilename(iConfig.getUntrackedParameter("JsonDumpFileName", "")), + m_connection(), m_connectionString(""), m_frontierKey(""), m_lastRun(0), // for the stat @@ -322,15 +348,28 @@ void CondDBESSource::fillList(const std::string& stringList, } } +void CondDBESSource::printStatistics(const Stats& stats) const { + std::cout << "CondDBESSource Statistics\n" + << "DataProxy " << stats.nData << " setInterval " << stats.nSet << " Runs " << stats.nRun << " Lumis " + << stats.nLumi << " Refresh " << stats.nRefresh << " Actual Refresh " << stats.nActualRefresh + << " Reconnect " << stats.nReconnect << " Actual Reconnect " << stats.nActualReconnect << std::endl; +} + +void 
saveJsonToFile(const json& jsonData, const std::string& filename) { + std::ofstream outputFile(filename); + if (outputFile.is_open()) { + outputFile << jsonData.dump(2) << std::endl; + std::cout << "JSON data saved in file '" << filename << "'" << std::endl; + } else { + std::cerr << "Error opening file to write JSON data." << std::endl; + } +} + CondDBESSource::~CondDBESSource() { //dump info FIXME: find a more suitable place... if (m_doDump) { - std::cout << "CondDBESSource Statistics" << std::endl - << "ProductResolver " << m_stats.nData << " setInterval " << m_stats.nSet << " Runs " << m_stats.nRun - << " Lumis " << m_stats.nLumi << " Refresh " << m_stats.nRefresh << " Actual Refresh " - << m_stats.nActualRefresh << " Reconnect " << m_stats.nReconnect << " Actual Reconnect " - << m_stats.nActualReconnect; - std::cout << std::endl; + //Output CondDBESSource Statistics to the console + printStatistics(m_stats); ResolverMap::iterator b = m_resolvers.begin(); ResolverMap::iterator e = m_resolvers.end(); @@ -338,10 +377,21 @@ CondDBESSource::~CondDBESSource() { dumpInfo(std::cout, (*b).first, *(*b).second); std::cout << "\n" << std::endl; } - - // FIXME - // We shall eventually close transaction and session... } + //if filename was provided for iConfig by process.GlobalTag.JsonDumpFileName =cms.untracked.string("CondDBESSource.json") + if (!m_jsonDumpFilename.empty()) { + json jsonData; + + for (const auto& entry : m_resolvers) { + std::string recName = entry.first; + const auto& proxy = *entry.second; + dumpInfoJson(jsonData, recName, proxy); + } + //Save the dump data to a file in JSON format + saveJsonToFile(jsonData, m_jsonDumpFilename); + } + // FIXME + // We shall eventually close transaction and session... 
} // diff --git a/CondCore/ESSources/plugins/CondDBESSource.h b/CondCore/ESSources/plugins/CondDBESSource.h index 009949041898e..ced8f46d23ca2 100644 --- a/CondCore/ESSources/plugins/CondDBESSource.h +++ b/CondCore/ESSources/plugins/CondDBESSource.h @@ -93,6 +93,8 @@ class CondDBESSource : public edm::eventsetup::ESProductResolverProvider, public typedef enum { NOREFRESH, REFRESH_ALWAYS, REFRESH_OPEN_IOVS, REFRESH_EACH_RUN, RECONNECT_EACH_RUN } RefreshPolicy; + std::string m_jsonDumpFilename; + explicit CondDBESSource(const edm::ParameterSet&); ~CondDBESSource() override; @@ -164,5 +166,7 @@ class CondDBESSource : public edm::eventsetup::ESProductResolverProvider, public const std::vector& roottagList, std::map& replacement, cond::GTMetadata_t& gtMetadata); + + void printStatistics(const Stats& stats) const; }; #endif diff --git a/CondCore/SiPixelPlugins/plugins/SiPixelLorentzAngle_PayloadInspector.cc b/CondCore/SiPixelPlugins/plugins/SiPixelLorentzAngle_PayloadInspector.cc index 4a7a699c09fe7..278ce3dfae7cd 100644 --- a/CondCore/SiPixelPlugins/plugins/SiPixelLorentzAngle_PayloadInspector.cc +++ b/CondCore/SiPixelPlugins/plugins/SiPixelLorentzAngle_PayloadInspector.cc @@ -74,7 +74,9 @@ namespace { gStyle->SetOptStat("emr"); auto tag = PlotBase::getTag<0>(); + auto tagname = PlotBase::getTag<0>().name; auto iov = tag.iovs.front(); + std::string IOVsince = std::to_string(std::get<0>(iov)); std::shared_ptr payload = fetchPayload(std::get<1>(iov)); std::map LAMap_ = payload->getLorentzAngles(); auto extrema = SiPixelPI::findMinMaxInMap(LAMap_); @@ -84,8 +86,8 @@ namespace { auto h1 = std::make_unique("value", "SiPixel LA value;SiPixel LorentzAngle #mu_{H}(tan#theta_{L}/B) [1/T];# modules", 50, - extrema.first * 0.9, - extrema.second * 1.1); + (extrema.first * 0.9), + (extrema.second * 1.1)); SiPixelPI::adjustCanvasMargins(canvas.cd(), 0.06, 0.12, 0.12, 0.05); canvas.Modified(); @@ -106,26 +108,25 @@ namespace { canvas.Update(); - TLegend legend = TLegend(0.40, 0.88, 
0.94, 0.93); - legend.SetHeader(("Payload hash: #bf{" + (std::get<1>(iov)) + "}").c_str(), + TLegend legend = TLegend(0.35, 0.88, 0.94, 0.93); + legend.SetHeader(("#splitline{Payload hash: #bf{" + (std::get<1>(iov)) + "}}{Tag,IOV: #bf{#color[2]{" + tagname + + "}," + IOVsince + "}}") + .c_str(), "C"); // option "C" allows to center the header - //legend.AddEntry(h1.get(), ("IOV: " + std::to_string(std::get<0>(iov))).c_str(), "PL"); - legend.SetTextSize(0.025); + legend.SetTextSize(0.023); legend.SetLineColor(10); legend.Draw("same"); TPaveStats *st = (TPaveStats *)h1->FindObject("stats"); st->SetTextSize(0.03); - SiPixelPI::adjustStats(st, 0.15, 0.83, 0.39, 0.93); + SiPixelPI::adjustStats(st, 0.15, 0.83, 0.34, 0.93); auto ltx = TLatex(); ltx.SetTextFont(62); //ltx.SetTextColor(kBlue); ltx.SetTextSize(0.05); ltx.SetTextAlign(11); - ltx.DrawLatexNDC(gPad->GetLeftMargin(), - 1 - gPad->GetTopMargin() + 0.01, - ("SiPixel Lorentz Angle IOV:" + std::to_string(std::get<0>(iov))).c_str()); + ltx.DrawLatexNDC(gPad->GetLeftMargin(), 1 - gPad->GetTopMargin() + 0.01, "SiPixel Lorentz Angle Value"); std::string fileName(m_imageFileName); canvas.SaveAs(fileName.c_str()); diff --git a/CondCore/SiPixelPlugins/plugins/SiPixelQualityProbabilities_PayloadInspector.cc b/CondCore/SiPixelPlugins/plugins/SiPixelQualityProbabilities_PayloadInspector.cc index 4f443978c4927..8715ed5d90210 100644 --- a/CondCore/SiPixelPlugins/plugins/SiPixelQualityProbabilities_PayloadInspector.cc +++ b/CondCore/SiPixelPlugins/plugins/SiPixelQualityProbabilities_PayloadInspector.cc @@ -24,6 +24,7 @@ #include #include #include +#include // include ROOT #include "TH2F.h" @@ -42,7 +43,7 @@ namespace { using namespace cond::payloadInspector; /************************************************ - 1d histogram of SiPixelQualityProbabilities of 1 IOV + 1d histogram of n. 
scenarios per PU bin in 1 IOV of SiPixelQualityProbabilities *************************************************/ class SiPixelQualityProbabilitiesScenariosCount : public PlotImage { @@ -53,6 +54,7 @@ namespace { bool fill() override { auto tag = PlotBase::getTag<0>(); auto iov = tag.iovs.front(); + auto tagname = tag.name; std::shared_ptr payload = fetchPayload(std::get<1>(iov)); auto PUbins = payload->getPileUpBins(); auto span = PUbins.back() - PUbins.front(); @@ -62,7 +64,7 @@ namespace { TCanvas canvas("Canv", "Canv", 1200, 1000); canvas.cd(); auto h1 = std::make_unique("Count", - "SiPixelQualityProbablities Scenarios count;PU bin;n. of scenarios", + "SiPixelQualityProbablities Scenarios count;PU bin;n. of scenarios per PU bin", span, PUbins.front(), PUbins.back()); @@ -70,11 +72,11 @@ namespace { canvas.SetTopMargin(0.06); canvas.SetBottomMargin(0.12); - canvas.SetLeftMargin(0.12); + canvas.SetLeftMargin(0.13); canvas.SetRightMargin(0.05); canvas.Modified(); - for (const auto &bin : PUbins) { + for (const auto& bin : PUbins) { h1->SetBinContent(bin + 1, payload->nelements(bin)); } @@ -89,21 +91,23 @@ namespace { canvas.Update(); - TLegend legend = TLegend(0.40, 0.88, 0.95, 0.94); + TLegend legend = TLegend(0.40, 0.88, 0.94, 0.93); legend.SetHeader(("Payload hash: #bf{" + (std::get<1>(iov)) + "}").c_str(), - "C"); // option "C" allows to center the header - //legend.AddEntry(h1.get(), ("IOV: " + std::to_string(std::get<0>(iov))).c_str(), "PL"); + "C"); // option "C" allows to center the header + legend.SetBorderSize(0); // Set the border size to zero to remove visible borders legend.SetTextSize(0.025); legend.Draw("same"); auto ltx = TLatex(); ltx.SetTextFont(62); //ltx.SetTextColor(kBlue); - ltx.SetTextSize(0.05); + ltx.SetTextSize(0.037); ltx.SetTextAlign(11); - ltx.DrawLatexNDC(gPad->GetLeftMargin() + 0.1, - 1 - gPad->GetTopMargin() + 0.01, - ("SiPixelQualityProbabilities IOV:" + std::to_string(std::get<0>(iov))).c_str()); + + const auto& headerText = + 
fmt::sprintf("#color[4]{%s},IOV: #color[4]{%s}", tagname, std::to_string(std::get<0>(iov))); + + ltx.DrawLatexNDC(gPad->GetLeftMargin() + 0.1, 1 - gPad->GetTopMargin() + 0.01, headerText.c_str()); std::string fileName(m_imageFileName); canvas.SaveAs(fileName.c_str()); @@ -112,9 +116,149 @@ namespace { } }; + /************************************************ + Probability density per PU bin of 1 IOV of SiPixelQualityProbabilities + *************************************************/ + + class SiPixelQualityProbabilityDensityPerPUbin : public PlotImage { + public: + SiPixelQualityProbabilityDensityPerPUbin() : PlotImage("") { + PlotBase::addInputParam("PU bin"); + } + + bool fill() override { + auto tag = PlotBase::getTag<0>(); + auto iov = tag.iovs.front(); + auto tagname = tag.name; + std::shared_ptr payload = fetchPayload(std::get<1>(iov)); + auto PUbins = payload->getPileUpBins(); + + // initialize the PUbin + unsigned int PUbin(0); + auto paramValues = PlotBase::inputParamValues(); + auto ip = paramValues.find("PU bin"); + if (ip != paramValues.end()) { + PUbin = std::stoul(ip->second); + } else { + edm::LogWarning("SiPixelQualityProbabilityDensityPerPUbin") + << "\n WARNING!!!! \n The needed parameter 'PU bin' has not been passed. Will use all PU bins! 
\n"; + PUbin = k_ALLPUBINS; + } + + // graphics + TGaxis::SetMaxDigits(3); + + SiPixelQualityProbabilities::probabilityVec probVec; + if (PUbin != k_ALLPUBINS) { + probVec = payload->getProbabilities(PUbin); + } else { + if (PUbins.front() == 0) { + // if a PU bin = 0 exist the PU-averaged is in bin=0 + probVec = payload->getProbabilities(0); + } else { + // we need to build the PDF by hand + // create a list of the probabilities for all the PU bins + std::vector listOfProbabilityVec; + for (unsigned int bin = PUbins.front(); bin <= PUbins.back(); bin++) { + const auto& probsForBin = payload->getProbabilities(bin); + listOfProbabilityVec.push_back(probsForBin); + } + + // Map to store string and pair of floats (sum and count) + std::map> stringFloatMap; + + // Loop through the list of probabilityVec elements + for (const auto& vec : listOfProbabilityVec) { + // For each pair in the current vector + for (const auto& pair : vec) { + const std::string& currentScen = pair.first; + const float& currentProb = pair.second; + + // Check if the string exists in the map + auto it = stringFloatMap.find(currentScen); + if (it != stringFloatMap.end()) { + // If the scenario already exists, update the probability sum and count + it->second.first += currentProb; + it->second.second++; + } else { + // If the string doesn't exist, add it to the map + stringFloatMap[currentScen] = {currentProb, 1}; // Initialize sum and count + } + } + } + + // Calculate the average and populate the new probabilityVec from the map + for (const auto& pair : stringFloatMap) { + float average = pair.second.first / pair.second.second; // Calculate average + probVec.emplace_back(pair.first, average); + } + } // if the first PU bin is not 0 + } // if we're asking for all the PU bins + + TCanvas canvas("Canv", "Canv", 1200, 1000); + canvas.cd(); + auto h1 = std::make_unique("SiPixelQuality PDF", + "probability density vs scenario; scenario serial ID number; probability", + probVec.size(), + -0.5, + 
probVec.size() - 0.5); + h1->SetStats(false); + + canvas.SetTopMargin(0.06); + canvas.SetBottomMargin(0.12); + canvas.SetLeftMargin(0.13); + canvas.SetRightMargin(0.09); + canvas.Modified(); + + unsigned int count{0}; + for (const auto& [name, prob] : probVec) { + h1->SetBinContent(count, prob); + count++; + } + + h1->SetTitle(""); + h1->GetYaxis()->SetRangeUser(0., h1->GetMaximum() * 1.30); + h1->SetFillColor(kRed); + h1->SetMarkerStyle(20); + h1->SetMarkerSize(1); + h1->Draw("bar2"); + + SiPixelPI::makeNicePlotStyle(h1.get()); + + canvas.Update(); + + TLegend legend = TLegend(0.39, 0.88, 0.89, 0.93); + std::string puBinString = (PUbin == k_ALLPUBINS) ? "PU bin: #bf{all}" : fmt::sprintf("PU bin: #bf{%u}", PUbin); + legend.SetHeader(("#splitline{Payload hash: #bf{" + (std::get<1>(iov)) + "}}{" + puBinString + "}").c_str(), + "C"); // option "C" allows to center the header + legend.SetBorderSize(0); // Set the border size to zero to remove visible borders + legend.SetTextSize(0.025); + legend.Draw("same"); + + auto ltx = TLatex(); + ltx.SetTextFont(62); + ltx.SetTextSize(0.03); + ltx.SetTextAlign(11); + + const auto& headerText = + fmt::sprintf("#color[4]{%s}, IOV: #color[4]{%s}", tagname, std::to_string(std::get<0>(iov))); + + ltx.DrawLatexNDC(gPad->GetLeftMargin() + 0.1, 1 - gPad->GetTopMargin() + 0.01, headerText.c_str()); + + std::string fileName(m_imageFileName); + canvas.SaveAs(fileName.c_str()); + + return true; + } + + private: + static constexpr unsigned int k_ALLPUBINS = 9999; + }; + } // namespace // Register the classes as boost python plugin PAYLOAD_INSPECTOR_MODULE(SiPixelQualityProbabilities) { PAYLOAD_INSPECTOR_CLASS(SiPixelQualityProbabilitiesScenariosCount); + PAYLOAD_INSPECTOR_CLASS(SiPixelQualityProbabilityDensityPerPUbin); } diff --git a/CondCore/SiPixelPlugins/test/testMiscellanea.sh b/CondCore/SiPixelPlugins/test/testMiscellanea.sh index 2ae5130a845e6..828f468647e5c 100755 --- a/CondCore/SiPixelPlugins/test/testMiscellanea.sh +++ 
b/CondCore/SiPixelPlugins/test/testMiscellanea.sh @@ -19,3 +19,28 @@ getPayloadData.py \ --test ; mv *.png $W_DIR/display/testPixelMap.png + +# test a given PU bin +getPayloadData.py \ + --plugin pluginSiPixelQualityProbabilities_PayloadInspector \ + --plot plot_SiPixelQualityProbabilityDensityPerPUbin \ + --tag SiPixelQualityProbabilities_UltraLegacy2018_v0_mc \ + --input_params '{"PU bin": "10"}' \ + --time_type Run \ + --iovs '{"start_iov": "1", "end_iov": "1"}' \ + --db Prod \ + --test ; + +mv *.png $W_DIR/display/testSiPixelQualityProbabilityDensity.png + +# test all PU bins +getPayloadData.py \ + --plugin pluginSiPixelQualityProbabilities_PayloadInspector \ + --plot plot_SiPixelQualityProbabilityDensityPerPUbin \ + --tag SiPixelQualityProbabilities_2023_v2_BPix_mc \ + --time_type Run \ + --iovs '{"start_iov": "1", "end_iov": "1"}' \ + --db Prod \ + --test ; + +mv *.png $W_DIR/display/testSiPixelQualityProbabilityDensity_v2.png diff --git a/CondCore/Utilities/plugins/Module_2XML.cc b/CondCore/Utilities/plugins/Module_2XML.cc index 1ac70678e105c..679e75b2c7b8b 100644 --- a/CondCore/Utilities/plugins/Module_2XML.cc +++ b/CondCore/Utilities/plugins/Module_2XML.cc @@ -115,6 +115,12 @@ PAYLOAD_2XML_MODULE(pluginUtilities_payload2xml) { PAYLOAD_2XML_CLASS(EcalTPGOddWeightGroup); PAYLOAD_2XML_CLASS(EcalTPGOddWeightIdMap); PAYLOAD_2XML_CLASS(EcalTPGTPMode); + PAYLOAD_2XML_CLASS(EcalEBPhase2TPGAmplWeightIdMap); + PAYLOAD_2XML_CLASS(EcalEBPhase2TPGAmplWeights); + PAYLOAD_2XML_CLASS(EcalEBPhase2TPGLinearizationConstant); + PAYLOAD_2XML_CLASS(EcalEBPhase2TPGPedestal); + PAYLOAD_2XML_CLASS(EcalEBPhase2TPGTimeWeightIdMap); + PAYLOAD_2XML_CLASS(EcalEBPhase2TPGTimeWeights); PAYLOAD_2XML_CLASS(EcalTimeBiasCorrections); PAYLOAD_2XML_CLASS(EcalTimeDependentCorrections); PAYLOAD_2XML_CLASS(EcalTimeOffsetConstant); diff --git a/CondCore/Utilities/src/CondDBFetch.cc b/CondCore/Utilities/src/CondDBFetch.cc index e84e0b379b889..7816a0641626c 100644 --- 
a/CondCore/Utilities/src/CondDBFetch.cc +++ b/CondCore/Utilities/src/CondDBFetch.cc @@ -145,6 +145,12 @@ namespace cond { FETCH_PAYLOAD_CASE(EcalPulseCovariance) FETCH_PAYLOAD_CASE(EcalCondObjectContainer) FETCH_PAYLOAD_CASE(EcalPulseSymmCovariance) + FETCH_PAYLOAD_CASE(EcalEBPhase2TPGAmplWeightIdMap) + FETCH_PAYLOAD_CASE(EcalEBPhase2TPGAmplWeights) + FETCH_PAYLOAD_CASE(EcalEBPhase2TPGTimeWeightIdMap) + FETCH_PAYLOAD_CASE(EcalEBPhase2TPGTimeWeights) + FETCH_PAYLOAD_CASE(EcalEBPhase2TPGLinearizationConstant) + FETCH_PAYLOAD_CASE(EcalEBPhase2TPGPedestal) FETCH_PAYLOAD_CASE(FileBlob) FETCH_PAYLOAD_CASE(GBRForest) FETCH_PAYLOAD_CASE(GBRForestD) diff --git a/CondCore/Utilities/src/CondDBImport.cc b/CondCore/Utilities/src/CondDBImport.cc index c354ea25e1bed..7d30541b85f86 100644 --- a/CondCore/Utilities/src/CondDBImport.cc +++ b/CondCore/Utilities/src/CondDBImport.cc @@ -165,6 +165,12 @@ namespace cond { IMPORT_PAYLOAD_CASE(EcalPulseCovariance) IMPORT_PAYLOAD_CASE(EcalCondObjectContainer) IMPORT_PAYLOAD_CASE(EcalPulseSymmCovariance) + IMPORT_PAYLOAD_CASE(EcalEBPhase2TPGAmplWeightIdMap) + IMPORT_PAYLOAD_CASE(EcalEBPhase2TPGAmplWeights) + IMPORT_PAYLOAD_CASE(EcalEBPhase2TPGTimeWeightIdMap) + IMPORT_PAYLOAD_CASE(EcalEBPhase2TPGTimeWeights) + IMPORT_PAYLOAD_CASE(EcalEBPhase2TPGPedestal) + IMPORT_PAYLOAD_CASE(EcalEBPhase2TPGLinearizationConstant) IMPORT_PAYLOAD_CASE(FileBlob) IMPORT_PAYLOAD_CASE(GBRForest) IMPORT_PAYLOAD_CASE(GBRForestD) diff --git a/CondCore/Utilities/src/CondFormats.h b/CondCore/Utilities/src/CondFormats.h index 384de4c9c21f8..94ab553acd9f0 100644 --- a/CondCore/Utilities/src/CondFormats.h +++ b/CondCore/Utilities/src/CondFormats.h @@ -73,6 +73,12 @@ #include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" #include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h" #include "CondFormats/EcalObjects/interface/EcalPulseSymmCovariances.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" +#include 
"CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeights.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeights.h" #include "CondFormats/GBRForest/interface/GBRForest.h" #include "CondFormats/GBRForest/interface/GBRForestD.h" #include "CondFormats/HcalObjects/interface/AbsOOTPileupCorrection.h" diff --git a/CondFormats/Common/data/SiPixelAliHGCombRcd_prep.json b/CondFormats/Common/data/SiPixelAliHGCombRcd_prep.json new file mode 100644 index 0000000000000..71a809ee8031b --- /dev/null +++ b/CondFormats/Common/data/SiPixelAliHGCombRcd_prep.json @@ -0,0 +1,10 @@ +{ + "destinationDatabase": "oracle://cms_orcoff_prep/CMS_CONDITIONS", + "destinationTags": { + "SiPixelAliHGCombined_PCL_v0_hlt": {}, + "SiPixelAliHGCombined_PCL_v0_prompt": {} + }, + "inputTag": "SiPixelAliHGCombined_pcl", + "since": null, + "userText": "T0 PCL Upload for SiPixel HG Combined (MiBias + Z->mm) Ali. (prep)" +} diff --git a/CondFormats/Common/data/SiPixelAliHGCombRcd_prod.json b/CondFormats/Common/data/SiPixelAliHGCombRcd_prod.json new file mode 100644 index 0000000000000..06cf5dfc44149 --- /dev/null +++ b/CondFormats/Common/data/SiPixelAliHGCombRcd_prod.json @@ -0,0 +1,10 @@ +{ + "destinationDatabase": "oracle://cms_orcon_prod/CMS_CONDITIONS", + "destinationTags": { + "SiPixelAliHGCombined_PCL_v0_hlt": {}, + "SiPixelAliHGCombined_PCL_v0_prompt": {} + }, + "inputTag": "SiPixelAliHGCombined_pcl", + "since": null, + "userText": "T0 PCL Upload for SiPixel HG Combined (MinBias + Z->mm) Ali. 
(prod)" +} diff --git a/CondFormats/Common/test/DropBoxMetadataReader.py b/CondFormats/Common/test/DropBoxMetadataReader.py index 1d0d09f79f957..487ecec1bbd0a 100644 --- a/CondFormats/Common/test/DropBoxMetadataReader.py +++ b/CondFormats/Common/test/DropBoxMetadataReader.py @@ -38,6 +38,7 @@ 'SiStripApvGainRcd', 'TrackerAlignmentRcd', 'TrackerAlignmentHGRcd', + 'TrackerAlignmentHGCombinedRcd', 'SiStripApvGainRcdAAG', 'EcalPedestalsRcd', "LumiCorrectionsRcd", @@ -49,7 +50,7 @@ "CTPPSRPAlignmentCorrectionsDataRcd", "PPSTimingCalibrationRcd_HPTDC", "PPSTimingCalibrationRcd_SAMPIC", - "SiStripLorentzAngleRcd", + "SiStripLorentzAngleRcd", ) # same strings as fType ) diff --git a/CondFormats/Common/test/ProduceDropBoxMetadata.py b/CondFormats/Common/test/ProduceDropBoxMetadata.py index c2eb1c4cbe614..b55ae1b275136 100644 --- a/CondFormats/Common/test/ProduceDropBoxMetadata.py +++ b/CondFormats/Common/test/ProduceDropBoxMetadata.py @@ -69,6 +69,10 @@ def encodeJsonInString(filename): SiPixelAliHGRcd_prod_str = encodeJsonInString("SiPixelAliHGRcd_prod.json") SiPixelAliHGRcd_prep_str = encodeJsonInString("SiPixelAliHGRcd_prep.json") +#SiPixelAliHGComb +SiPixelAliHGCombRcd_prod_str = encodeJsonInString("SiPixelAliHGCombRcd_prod.json") +SiPixelAliHGCombRcd_prep_str = encodeJsonInString("SiPixelAliHGCombRcd_prep.json") + #EcalPedestalsRcd EcalPedestalsRcd_prod_str = encodeJsonInString("EcalPedestal_prod.json") EcalPedestalsRcd_prep_str = encodeJsonInString("EcalPedestal_prep.json") @@ -174,6 +178,12 @@ def encodeJsonInString(filename): prodMetaData = cms.untracked.string(SiPixelAliHGRcd_prod_str), prepMetaData = cms.untracked.string(SiPixelAliHGRcd_prep_str), ), + cms.PSet(record = cms.untracked.string('TrackerAlignmentHGCombinedRcd'), + Source = cms.untracked.string("AlcaHarvesting"), + FileClass = cms.untracked.string("ALCA"), + prodMetaData = cms.untracked.string(SiPixelAliHGCombRcd_prod_str), + prepMetaData = cms.untracked.string(SiPixelAliHGCombRcd_prep_str), + ), 
cms.PSet(record = cms.untracked.string('SiStripApvGainRcdAAG'), Source = cms.untracked.string("AlcaHarvesting"), FileClass = cms.untracked.string("ALCA"), diff --git a/CondFormats/DataRecord/interface/CTPPSBeamParametersRcd.h b/CondFormats/DataRecord/interface/CTPPSBeamParametersRcd.h index a77796620c968..cd3b52a223e4d 100644 --- a/CondFormats/DataRecord/interface/CTPPSBeamParametersRcd.h +++ b/CondFormats/DataRecord/interface/CTPPSBeamParametersRcd.h @@ -8,10 +8,13 @@ #include "FWCore/Framework/interface/DependentRecordImplementation.h" #include "CondFormats/DataRecord/interface/LHCInfoRcd.h" +#include "CondFormats/DataRecord/interface/LHCInfoPerFillRcd.h" +#include "CondFormats/DataRecord/interface/LHCInfoPerLSRcd.h" #include "FWCore/Utilities/interface/mplVector.h" -class CTPPSBeamParametersRcd - : public edm::eventsetup::DependentRecordImplementation> {}; +class CTPPSBeamParametersRcd : public edm::eventsetup::DependentRecordImplementation< + CTPPSBeamParametersRcd, + edm::mpl::Vector> {}; #endif diff --git a/CondFormats/DataRecord/interface/EcalEBPhase2TPGAmplWeightIdMapRcd.h b/CondFormats/DataRecord/interface/EcalEBPhase2TPGAmplWeightIdMapRcd.h new file mode 100644 index 0000000000000..a9389ef6367a3 --- /dev/null +++ b/CondFormats/DataRecord/interface/EcalEBPhase2TPGAmplWeightIdMapRcd.h @@ -0,0 +1,7 @@ +#ifndef CondFormats_DataRecord_EcalEBPhase2TPGAmplWeightIdMapRcd_h +#define CondFormats_DataRecord_EcalEBPhase2TPGAmplWeightIdMapRcd_h + +#include "FWCore/Framework/interface/EventSetupRecordImplementation.h" +class EcalEBPhase2TPGAmplWeightIdMapRcd + : public edm::eventsetup::EventSetupRecordImplementation {}; +#endif diff --git a/CondFormats/DataRecord/interface/EcalEBPhase2TPGLinearizationConstRcd.h b/CondFormats/DataRecord/interface/EcalEBPhase2TPGLinearizationConstRcd.h new file mode 100644 index 0000000000000..8cae9d31c7d27 --- /dev/null +++ b/CondFormats/DataRecord/interface/EcalEBPhase2TPGLinearizationConstRcd.h @@ -0,0 +1,7 @@ +#ifndef 
CondFormats_DataRecord_EcalEBPhase2TPGLinearizationConstRcd_h +#define CondFormats_DataRecord_EcalEBPhase2TPGLinearizationConstRcd_h + +#include "FWCore/Framework/interface/EventSetupRecordImplementation.h" +class EcalEBPhase2TPGLinearizationConstRcd + : public edm::eventsetup::EventSetupRecordImplementation {}; +#endif diff --git a/CondFormats/DataRecord/interface/EcalEBPhase2TPGPedestalsRcd.h b/CondFormats/DataRecord/interface/EcalEBPhase2TPGPedestalsRcd.h new file mode 100644 index 0000000000000..399e780144b4a --- /dev/null +++ b/CondFormats/DataRecord/interface/EcalEBPhase2TPGPedestalsRcd.h @@ -0,0 +1,7 @@ +#ifndef CondFormats_DataRecord_EcalEBPhase2TPGPedestalsRcd_h +#define CondFormats_DataRecord_EcalEBPhase2TPGPedestalsRcd_h + +#include "FWCore/Framework/interface/EventSetupRecordImplementation.h" +class EcalEBPhase2TPGPedestalsRcd + : public edm::eventsetup::EventSetupRecordImplementation {}; +#endif diff --git a/CondFormats/DataRecord/interface/EcalEBPhase2TPGTimeWeightIdMapRcd.h b/CondFormats/DataRecord/interface/EcalEBPhase2TPGTimeWeightIdMapRcd.h new file mode 100644 index 0000000000000..7e591ffe19a73 --- /dev/null +++ b/CondFormats/DataRecord/interface/EcalEBPhase2TPGTimeWeightIdMapRcd.h @@ -0,0 +1,7 @@ +#ifndef CondFormats_DataRecord_EcalEBPhase2TPGTimeWeightIdMapRcd_h +#define CondFormats_DataRecord_EcalEBPhase2TPGTimeWeightIdMapRcd_h + +#include "FWCore/Framework/interface/EventSetupRecordImplementation.h" +class EcalEBPhase2TPGTimeWeightIdMapRcd + : public edm::eventsetup::EventSetupRecordImplementation {}; +#endif diff --git a/CondFormats/DataRecord/src/EcalEBPhase2TPGAmplWeightIdMapRcd.cc b/CondFormats/DataRecord/src/EcalEBPhase2TPGAmplWeightIdMapRcd.cc new file mode 100644 index 0000000000000..e6527eed26d8c --- /dev/null +++ b/CondFormats/DataRecord/src/EcalEBPhase2TPGAmplWeightIdMapRcd.cc @@ -0,0 +1,4 @@ +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGAmplWeightIdMapRcd.h" +#include 
"FWCore/Framework/interface/eventsetuprecord_registration_macro.h" + +EVENTSETUP_RECORD_REG(EcalEBPhase2TPGAmplWeightIdMapRcd); diff --git a/CondFormats/DataRecord/src/EcalEBPhase2TPGLinearizationConstRcd.cc b/CondFormats/DataRecord/src/EcalEBPhase2TPGLinearizationConstRcd.cc new file mode 100644 index 0000000000000..9e310ae324038 --- /dev/null +++ b/CondFormats/DataRecord/src/EcalEBPhase2TPGLinearizationConstRcd.cc @@ -0,0 +1,4 @@ +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGLinearizationConstRcd.h" +#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" + +EVENTSETUP_RECORD_REG(EcalEBPhase2TPGLinearizationConstRcd); diff --git a/CondFormats/DataRecord/src/EcalEBPhase2TPGPedestalsRcd.cc b/CondFormats/DataRecord/src/EcalEBPhase2TPGPedestalsRcd.cc new file mode 100644 index 0000000000000..4d0beedfc9bdf --- /dev/null +++ b/CondFormats/DataRecord/src/EcalEBPhase2TPGPedestalsRcd.cc @@ -0,0 +1,4 @@ +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGPedestalsRcd.h" +#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" + +EVENTSETUP_RECORD_REG(EcalEBPhase2TPGPedestalsRcd); diff --git a/CondFormats/DataRecord/src/EcalEBPhase2TPGTimeWeightIdMapRcd.cc b/CondFormats/DataRecord/src/EcalEBPhase2TPGTimeWeightIdMapRcd.cc new file mode 100644 index 0000000000000..1320e879c861f --- /dev/null +++ b/CondFormats/DataRecord/src/EcalEBPhase2TPGTimeWeightIdMapRcd.cc @@ -0,0 +1,4 @@ +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGTimeWeightIdMapRcd.h" +#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" + +EVENTSETUP_RECORD_REG(EcalEBPhase2TPGTimeWeightIdMapRcd); diff --git a/CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h new file mode 100644 index 0000000000000..ee5a3fdb3b02c --- /dev/null +++ b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h @@ -0,0 +1,27 @@ +#ifndef 
CondFormats_EcalObjects_EcalEBPhase2TPGAmplWeightIdMap_h +#define CondFormats_EcalObjects_EcalEBPhase2TPGAmplWeightIdMap_h + +#include "CondFormats/Serialization/interface/Serializable.h" + +#include +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeights.h" +#include + +class EcalEBPhase2TPGAmplWeightIdMap { +public: + typedef std::map EcalEBPhase2TPGAmplWeightMap; + typedef std::map::const_iterator EcalEBPhase2TPGAmplWeightMapItr; + + EcalEBPhase2TPGAmplWeightIdMap(){}; + ~EcalEBPhase2TPGAmplWeightIdMap(){}; + + const EcalEBPhase2TPGAmplWeightMap& getMap() const { return map_; } + void setValue(const uint32_t& id, const EcalEBPhase2TPGAmplWeights& value); + +private: + EcalEBPhase2TPGAmplWeightMap map_; + + COND_SERIALIZABLE; +}; + +#endif diff --git a/CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeights.h b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeights.h new file mode 100644 index 0000000000000..3352099f69bf4 --- /dev/null +++ b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeights.h @@ -0,0 +1,56 @@ +#ifndef CondFormats_EcalObjects_EcalEBPhase2TPGAmplWeights_h +#define CondFormats_EcalObjects_EcalEBPhase2TPGAmplWeights_h + +#include "CondFormats/Serialization/interface/Serializable.h" + +#include +#include + +class EcalEBPhase2TPGAmplWeights { +public: + EcalEBPhase2TPGAmplWeights(); + ~EcalEBPhase2TPGAmplWeights(); + + void getValues(uint32_t& w0, + uint32_t& w1, + uint32_t& w2, + uint32_t& w3, + uint32_t& w4, + uint32_t& w5, + uint32_t& w6, + uint32_t& w7, + uint32_t& w8, + uint32_t& w9, + uint32_t& w10, + uint32_t& w11) const; + void setValues(const uint32_t& w0, + const uint32_t& w1, + const uint32_t& w2, + const uint32_t& w3, + const uint32_t& w4, + const uint32_t& w5, + const uint32_t& w6, + const uint32_t& w7, + const uint32_t& w8, + const uint32_t& w9, + const uint32_t& w10, + const uint32_t& w11); + +private: + uint32_t w0_; + uint32_t w1_; + uint32_t w2_; + uint32_t w3_; + uint32_t w4_; + uint32_t w5_; + 
uint32_t w6_; + uint32_t w7_; + uint32_t w8_; + uint32_t w9_; + uint32_t w10_; + uint32_t w11_; + + COND_SERIALIZABLE; +}; + +#endif diff --git a/CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h new file mode 100644 index 0000000000000..68813c46aa63f --- /dev/null +++ b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h @@ -0,0 +1,27 @@ +#ifndef CondFormats_EcalObjects_EcalEBPhase2TPGLinearizationConst_h +#define CondFormats_EcalObjects_EcalEBPhase2TPGLinearizationConst_h + +#include "CondFormats/Serialization/interface/Serializable.h" + +#include "CondFormats/EcalObjects/interface/EcalCondObjectContainer.h" + +struct EcalEBPhase2TPGLinearizationConstant { + EcalEBPhase2TPGLinearizationConstant() + : mult_x10(0), mult_x1(0), shift_x10(0), shift_x1(0), i2cSub_x10(0), i2cSub_x1(0) {} + + uint32_t mult_x10; + uint32_t mult_x1; + uint32_t shift_x10; + uint32_t shift_x1; + uint32_t i2cSub_x10; + uint32_t i2cSub_x1; + + COND_SERIALIZABLE; +}; + +typedef EcalCondObjectContainer EcalEBPhase2TPGLinearizationConstMap; +typedef EcalCondObjectContainer::const_iterator + EcalEBPhase2TPGLinearizationConstMapIterator; +typedef EcalEBPhase2TPGLinearizationConstMap EcalEBPhase2TPGLinearizationConst; + +#endif diff --git a/CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h new file mode 100644 index 0000000000000..918fa6727f16b --- /dev/null +++ b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h @@ -0,0 +1,20 @@ +#ifndef CondFormats_EcalObjects_EcalEBPhase2TPGPedestals_h +#define CondFormats_EcalObjects_EcalEBPhase2TPGPedestals_h + +#include "CondFormats/Serialization/interface/Serializable.h" + +#include "CondFormats/EcalObjects/interface/EcalCondObjectContainer.h" + +struct EcalEBPhase2TPGPedestal { + EcalEBPhase2TPGPedestal() : mean_x10(0), mean_x1(0) {} + uint32_t mean_x10; + uint32_t 
mean_x1; + + COND_SERIALIZABLE; +}; + +typedef EcalCondObjectContainer EcalEBPhase2TPGPedestalsMap; +typedef EcalEBPhase2TPGPedestalsMap::const_iterator EcalEBPhase2TPGPedestalsMapIterator; +typedef EcalEBPhase2TPGPedestalsMap EcalEBPhase2TPGPedestals; + +#endif diff --git a/CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h new file mode 100644 index 0000000000000..051b7c0988227 --- /dev/null +++ b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h @@ -0,0 +1,27 @@ +#ifndef CondFormats_EcalObjects_EcalEBPhase2TPGTimeWeightIdMap_h +#define CondFormats_EcalObjects_EcalEBPhase2TPGTimeWeightIdMap_h + +#include "CondFormats/Serialization/interface/Serializable.h" + +#include +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeights.h" +#include + +class EcalEBPhase2TPGTimeWeightIdMap { +public: + typedef std::map EcalEBPhase2TPGTimeWeightMap; + typedef std::map::const_iterator EcalEBPhase2TPGTimeWeightMapItr; + + EcalEBPhase2TPGTimeWeightIdMap() {} + ~EcalEBPhase2TPGTimeWeightIdMap() {} + + const EcalEBPhase2TPGTimeWeightMap& getMap() const { return map_; } + void setValue(const uint32_t& id, const EcalEBPhase2TPGTimeWeights& value); + +private: + EcalEBPhase2TPGTimeWeightMap map_; + + COND_SERIALIZABLE; +}; + +#endif diff --git a/CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeights.h b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeights.h new file mode 100644 index 0000000000000..b534733d5af87 --- /dev/null +++ b/CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeights.h @@ -0,0 +1,56 @@ +#ifndef CondFormats_EcalObjects_EcalEBPhase2TPGTimelWeights_h +#define CondFormats_EcalObjects_EcalEBPhase2TPGTimelWeights_h + +#include "CondFormats/Serialization/interface/Serializable.h" + +#include +#include + +class EcalEBPhase2TPGTimeWeights { +public: + EcalEBPhase2TPGTimeWeights(); + ~EcalEBPhase2TPGTimeWeights() {} + + void 
getValues(uint32_t& w0, + uint32_t& w1, + uint32_t& w2, + uint32_t& w3, + uint32_t& w4, + uint32_t& w5, + uint32_t& w6, + uint32_t& w7, + uint32_t& w8, + uint32_t& w9, + uint32_t& w10, + uint32_t& w11) const; + void setValues(const uint32_t& w0, + const uint32_t& w1, + const uint32_t& w2, + const uint32_t& w3, + const uint32_t& w4, + const uint32_t& w5, + const uint32_t& w6, + const uint32_t& w7, + const uint32_t& w8, + const uint32_t& w9, + const uint32_t& w10, + const uint32_t& w11); + +private: + uint32_t w0_; + uint32_t w1_; + uint32_t w2_; + uint32_t w3_; + uint32_t w4_; + uint32_t w5_; + uint32_t w6_; + uint32_t w7_; + uint32_t w8_; + uint32_t w9_; + uint32_t w10_; + uint32_t w11_; + + COND_SERIALIZABLE; +}; + +#endif diff --git a/CondFormats/EcalObjects/src/EcalEBPhase2TPGAmplWeightIdMap.cc b/CondFormats/EcalObjects/src/EcalEBPhase2TPGAmplWeightIdMap.cc new file mode 100644 index 0000000000000..8f7f1d3aa59ed --- /dev/null +++ b/CondFormats/EcalObjects/src/EcalEBPhase2TPGAmplWeightIdMap.cc @@ -0,0 +1,5 @@ +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" + +void EcalEBPhase2TPGAmplWeightIdMap::setValue(const uint32_t& id, const EcalEBPhase2TPGAmplWeights& value) { + map_[id] = value; +} diff --git a/CondFormats/EcalObjects/src/EcalEBPhase2TPGAmplWeights.cc b/CondFormats/EcalObjects/src/EcalEBPhase2TPGAmplWeights.cc new file mode 100644 index 0000000000000..dafbd250ee753 --- /dev/null +++ b/CondFormats/EcalObjects/src/EcalEBPhase2TPGAmplWeights.cc @@ -0,0 +1,58 @@ +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeights.h" + +EcalEBPhase2TPGAmplWeights::EcalEBPhase2TPGAmplWeights() + : w0_(0), w1_(0), w2_(0), w3_(0), w4_(0), w5_(0), w6_(0), w7_(0), w8_(0), w9_(0), w10_(0), w11_(0) {} + +EcalEBPhase2TPGAmplWeights::~EcalEBPhase2TPGAmplWeights() {} + +void EcalEBPhase2TPGAmplWeights::getValues(uint32_t& w0, + uint32_t& w1, + uint32_t& w2, + uint32_t& w3, + uint32_t& w4, + uint32_t& w5, + uint32_t& w6, + uint32_t& w7, + 
uint32_t& w8, + uint32_t& w9, + uint32_t& w10, + uint32_t& w11) const { + w0 = w0_; + w1 = w1_; + w2 = w2_; + w3 = w3_; + w4 = w4_; + w5 = w5_; + w6 = w6_; + w7 = w7_; + w8 = w8_; + w9 = w9_; + w10 = w10_; + w11 = w11_; +} + +void EcalEBPhase2TPGAmplWeights::setValues(const uint32_t& w0, + const uint32_t& w1, + const uint32_t& w2, + const uint32_t& w3, + const uint32_t& w4, + const uint32_t& w5, + const uint32_t& w6, + const uint32_t& w7, + const uint32_t& w8, + const uint32_t& w9, + const uint32_t& w10, + const uint32_t& w11) { + w0_ = w0; + w1_ = w1; + w2_ = w2; + w3_ = w3; + w4_ = w4; + w5_ = w5; + w6_ = w6; + w7_ = w7; + w8_ = w8; + w9_ = w9; + w10_ = w10; + w11_ = w11; +} diff --git a/CondFormats/EcalObjects/src/EcalEBPhase2TPGTimeWeightIdMap.cc b/CondFormats/EcalObjects/src/EcalEBPhase2TPGTimeWeightIdMap.cc new file mode 100644 index 0000000000000..dc2124524e175 --- /dev/null +++ b/CondFormats/EcalObjects/src/EcalEBPhase2TPGTimeWeightIdMap.cc @@ -0,0 +1,5 @@ +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" + +void EcalEBPhase2TPGTimeWeightIdMap::setValue(const uint32_t& id, const EcalEBPhase2TPGTimeWeights& value) { + map_[id] = value; +} diff --git a/CondFormats/EcalObjects/src/EcalEBPhase2TPGTimeWeights.cc b/CondFormats/EcalObjects/src/EcalEBPhase2TPGTimeWeights.cc new file mode 100644 index 0000000000000..bbc5d47e3973f --- /dev/null +++ b/CondFormats/EcalObjects/src/EcalEBPhase2TPGTimeWeights.cc @@ -0,0 +1,56 @@ +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeights.h" + +EcalEBPhase2TPGTimeWeights::EcalEBPhase2TPGTimeWeights() + : w0_(0), w1_(0), w2_(0), w3_(0), w4_(0), w5_(0), w6_(0), w7_(0), w8_(0), w9_(0), w10_(0), w11_(0) {} + +void EcalEBPhase2TPGTimeWeights::getValues(uint32_t& w0, + uint32_t& w1, + uint32_t& w2, + uint32_t& w3, + uint32_t& w4, + uint32_t& w5, + uint32_t& w6, + uint32_t& w7, + uint32_t& w8, + uint32_t& w9, + uint32_t& w10, + uint32_t& w11) const { + w0 = w0_; + w1 = w1_; + w2 = w2_; + 
w3 = w3_; + w4 = w4_; + w5 = w5_; + w6 = w6_; + w7 = w7_; + w8 = w8_; + w9 = w9_; + w10 = w10_; + w11 = w11_; +} + +void EcalEBPhase2TPGTimeWeights::setValues(const uint32_t& w0, + const uint32_t& w1, + const uint32_t& w2, + const uint32_t& w3, + const uint32_t& w4, + const uint32_t& w5, + const uint32_t& w6, + const uint32_t& w7, + const uint32_t& w8, + const uint32_t& w9, + const uint32_t& w10, + const uint32_t& w11) { + w0_ = w0; + w1_ = w1; + w2_ = w2; + w3_ = w3; + w4_ = w4; + w5_ = w5; + w6_ = w6; + w7_ = w7; + w8_ = w8; + w9_ = w9; + w10_ = w10; + w11_ = w11; +} diff --git a/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGAmplWeightIdMap.cc b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGAmplWeightIdMap.cc new file mode 100644 index 0000000000000..60f388c8131b7 --- /dev/null +++ b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGAmplWeightIdMap.cc @@ -0,0 +1,14 @@ +// -*- C++ -*- +// +// Author: Nancy Marinelli +// Created: +// $Id: T_EventSetup_EcalEBPhase2TPGAmplWeightIdMap.cc $ +// + +// system include files + +// user include files +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" +#include "FWCore/Utilities/interface/typelookup.h" + +TYPELOOKUP_DATA_REG(EcalEBPhase2TPGAmplWeightIdMap); diff --git a/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGLinearizationConst.cc b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGLinearizationConst.cc new file mode 100644 index 0000000000000..a5568f53950be --- /dev/null +++ b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGLinearizationConst.cc @@ -0,0 +1,17 @@ +// -*- C++ -*- +// +// Implementation: +// create all the 'infrastructure' needed to get into the Context +// +// Author: Nancy Marinelli +// Created: +// $Id: T_EventSetup_EcalEBPhase2TPGLinearizationConst.cc $ +// + +// system include files + +// user include files +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include 
"FWCore/Utilities/interface/typelookup.h" + +TYPELOOKUP_DATA_REG(EcalEBPhase2TPGLinearizationConst); diff --git a/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGPedestals.cc b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGPedestals.cc new file mode 100644 index 0000000000000..6919fbd5dd486 --- /dev/null +++ b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGPedestals.cc @@ -0,0 +1,17 @@ +// -*- C++ -*- +// +// Implementation: +// create all the 'infrastructure' needed to get into the Context +// +// Author: Nancy Marinelli +// Created: +// $Id: T_EventSetup_EcalEBPhase2TPGPedestals.cc $ +// + +// system include files + +// user include files +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" +#include "FWCore/Utilities/interface/typelookup.h" + +TYPELOOKUP_DATA_REG(EcalEBPhase2TPGPedestals); diff --git a/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGTimeWeightIdMap.cc b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGTimeWeightIdMap.cc new file mode 100644 index 0000000000000..2b326b3ad9f3a --- /dev/null +++ b/CondFormats/EcalObjects/src/T_EventSetup_EcalEBPhase2TPGTimeWeightIdMap.cc @@ -0,0 +1,13 @@ +// -*- C++ -*- +// +// Author: Nancy Marinelli +// $Id: T_EventSetup_EcalEBPhase2TPGTimeWeightIdMap.cc $ +// + +// system include files + +// user include files +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" +#include "FWCore/Utilities/interface/typelookup.h" + +TYPELOOKUP_DATA_REG(EcalEBPhase2TPGTimeWeightIdMap); diff --git a/CondFormats/EcalObjects/src/classes.h b/CondFormats/EcalObjects/src/classes.h index cf9e4896a0f2b..ed447d09237a9 100644 --- a/CondFormats/EcalObjects/src/classes.h +++ b/CondFormats/EcalObjects/src/classes.h @@ -73,3 +73,7 @@ //ECAL PH2: #include "CondFormats/EcalObjects/interface/EcalLiteDTUPedestals.h" #include "CondFormats/EcalObjects/interface/EcalCATIAGainRatios.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" 
+#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" diff --git a/CondFormats/EcalObjects/src/classes_def.xml b/CondFormats/EcalObjects/src/classes_def.xml index a6895cbdf9f58..76e5ece0cb2fe 100644 --- a/CondFormats/EcalObjects/src/classes_def.xml +++ b/CondFormats/EcalObjects/src/classes_def.xml @@ -112,6 +112,18 @@ + + + + + + + + + + + + @@ -123,6 +135,19 @@ + + + + + + + + + + + + + @@ -224,6 +249,12 @@ + + + + + + diff --git a/CondFormats/GEMObjects/interface/GEMChMap.h b/CondFormats/GEMObjects/interface/GEMChMap.h index 453060e82e146..37a1bc6376c9c 100644 --- a/CondFormats/GEMObjects/interface/GEMChMap.h +++ b/CondFormats/GEMObjects/interface/GEMChMap.h @@ -46,7 +46,13 @@ class GEMChMap { struct chamDC { uint32_t detId; int chamberType; - bool operator<(const chamDC& r) const { return detId < r.detId; } + bool operator<(const chamDC& r) const { + if (detId == r.detId) { + return chamberType < r.chamberType; + } else { + return detId < r.detId; + } + } COND_SERIALIZABLE; }; diff --git a/CondFormats/PhysicsToolsObjects/interface/MVAComputer.h b/CondFormats/PhysicsToolsObjects/interface/MVAComputer.h index 394d09d39a4ff..1a3753e59a41e 100644 --- a/CondFormats/PhysicsToolsObjects/interface/MVAComputer.h +++ b/CondFormats/PhysicsToolsObjects/interface/MVAComputer.h @@ -34,6 +34,9 @@ namespace PhysicsTools { bitsInLast = other.bitsInLast; return *this; } + BitSet(const BitSet &other) = default; + BitSet() = default; + ~BitSet() = default; std::vector store; unsigned int bitsInLast; @@ -68,6 +71,7 @@ namespace PhysicsTools { inline Variable() {} inline Variable(const std::string &name) : name(name) {} inline ~Variable() {} + Variable &operator=(const Variable &other) = default; std::string name; diff --git a/CondFormats/SiPixelObjects/BuildFile.xml 
b/CondFormats/SiPixelObjects/BuildFile.xml index 1d9b8d6b19f53..ddd87c956d217 100644 --- a/CondFormats/SiPixelObjects/BuildFile.xml +++ b/CondFormats/SiPixelObjects/BuildFile.xml @@ -1,3 +1,4 @@ + @@ -12,6 +13,9 @@ + + + diff --git a/CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTHost.h b/CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTHost.h new file mode 100644 index 0000000000000..28361ab184073 --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTHost.h @@ -0,0 +1,9 @@ +#ifndef CondFormats_SiPixelObjects_SiPixelGainCalibrationForHLTHost_h +#define CondFormats_SiPixelObjects_SiPixelGainCalibrationForHLTHost_h + +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h" + +using SiPixelGainCalibrationForHLTHost = PortableHostCollection; + +#endif // CondFormats_SiPixelObjects_SiPixelGainCalibrationForHLTHost_h diff --git a/CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h b/CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h new file mode 100644 index 0000000000000..03c1c37c61046 --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h @@ -0,0 +1,42 @@ +#ifndef CondFormats_SiPixelObjects_interface_SiPixelGainCalibrationForHLTLayout_h +#define CondFormats_SiPixelObjects_interface_SiPixelGainCalibrationForHLTLayout_h + +#include +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +namespace siPixelGainsSoA { + struct DecodingStructure { + uint8_t gain; + uint8_t ped; + }; + + using Ranges = std::array; + using Cols = std::array; +} // namespace siPixelGainsSoA + +GENERATE_SOA_LAYOUT(SiPixelGainCalibrationForHLTLayout, + SOA_COLUMN(siPixelGainsSoA::DecodingStructure, v_pedestals), + + SOA_SCALAR(siPixelGainsSoA::Ranges, modStarts), + 
SOA_SCALAR(siPixelGainsSoA::Ranges, modEnds), + SOA_SCALAR(siPixelGainsSoA::Cols, modCols), + + SOA_SCALAR(float, minPed), + SOA_SCALAR(float, maxPed), + SOA_SCALAR(float, minGain), + SOA_SCALAR(float, maxGain), + SOA_SCALAR(float, pedPrecision), + SOA_SCALAR(float, gainPrecision), + + SOA_SCALAR(unsigned int, numberOfRowsAveragedOver), + SOA_SCALAR(unsigned int, nBinsToUseForEncoding), + SOA_SCALAR(unsigned int, deadFlag), + SOA_SCALAR(unsigned int, noisyFlag), + SOA_SCALAR(float, link)) + +using SiPixelGainCalibrationForHLTSoA = SiPixelGainCalibrationForHLTLayout<>; +using SiPixelGainCalibrationForHLTSoAView = SiPixelGainCalibrationForHLTSoA::View; +using SiPixelGainCalibrationForHLTSoAConstView = SiPixelGainCalibrationForHLTSoA::ConstView; + +#endif // CondFormats_SiPixelObjects_interface_SiPixelGainCalibrationForHLTLayout_h diff --git a/CondFormats/SiPixelObjects/interface/SiPixelMappingHost.h b/CondFormats/SiPixelObjects/interface/SiPixelMappingHost.h new file mode 100644 index 0000000000000..772a7a97e267b --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/SiPixelMappingHost.h @@ -0,0 +1,10 @@ +#ifndef CondFormats_SiPixelObjects_SiPixelMappingHost_h +#define CondFormats_SiPixelObjects_SiPixelMappingHost_h + +#include +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h" + +using SiPixelMappingHost = PortableHostCollection; + +#endif // CondFormats_SiPixelObjects_SiPixelMappingHost_h diff --git a/CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h b/CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h new file mode 100644 index 0000000000000..ef123d443c795 --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h @@ -0,0 +1,24 @@ +#ifndef CondFormats_SiPixelObjects_interface_SiPixelMappingLayout_h +#define CondFormats_SiPixelObjects_interface_SiPixelMappingLayout_h + +#include +#include "DataFormats/SoATemplate/interface/SoALayout.h" 
+#include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h" + +GENERATE_SOA_LAYOUT(SiPixelMappingLayout, + SOA_COLUMN(unsigned int, fed), + SOA_COLUMN(unsigned int, link), + SOA_COLUMN(unsigned int, roc), + SOA_COLUMN(unsigned int, rawId), + SOA_COLUMN(unsigned int, rocInDet), + SOA_COLUMN(unsigned int, moduleId), + SOA_COLUMN(bool, badRocs), + SOA_COLUMN(unsigned char, modToUnpDefault), + SOA_SCALAR(unsigned int, size), + SOA_SCALAR(bool, hasQuality)) + +using SiPixelMappingSoA = SiPixelMappingLayout<>; +using SiPixelMappingSoAView = SiPixelMappingSoA::View; +using SiPixelMappingSoAConstView = SiPixelMappingSoA::ConstView; + +#endif // CondFormats_SiPixelObjects_interface_SiPixelMappingLayout_h diff --git a/CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h new file mode 100644 index 0000000000000..3c5e7094654c6 --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h @@ -0,0 +1,13 @@ +#ifndef CondFormats_SiPixelObjects_interface_alpaka_SiPixelGainCalibrationForHLTDevice_h +#define CondFormats_SiPixelObjects_interface_alpaka_SiPixelGainCalibrationForHLTDevice_h + +#include +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using SiPixelGainCalibrationForHLTDevice = PortableCollection; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // CondFormats_SiPixelObjects_interface_alpaka_SiPixelGainCalibrationForHLTDevice_h diff --git a/CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTUtilities.h b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTUtilities.h new file mode 100644 index 0000000000000..1fbce15dbe231 --- /dev/null +++ 
b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTUtilities.h @@ -0,0 +1,41 @@ +#ifndef CondFormats_SiPixelObjects_interface_alpaka_SiPixelGainCalibrationForHLTUtilities_h +#define CondFormats_SiPixelObjects_interface_alpaka_SiPixelGainCalibrationForHLTUtilities_h + +#include +#include +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h" + +struct SiPixelGainUtilities { + ALPAKA_FN_HOST_ACC ALPAKA_FN_ACC ALPAKA_FN_INLINE static std::pair getPedAndGain( + const SiPixelGainCalibrationForHLTSoAConstView& view, + uint32_t moduleInd, + int col, + int row, + bool& isDeadColumn, + bool& isNoisyColumn) { + auto start = view.modStarts()[moduleInd]; + auto end = view.modEnds()[moduleInd]; + auto nCols = view.modCols()[moduleInd]; + // determine what averaged data block we are in (there should be 1 or 2 of these depending on if plaquette is 1 by X or 2 by X + unsigned int lengthOfColumnData = (end - start) / nCols; + unsigned int lengthOfAveragedDataInEachColumn = 2; // we always only have two values per column averaged block + unsigned int numberOfDataBlocksToSkip = row / view.numberOfRowsAveragedOver(); + + auto offset = start + col * lengthOfColumnData + lengthOfAveragedDataInEachColumn * numberOfDataBlocksToSkip; + assert(offset < end); + assert(offset < 3088384); + assert(0 == offset % 2); + + auto lp = view.v_pedestals(); + auto s = lp[offset / 2]; + + isDeadColumn = (s.ped & 0xFF) == view.deadFlag(); + isNoisyColumn = (s.ped & 0xFF) == view.noisyFlag(); + float decodeGain = float(s.gain & 0xFF) * view.gainPrecision() + view.minGain(); + float decodePed = float(s.ped & 0xFF) * view.pedPrecision() + view.minPed(); + + return std::make_pair(decodePed, decodeGain); + }; +}; + +#endif //CondFormats_SiPixelObjects_interface_alpaka_SiPixelGainCalibrationForHLTUtilities_h \ No newline at end of file diff --git a/CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h 
b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h new file mode 100644 index 0000000000000..8a16caa0d7368 --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h @@ -0,0 +1,17 @@ +#ifndef CondFormats_SiPixelObjects_interface_alpaka_SiPixelMappingDevice_h +#define CondFormats_SiPixelObjects_interface_alpaka_SiPixelMappingDevice_h + +#include +#include +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using SiPixelMappingDevice = PortableCollection; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // DataFormats_SiPixelMappingSoA_alpaka_SiPixelClustersDevice_h diff --git a/CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingUtilities.h b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingUtilities.h new file mode 100644 index 0000000000000..800cf0ac671cd --- /dev/null +++ b/CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingUtilities.h @@ -0,0 +1,53 @@ +#ifndef CondFormats_SiPixelObjects_interface_alpaka_SiPixelMappingUtilities_h +#define CondFormats_SiPixelObjects_interface_alpaka_SiPixelMappingUtilities_h + +#include +#include +#include "CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingMap.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingTree.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + struct SiPixelMappingUtilities { + ALPAKA_FN_HOST_ACC ALPAKA_FN_ACC ALPAKA_FN_INLINE static bool hasQuality(const SiPixelMappingSoAConstView& view) { + return view.hasQuality(); + } + + ALPAKA_FN_HOST_ACC ALPAKA_FN_ACC ALPAKA_FN_INLINE static 
cms::alpakatools::device_buffer + getModToUnpRegionalAsync(std::set const& modules, + const SiPixelFedCablingTree* cabling, + std::vector const& fedIds, + Queue& queue) { + auto modToUnpDevice = cms::alpakatools::make_device_buffer(queue, pixelgpudetails::MAX_SIZE); + auto modToUnpHost = cms::alpakatools::make_host_buffer(queue, pixelgpudetails::MAX_SIZE); + + unsigned int startFed = fedIds.front(); + unsigned int endFed = fedIds.back() - 1; + + sipixelobjects::CablingPathToDetUnit path; + int index = 1; + + for (unsigned int fed = startFed; fed <= endFed; fed++) { + for (unsigned int link = 1; link <= pixelgpudetails::MAX_LINK; link++) { + for (unsigned int roc = 1; roc <= pixelgpudetails::MAX_ROC; roc++) { + path = {fed, link, roc}; + const sipixelobjects::PixelROC* pixelRoc = cabling->findItem(path); + if (pixelRoc != nullptr) { + modToUnpHost[index] = (not modules.empty()) and (modules.find(pixelRoc->rawId()) == modules.end()); + } else { // store some dummy number + modToUnpHost[index] = true; + } + index++; + } + } + } + + alpaka::memcpy(queue, modToUnpDevice, modToUnpHost); + + return modToUnpDevice; + } + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif //CondFormats_SiPixelObjects_interface_alpaka_SiPixelMappingUtilities_h diff --git a/CondFormats/SiPixelObjects/src/T_EventSetup_SiPixelGainCalibrationForHLTHost.cc b/CondFormats/SiPixelObjects/src/T_EventSetup_SiPixelGainCalibrationForHLTHost.cc new file mode 100644 index 0000000000000..be54c23dd8df6 --- /dev/null +++ b/CondFormats/SiPixelObjects/src/T_EventSetup_SiPixelGainCalibrationForHLTHost.cc @@ -0,0 +1,4 @@ +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTHost.h" +#include "FWCore/Utilities/interface/typelookup.h" + +TYPELOOKUP_DATA_REG(SiPixelGainCalibrationForHLTHost); diff --git a/CondFormats/SiPixelObjects/src/T_EventSetup_SiPixelMappingHost.cc b/CondFormats/SiPixelObjects/src/T_EventSetup_SiPixelMappingHost.cc new file mode 100644 index 
0000000000000..27201b65add22 --- /dev/null +++ b/CondFormats/SiPixelObjects/src/T_EventSetup_SiPixelMappingHost.cc @@ -0,0 +1,4 @@ +#include "CondFormats/SiPixelObjects/interface/SiPixelMappingHost.h" +#include "FWCore/Utilities/interface/typelookup.h" + +TYPELOOKUP_DATA_REG(SiPixelMappingHost); \ No newline at end of file diff --git a/CondFormats/SiPixelObjects/src/alpaka/T_EventSetup_SiPixelGainCalibrationForHLTDevice.cc b/CondFormats/SiPixelObjects/src/alpaka/T_EventSetup_SiPixelGainCalibrationForHLTDevice.cc new file mode 100644 index 0000000000000..fec7ca3ba1c52 --- /dev/null +++ b/CondFormats/SiPixelObjects/src/alpaka/T_EventSetup_SiPixelGainCalibrationForHLTDevice.cc @@ -0,0 +1,4 @@ +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/typelookup.h" + +TYPELOOKUP_ALPAKA_DATA_REG(SiPixelGainCalibrationForHLTDevice); \ No newline at end of file diff --git a/CondFormats/SiPixelObjects/src/alpaka/T_EventSetup_SiPixelMappingDevice.cc b/CondFormats/SiPixelObjects/src/alpaka/T_EventSetup_SiPixelMappingDevice.cc new file mode 100644 index 0000000000000..0b86fdf64978b --- /dev/null +++ b/CondFormats/SiPixelObjects/src/alpaka/T_EventSetup_SiPixelMappingDevice.cc @@ -0,0 +1,4 @@ +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/typelookup.h" + +TYPELOOKUP_ALPAKA_DATA_REG(SiPixelMappingDevice); diff --git a/CondFormats/SiStripObjects/interface/SiStripBadStrip.h b/CondFormats/SiStripObjects/interface/SiStripBadStrip.h index 88e905cfdaf5b..a2b3ad36e629d 100644 --- a/CondFormats/SiStripObjects/interface/SiStripBadStrip.h +++ b/CondFormats/SiStripObjects/interface/SiStripBadStrip.h @@ -56,10 +56,6 @@ class SiStripBadStrip { typedef Container InputVector; SiStripBadStrip(){}; - SiStripBadStrip(const SiStripBadStrip& orig) { - v_badstrips = orig.v_badstrips; - indexes = orig.indexes; - } virtual 
~SiStripBadStrip(){}; bool put(const uint32_t& detID, const InputVector& vect) { return put(detID, Range(vect.begin(), vect.end())); } diff --git a/CondFormats/SiStripObjects/interface/SiStripNoises.h b/CondFormats/SiStripObjects/interface/SiStripNoises.h index 6bbd071ef23fa..d936ad51b38cb 100644 --- a/CondFormats/SiStripObjects/interface/SiStripNoises.h +++ b/CondFormats/SiStripObjects/interface/SiStripNoises.h @@ -49,7 +49,6 @@ class SiStripNoises { typedef Registry::const_iterator RegistryIterator; typedef std::vector InputVector; - SiStripNoises(const SiStripNoises&); SiStripNoises() {} ~SiStripNoises() {} diff --git a/CondFormats/SiStripObjects/src/SiStripNoises.cc b/CondFormats/SiStripObjects/src/SiStripNoises.cc index 20373bf940bf0..05bf49fecb08c 100644 --- a/CondFormats/SiStripObjects/src/SiStripNoises.cc +++ b/CondFormats/SiStripObjects/src/SiStripNoises.cc @@ -6,13 +6,6 @@ #include #include "CondFormats/SiStripObjects/interface/SiStripDetSummary.h" -SiStripNoises::SiStripNoises(const SiStripNoises& input) { - v_noises.clear(); - indexes.clear(); - v_noises.insert(v_noises.end(), input.v_noises.begin(), input.v_noises.end()); - indexes.insert(indexes.end(), input.indexes.begin(), input.indexes.end()); -} - bool SiStripNoises::put(const uint32_t& DetId, const InputVector& input) { std::vector Vo_CHAR; encode(input, Vo_CHAR); diff --git a/CondTools/BeamSpot/plugins/BeamSpotOnlineShifter.cc b/CondTools/BeamSpot/plugins/BeamSpotOnlineShifter.cc new file mode 100644 index 0000000000000..3ed41362a86e6 --- /dev/null +++ b/CondTools/BeamSpot/plugins/BeamSpotOnlineShifter.cc @@ -0,0 +1,313 @@ +// -*- C++ -*- +// +// Package: CondTools/BeamSpot +// Class: BeamSpotOnlineShifter +// +/**\class BeamSpotOnlineShifter BeamSpotOnlineShifter.cc CondTools/BeamSpot/plugins/BeamSpotOnlineShifter.cc + + Description: EDAnalyzer to create a BeamSpotOnlineHLTObjectsRcd from a BeamSpotObjectsRcd (inserting some parameters manually) + + Implementation: + [Notes on implementation] 
+*/ +// +// Original Author: Marco Musich +// Created: Sat, 06 May 2023 21:10:00 GMT +// +// + +// system include files +#include +#include +#include +#include +#include + +// user include files +#include "CondCore/AlignmentPlugins/interface/AlignmentPayloadInspectorHelper.h" +#include "CondCore/DBOutputService/interface/PoolDBOutputService.h" +#include "CondFormats/Alignment/interface/Alignments.h" +#include "CondFormats/AlignmentRecord/interface/TrackerAlignmentRcd.h" +#include "CondFormats/BeamSpotObjects/interface/BeamSpotObjects.h" +#include "CondFormats/BeamSpotObjects/interface/BeamSpotOnlineObjects.h" +#include "CondFormats/DataRecord/interface/BeamSpotObjectsRcd.h" +#include "CondFormats/DataRecord/interface/BeamSpotOnlineHLTObjectsRcd.h" +#include "CondFormats/DataRecord/interface/BeamSpotOnlineLegacyObjectsRcd.h" +#include "DataFormats/GeometryVector/interface/GlobalPoint.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/ESWatcher.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" + +// +// class declaration +// + +class BeamSpotOnlineShifter : public edm::one::EDAnalyzer { +public: + explicit BeamSpotOnlineShifter(const edm::ParameterSet&); + ~BeamSpotOnlineShifter() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + cond::Time_t pack(uint32_t, uint32_t); + template + void writeToDB(const edm::Event& iEvent, + const edm::EventSetup& iSetup, + const edm::ESGetToken& 
token); + +private: + const GlobalPoint getPixelBarycenter(const AlignmentPI::TkAlBarycenters barycenters, const bool isFullPixel); + const GlobalPoint deltaAlignments(const Alignments* target, + const Alignments* reference, + const TrackerTopology& tTopo, + const bool isFullPixel = false); + + void beginRun(const edm::Run&, const edm::EventSetup&) override; + void endRun(const edm::Run&, const edm::EventSetup&) override{}; + + void analyze(const edm::Event&, const edm::EventSetup&) override; + + // ----------member data --------------------------- + + edm::ESGetToken hltToken_; + edm::ESGetToken legacyToken_; + + edm::ESWatcher bsHLTWatcher_; + edm::ESWatcher bsLegayWatcher_; + + // IoV-structure + GlobalPoint theShift_; + const bool fIsHLT_; + const bool fullPixel_; + const edm::ESGetToken trackerTopoTokenBR_; + const edm::ESGetToken refAliTokenBR_; + const edm::ESGetToken tarAliTokenBR_; + const double xShift_; + const double yShift_; + const double zShift_; + uint32_t fIOVStartRun_; + uint32_t fIOVStartLumi_; + cond::Time_t fnewSince_; + bool fuseNewSince_; + std::string fLabel_; +}; + +// +// constructors and destructor +// +BeamSpotOnlineShifter::BeamSpotOnlineShifter(const edm::ParameterSet& iConfig) + : theShift_(GlobalPoint()), + fIsHLT_(iConfig.getParameter("isHLT")), + fullPixel_(iConfig.getParameter("useFullPixel")), + trackerTopoTokenBR_(esConsumes()), + refAliTokenBR_(esConsumes(edm::ESInputTag("", "reference"))), + tarAliTokenBR_(esConsumes(edm::ESInputTag("", "target"))), + xShift_(iConfig.getParameter("xShift")), + yShift_(iConfig.getParameter("yShift")), + zShift_(iConfig.getParameter("zShift")) { + if (iConfig.exists("IOVStartRun") && iConfig.exists("IOVStartLumi")) { + fIOVStartRun_ = iConfig.getUntrackedParameter("IOVStartRun"); + fIOVStartLumi_ = iConfig.getUntrackedParameter("IOVStartLumi"); + fnewSince_ = BeamSpotOnlineShifter::pack(fIOVStartRun_, fIOVStartLumi_); + fuseNewSince_ = true; + edm::LogPrint("BeamSpotOnlineShifter") << 
"useNewSince = True"; + } else { + fuseNewSince_ = false; + edm::LogPrint("BeamSpotOnlineShifter") << "useNewSince = False"; + } + fLabel_ = (fIsHLT_) ? "BeamSpotOnlineHLTObjectsRcd" : "BeamSpotOnlineLegacyObjectsRcd"; + + if (fIsHLT_) { + hltToken_ = esConsumes(); + } else { + legacyToken_ = esConsumes(); + } +} + +// +// member functions +// + +// ------------ Create a since object (cond::Time_t) by packing Run and LS (both uint32_t) ------------ +cond::Time_t BeamSpotOnlineShifter::pack(uint32_t fIOVStartRun, uint32_t fIOVStartLumi) { + return ((uint64_t)fIOVStartRun << 32 | fIOVStartLumi); +} + +template +void BeamSpotOnlineShifter::writeToDB(const edm::Event& iEvent, + const edm::EventSetup& iSetup, + const edm::ESGetToken& token) { + // input object + const BeamSpotOnlineObjects* inputSpot = &iSetup.getData(token); + + // output object + BeamSpotOnlineObjects abeam; + + // N.B.: theShift is the difference between the target and the reference geometry barycenters + // so if effectively the displacement of the new origin of reference frame w.r.t the old one. 
+ // This has to be subtracted from the old position of the beamspot: + // - if the new reference frame rises, the beamspot drops + // - if the new reference frame drops, the beamspot rises + + abeam.setPosition(inputSpot->x() - theShift_.x(), inputSpot->y() - theShift_.y(), inputSpot->z() - theShift_.z()); + abeam.setSigmaZ(inputSpot->sigmaZ()); + abeam.setdxdz(inputSpot->dxdz()); + abeam.setdydz(inputSpot->dydz()); + abeam.setBeamWidthX(inputSpot->beamWidthX()); + abeam.setBeamWidthY(inputSpot->beamWidthY()); + abeam.setBeamWidthXError(inputSpot->beamWidthXError()); + abeam.setBeamWidthYError(inputSpot->beamWidthYError()); + + for (unsigned int i = 0; i < 7; i++) { + for (unsigned int j = 0; j < 7; j++) { + abeam.setCovariance(i, j, inputSpot->covariance(i, j)); + } + } + + abeam.setType(inputSpot->beamType()); + abeam.setEmittanceX(inputSpot->emittanceX()); + abeam.setEmittanceY(inputSpot->emittanceY()); + abeam.setBetaStar(inputSpot->betaStar()); + + // online BeamSpot object specific + abeam.setLastAnalyzedLumi(inputSpot->lastAnalyzedLumi()); + abeam.setLastAnalyzedRun(inputSpot->lastAnalyzedRun()); + abeam.setLastAnalyzedFill(inputSpot->lastAnalyzedFill()); + abeam.setStartTimeStamp(inputSpot->startTimeStamp()); + abeam.setEndTimeStamp(inputSpot->endTimeStamp()); + abeam.setNumTracks(inputSpot->numTracks()); + abeam.setNumPVs(inputSpot->numPVs()); + abeam.setUsedEvents(inputSpot->usedEvents()); + abeam.setMaxPVs(inputSpot->maxPVs()); + abeam.setMeanPV(inputSpot->meanPV()); + abeam.setMeanErrorPV(inputSpot->meanErrorPV()); + abeam.setRmsPV(inputSpot->rmsPV()); + abeam.setRmsErrorPV(inputSpot->rmsErrorPV()); + abeam.setStartTime(inputSpot->startTime()); + abeam.setEndTime(inputSpot->endTime()); + abeam.setLumiRange(inputSpot->lumiRange()); + abeam.setCreationTime(inputSpot->creationTime()); + + edm::LogPrint("BeamSpotOnlineShifter") << " Writing results to DB..."; + edm::LogPrint("BeamSpotOnlineShifter") << abeam; + + edm::Service poolDbService; + if 
(poolDbService.isAvailable()) { + edm::LogPrint("BeamSpotOnlineShifter") << "poolDBService available"; + if (poolDbService->isNewTagRequest(fLabel_)) { + edm::LogPrint("BeamSpotOnlineShifter") << "new tag requested"; + if (fuseNewSince_) { + edm::LogPrint("BeamSpotOnlineShifter") << "Using a new Since: " << fnewSince_; + poolDbService->createOneIOV(abeam, fnewSince_, fLabel_); + } else + poolDbService->createOneIOV(abeam, poolDbService->beginOfTime(), fLabel_); + } else { + edm::LogPrint("BeamSpotOnlineShifter") << "no new tag requested"; + if (fuseNewSince_) { + cond::Time_t thisSince = BeamSpotOnlineShifter::pack(iEvent.getLuminosityBlock().run(), + iEvent.getLuminosityBlock().luminosityBlock()); + edm::LogPrint("BeamSpotOnlineShifter") << "Using a new Since: " << thisSince; + poolDbService->appendOneIOV(abeam, thisSince, fLabel_); + } else + poolDbService->appendOneIOV(abeam, poolDbService->currentTime(), fLabel_); + } + } + edm::LogPrint("BeamSpotOnlineShifter") << "[BeamSpotOnlineShifter] analyze done \n"; +} + +//_____________________________________________________________________________________________ +const GlobalPoint BeamSpotOnlineShifter::deltaAlignments(const Alignments* target, + const Alignments* reference, + const TrackerTopology& tTopo, + const bool isFullPixel) { + const std::map theZero = { + {AlignmentPI::t_x, 0.0}, {AlignmentPI::t_y, 0.0}, {AlignmentPI::t_z, 0.0}}; + + AlignmentPI::TkAlBarycenters ref_barycenters; + ref_barycenters.computeBarycenters(reference->m_align, tTopo, theZero); + const auto& ref = this->getPixelBarycenter(ref_barycenters, isFullPixel); + + AlignmentPI::TkAlBarycenters tar_barycenters; + tar_barycenters.computeBarycenters(target->m_align, tTopo, theZero); + const auto& tar = this->getPixelBarycenter(tar_barycenters, isFullPixel); + + return GlobalPoint(tar.x() - ref.x(), tar.y() - ref.y(), tar.z() - ref.z()); +} + +//_____________________________________________________________________________________________ +const 
GlobalPoint BeamSpotOnlineShifter::getPixelBarycenter(AlignmentPI::TkAlBarycenters barycenters, + const bool isFullPixel) { + const auto& BPix = barycenters.getPartitionAvg(AlignmentPI::PARTITION::BPIX); + const double BPixMods = barycenters.getNModules(AlignmentPI::PARTITION::BPIX); + + const auto& FPixM = barycenters.getPartitionAvg(AlignmentPI::PARTITION::FPIXm); + const double FPixMMods = barycenters.getNModules(AlignmentPI::PARTITION::FPIXm); + + const auto& FPixP = barycenters.getPartitionAvg(AlignmentPI::PARTITION::FPIXp); + const double FPixPMods = barycenters.getNModules(AlignmentPI::PARTITION::FPIXp); + + const double BPixFrac = BPixMods / (BPixMods + FPixMMods + FPixPMods); + const double FPixMFrac = FPixMMods / (BPixMods + FPixMMods + FPixPMods); + const double FPixPFrac = FPixPMods / (BPixMods + FPixMMods + FPixPMods); + + if (isFullPixel) { + return GlobalPoint(BPixFrac * BPix.x() + FPixMFrac * FPixM.x() + FPixPFrac * FPixP.x(), + BPixFrac * BPix.y() + FPixMFrac * FPixM.y() + FPixPFrac * FPixP.y(), + BPixFrac * BPix.z() + FPixMFrac * FPixM.z() + FPixPFrac * FPixP.z()); + } else { + return GlobalPoint(BPix.x(), BPix.y(), BPix.z()); + } +} + +//_____________________________________________________________________________________________ +void BeamSpotOnlineShifter::beginRun(const edm::Run& iRun, const edm::EventSetup& iSetup) { + const auto& reference = iSetup.getHandle(refAliTokenBR_); + const auto& target = iSetup.getHandle(tarAliTokenBR_); + + const TrackerTopology& tTopo = iSetup.getData(trackerTopoTokenBR_); + + if (reference.isValid() and target.isValid()) { + theShift_ = this->deltaAlignments(&(*reference), &(*target), tTopo, fullPixel_); + } else { + theShift_ = GlobalPoint(xShift_, yShift_, zShift_); + } + edm::LogPrint("BeamSpotOnlineShifter") << "[BeamSpotOnlineShifter] applied shift: " << theShift_ << std::endl; +} + +// ------------ method called for each event ------------ +void BeamSpotOnlineShifter::analyze(const edm::Event& iEvent, 
const edm::EventSetup& iSetup) { + using namespace edm; + if (fIsHLT_) { + if (bsHLTWatcher_.check(iSetup)) { + writeToDB(iEvent, iSetup, hltToken_); + } + } else { + if (bsLegayWatcher_.check(iSetup)) { + writeToDB(iEvent, iSetup, legacyToken_); + } + } +} + +// ------------ method fills 'descriptions' with the allowed parameters for the module ------------ +void BeamSpotOnlineShifter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("isHLT", true); + desc.add("useFullPixel", false)->setComment("use the full pixel detector to compute the barycenter"); + desc.add("xShift", 0.0)->setComment("in cm"); + desc.add("yShift", 0.0)->setComment("in cm"); + desc.add("zShift", 0.0)->setComment("in cm"); + desc.addOptionalUntracked("IOVStartRun", 1); + desc.addOptionalUntracked("IOVStartLumi", 1); + descriptions.addWithDefaultLabel(desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(BeamSpotOnlineShifter); diff --git a/CondTools/BeamSpot/plugins/BuildFile.xml b/CondTools/BeamSpot/plugins/BuildFile.xml index ed4b944cf2472..2d0df5b1a0047 100644 --- a/CondTools/BeamSpot/plugins/BuildFile.xml +++ b/CondTools/BeamSpot/plugins/BuildFile.xml @@ -1,11 +1,16 @@ - + + + + + + + + + - - - - + diff --git a/CondTools/BeamSpot/test/BeamSpotOnlineShifter_cfg.py b/CondTools/BeamSpot/test/BeamSpotOnlineShifter_cfg.py new file mode 100644 index 0000000000000..ea6d28c79f30f --- /dev/null +++ b/CondTools/BeamSpot/test/BeamSpotOnlineShifter_cfg.py @@ -0,0 +1,93 @@ +import FWCore.ParameterSet.Config as cms +import FWCore.ParameterSet.VarParsing as VarParsing + +process = cms.Process("READ") + +options = VarParsing.VarParsing() +options.register('inputTag', + "myTagName", # default value + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.string, # string, int, or float + "output tag name") +options.register('inputRecord', + "BeamSpotOnlineLegacyObjectsRcd", # default value + 
VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.string, # string, int, or float + "type of record") +options.register('startRun', + 306171, # default value + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.int, # string, int, or float + "location of the input data") +options.register('startLumi', + 497, # default value + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.int, # string, int, or float + "IOV Start Lumi") +options.register('maxLSToRead', + 10, ## default value for unit test + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.int, # string, int, or float + "total number of LumiSections to read in input") +options.parseArguments() + +process.load("FWCore.MessageService.MessageLogger_cfi") +process.MessageLogger.cerr.FwkReport.reportEvery = 100000 # do not clog output with IO + +process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxLSToRead)) # large number of events is needed since we probe 5000LS for run (see below) + +#################################################################### +# Empty source +#################################################################### +process.source = cms.Source("EmptySource", + firstRun = cms.untracked.uint32(options.startRun), # Run in ../data/BeamFitResults_Run306171.txt + firstLuminosityBlock = cms.untracked.uint32(options.startLumi), # Lumi in ../data/BeamFitResults_Run306171.txt + numberEventsInLuminosityBlock = cms.untracked.uint32(1), # probe one event per LS + numberEventsInRun = cms.untracked.uint32(5000), # a number of events > the number of LS possible in a real run (5000 s ~ 32 h) + ) + +#################################################################### +# Connect to conditions DB +#################################################################### 
+process.load("Configuration.StandardSequences.GeometryDB_cff") # for the topolgy +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, "132X_dataRun3_HLT_v2") +process.GlobalTag.toGet = cms.VPSet(cms.PSet(record = cms.string("TrackerAlignmentRcd"), # record + tag = cms.string("TrackerAlignment_PCL_byRun_v0_hlt"), # choose your favourite tag + label = cms.untracked.string("reference")), # refence label + cms.PSet(record = cms.string("TrackerAlignmentRcd"), # record + tag = cms.string("TrackerAlignment_collisions23_forHLT_v9"), # choose your favourite tag + label = cms.untracked.string("target"))) # target label + +#process.GlobalTag.DumpStat = cms.untracked.bool(True) + +myTagName = options.inputTag + +print("isForHLT: ",(options.inputRecord == "BeamSpotOnlineHLTObjectsRcd")) +print("max LS to Read: ",options.maxLSToRead) + +################################# +# Produce a SQLITE FILE +################################# +from CondCore.CondDB.CondDB_cfi import * +CondDBBeamSpotObjects = CondDB.clone(connect = cms.string('sqlite_file:test_%s.db' % myTagName)) # choose an output name +process.PoolDBOutputService = cms.Service("PoolDBOutputService", + CondDBBeamSpotObjects, + timetype = cms.untracked.string('lumiid'), #('lumiid'), #('runnumber') + toPut = cms.VPSet(cms.PSet(record = cms.string(options.inputRecord), # BeamSpotOnline record + tag = cms.string(myTagName))), # choose your favourite tag + loadBlobStreamer = cms.untracked.bool(False) + ) + +#################################################################### +# Load and configure analyzer +#################################################################### +process.beamspotonlineshifter = cms.EDAnalyzer("BeamSpotOnlineShifter", + isHLT = cms.bool((options.inputRecord == "BeamSpotOnlineHLTObjectsRcd")), + xShift = cms.double(+0.000141), + yShift = cms.double(+0.000826), + zShift = 
cms.double(+0.000277)) + +# Put module in path: +process.p = cms.Path(process.beamspotonlineshifter) diff --git a/CondTools/BeamSpot/test/BuildFile.xml b/CondTools/BeamSpot/test/BuildFile.xml index 539c958bd1c70..785533c42166a 100644 --- a/CondTools/BeamSpot/test/BuildFile.xml +++ b/CondTools/BeamSpot/test/BuildFile.xml @@ -1 +1,2 @@ + diff --git a/CondTools/BeamSpot/test/testShiftBeamSpotsFromDB.sh b/CondTools/BeamSpot/test/testShiftBeamSpotsFromDB.sh new file mode 100755 index 0000000000000..f980d3ce35aa5 --- /dev/null +++ b/CondTools/BeamSpot/test/testShiftBeamSpotsFromDB.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +function die { echo $1: status $2 ; exit $2; } + +echo "TESTING BeamSpotOnline From DB shifting codes ..." +cmsRun ${SCRAM_TEST_PATH}/BeamSpotOnlineShifter_cfg.py inputTag=BeamSpotOnlineLegacy startRun=375491 startLumi=1 || die "Failure shifting payload from BeamSpotOnlineLegacy" $? +cmsRun ${SCRAM_TEST_PATH}/BeamSpotOnlineShifter_cfg.py inputTag=BeamSpotOnlineHLT startRun=375491 startLumi=1 inputRecord=BeamSpotOnlineHLTObjectsRcd || die "Failure shifting payload from BeamSpotOnlineHLT" $? 
diff --git a/CondTools/RunInfo/interface/LHCInfoCombined.h b/CondTools/RunInfo/interface/LHCInfoCombined.h index 2f6aab4289642..aef3dbe4cfb01 100644 --- a/CondTools/RunInfo/interface/LHCInfoCombined.h +++ b/CondTools/RunInfo/interface/LHCInfoCombined.h @@ -43,7 +43,7 @@ class LHCInfoCombined { void setFromPerLS(const LHCInfoPerLS& infoPerLS); void setFromPerFill(const LHCInfoPerFill& infoPerFill); - float crossingAngle(); + float crossingAngle() const; static constexpr float crossingAngleInvalid = -1.; bool isCrossingAngleInvalid(); diff --git a/CondTools/RunInfo/src/LHCInfoCombined.cc b/CondTools/RunInfo/src/LHCInfoCombined.cc index f489adfbbe3d6..c2376f970b606 100644 --- a/CondTools/RunInfo/src/LHCInfoCombined.cc +++ b/CondTools/RunInfo/src/LHCInfoCombined.cc @@ -43,7 +43,7 @@ void LHCInfoCombined::setFromPerFill(const LHCInfoPerFill& infoPerFill) { fillNumber = infoPerFill.fillNumber(); } -float LHCInfoCombined::crossingAngle() { +float LHCInfoCombined::crossingAngle() const { if (crossingAngleX == 0. && crossingAngleY == 0.) 
{ return crossingAngleInvalid; } diff --git a/CondTools/SiPixel/test/SiPixelLorentzAngleDBLoader_Phase2_cfg.py b/CondTools/SiPixel/test/SiPixelLorentzAngleDBLoader_Phase2_cfg.py index 1fe8feebc8025..db03a97191c58 100644 --- a/CondTools/SiPixel/test/SiPixelLorentzAngleDBLoader_Phase2_cfg.py +++ b/CondTools/SiPixel/test/SiPixelLorentzAngleDBLoader_Phase2_cfg.py @@ -25,39 +25,51 @@ if tGeometry == 'T5': geometry_cff = 'GeometryExtended2023D17_cff' recoGeometry_cff = 'GeometryExtended2023D17Reco_cff' + has3DinL1 = False LA_value = 0.106 tag = 'SiPixelLorentzAngle_Phase2_T5' elif tGeometry == 'T6': geometry_cff = 'GeometryExtended2023D35_cff' recoGeometry_cff = 'GeometryExtended2023D35Reco_cff' + has3DinL1 = False LA_value = 0.106 tag = 'SiPixelLorentzAngle_Phase2_T6' elif tGeometry == 'T11': geometry_cff = 'GeometryExtended2023D29_cff' recoGeometry_cff = 'GeometryExtended2023D29Reco_cff' + has3DinL1 = False LA_value = 0.106 tag = 'SiPixelLorentzAngle_Phase2_T11' elif tGeometry == 'T14': geometry_cff = 'GeometryExtended2023D41_cff' recoGeometry_cff = 'GeometryExtended2023D41Reco_cff' + has3DinL1 = False LA_value = 0.106 tag = 'SiPixelLorentzAngle_Phase2_T14' elif tGeometry == 'T15': geometry_cff = 'GeometryExtended2023D42_cff' recoGeometry_cff = 'GeometryExtended2023D42Reco_cff' + has3DinL1 = False LA_value = 0.0503 tag = 'SiPixelLorentzAngle_Phase2_T15' elif tGeometry == 'T25': geometry_cff = 'GeometryExtended2026D97_cff' recoGeometry_cff = 'GeometryExtended2026D97Reco_cff' + has3DinL1 = True LA_value = 0.0503 tag = 'SiPixelLorentzAngle_Phase2_T25_v1' - + +elif tGeometry == 'T33': + geometry_cff = 'GeometryExtended2026D102_cff' + recoGeometry_cff = 'GeometryExtended2026D102Reco_cff' + has3DinL1 = True + LA_value = 0.0503 + tag = 'SiPixelLorentzAngle_Phase2_T33_v1' else: print("Unknown tracker geometry") print("What are you doing ?!?!?!?!") @@ -77,7 +89,7 @@ process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") from 
Configuration.AlCa.GlobalTag import GlobalTag -process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '') +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_'+tGeometry, '') process.load("FWCore.MessageService.MessageLogger_cfi") @@ -123,16 +135,16 @@ "SiPixelLorentzAngleDBLoader", # enter -9999 if individual input - bPixLorentzAnglePerTesla = cms.untracked.double(-9999), - fPixLorentzAnglePerTesla = cms.untracked.double(-9999), + bPixLorentzAnglePerTesla = cms.untracked.double(-9999 if has3DinL1 else LA_value), + fPixLorentzAnglePerTesla = cms.untracked.double(-9999 if has3DinL1 else LA_value), #in case of PSet (only works if above is -9999) # One common value for BPix for now BPixParameters = cms.untracked.VPSet( - cms.PSet(layer = cms.int32(1), angle = cms.double(0.0)), - cms.PSet(layer = cms.int32(2), angle = cms.double(0.053)), - cms.PSet(layer = cms.int32(3), angle = cms.double(0.053)), - cms.PSet(layer = cms.int32(4), angle = cms.double(0.053)), + cms.PSet(layer = cms.int32(1), angle = cms.double(0.00)), + cms.PSet(layer = cms.int32(2), angle = cms.double(LA_value)), + cms.PSet(layer = cms.int32(3), angle = cms.double(LA_value)), + cms.PSet(layer = cms.int32(4), angle = cms.double(LA_value)), ), FPixParameters = cms.untracked.VPSet( cms.PSet(angle = cms.double(0.0) diff --git a/CondTools/SiStrip/plugins/SiStripApvGainInspector.cc b/CondTools/SiStrip/plugins/SiStripApvGainInspector.cc new file mode 100644 index 0000000000000..3203328cd36c5 --- /dev/null +++ b/CondTools/SiStrip/plugins/SiStripApvGainInspector.cc @@ -0,0 +1,1145 @@ +// -*- C++ -*- +// +// Package: CondTools/SiStrip +// Class: SiStripApvGainInspector +// +/* + *\class SiStripApvGainInspector SiStripApvGainInspector.cc CalibTracker/SiStripChannelGain/plugins/SiStripApvGainInspector.cc + + Description: This module allows redo the per-APV gain fits with different PDFs (landau, landau + gaus convolution, etc.) 
starting from the Charge vs APV index plot produced in the SiStrip G2 APV gain PCL workflow. It is possible to inspect the 1D charge distributions for certain APVs after fitting by means of specifying them via the parameter selectedModules. + + Implementation: largely based off CalibTracker/SiStripChannelGain/src/SiStripGainsPCLHarvester.cc + +*/ +// +// Original Author: Marco Musich +// Created: Tue, 05 Jun 2018 15:46:15 GMT +// +// + +// system include files +#include /* log */ +#include + +// user include files +#include "CalibFormats/SiStripObjects/interface/SiStripDetCabling.h" +#include "CalibFormats/SiStripObjects/interface/SiStripGain.h" +#include "CalibFormats/SiStripObjects/interface/SiStripQuality.h" +#include "CalibTracker/Records/interface/SiStripDetCablingRcd.h" +#include "CalibTracker/Records/interface/SiStripGainRcd.h" +#include "CalibTracker/SiStripChannelGain/interface/APVGainStruct.h" +#include "CalibTracker/Records/interface/SiStripQualityRcd.h" +#include "CommonTools/TrackerMap/interface/TrackerMap.h" +#include "CommonTools/UtilAlgos/interface/TFileService.h" +#include "CondCore/DBOutputService/interface/PoolDBOutputService.h" +#include "CondFormats/SiStripObjects/interface/SiStripApvGain.h" +#include "CondTools/SiStrip/interface/SiStripMiscalibrateHelper.h" +#include "DataFormats/SiStripDetId/interface/StripSubdetector.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/CommonDetUnit/interface/TrackingGeometry.h" +#include 
"Geometry/CommonTopologies/interface/PixelGeomDetUnit.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "Geometry/TrackerGeometryBuilder/interface/StripGeomDetUnit.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "Geometry/TrackerNumberingBuilder/interface/GeometricDet.h" + +// ROOT includes +#include "TStyle.h" +#include "TCanvas.h" +#include "TFile.h" +#include "TTree.h" +#include "TH1F.h" +#include "TH2S.h" +#include "TProfile.h" +#include "TF1.h" + +// +// class declaration +// +class SiStripApvGainInspector : public edm::one::EDAnalyzer { +public: + explicit SiStripApvGainInspector(const edm::ParameterSet&); + ~SiStripApvGainInspector() override; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void beginJob() override; + void analyze(const edm::Event&, const edm::EventSetup&) override; + void endJob() override; + void checkBookAPVColls(const edm::EventSetup& es); + void checkAndRetrieveTopology(const edm::EventSetup& setup); + bool isGoodLandauFit(double* FitResults); + void getPeakOfLandau(TH1* InputHisto, double* FitResults, double LowRange = 50, double HighRange = 5400); + void getPeakOfLanGau(TH1* InputHisto, double* FitResults, double LowRange = 50, double HighRange = 5400); + void doFakeFit(TH1* InputHisto, double* FitResults); + void getPeakOfLandauAroundMax(TH1* InputHisto, double* FitResults, double LowRange = 100, double HighRange = 100); + static double langaufun(Double_t* x, Double_t* par); + void storeOnTree(TFileService* tfs); + void makeNicePlotStyle(TH1F* plot); + std::unique_ptr getNewObject(); + std::map bookQualityMonitor(const TFileDirectory& dir); + void fillQualityMonitor(); + + void inline fill1D(std::map& h, const std::string& s, double x) { + if (h.count(s) == 0) { + edm::LogWarning("SiStripApvGainInspector") << "Trying to fill non-existing Histogram named " << s << 
std::endl; + return; + } + h[s]->Fill(x); + } + + void inline fill2D(std::map& h, const std::string& s, double x, double y) { + if (h.count(s) == 0) { + edm::LogWarning("SiStripApvGainInspector") << "Trying to fill non-existing Histogram named " << s << std::endl; + return; + } + h[s]->Fill(x, y); + } + + // ----------member data --------------------------- + enum fitMode { landau = 1, landauAroundMax = 2, landauGauss = 3, fake = 4 }; + + const std::vector fitModeStrings = { + "", // Enum values start from 1, so index 0 is empty or can be used as "invalid" + "landau", + "landauAroundMax", + "landauGauss", + "fake"}; + + inline bool isValidMode(int mode) const { + return mode == landau || mode == landauAroundMax || mode == landauGauss || mode == fake; + } + + const edm::ESGetToken gainToken_; + const edm::ESGetToken qualityToken_; + const edm::ESGetToken tkGeomToken_; + const edm::ESGetToken tTopoToken_; + + TFileService* tfs; + + // map the APV ids to the charge plots + std::map, TH1F*> histoMap_; + + edm::ESHandle tkGeom_; + const TrackerGeometry* bareTkGeomPtr_; // ugly hack to fill APV colls only once, but checks + const TrackerTopology* tTopo_; + + int NStripAPVs; + int NPixelDets; + + unsigned int GOOD; + unsigned int BAD; + unsigned int MASKED; + + std::vector> APVsCollOrdered; + std::unordered_map> APVsColl; + + const TH2F* Charge_Vs_Index; + TFile* fin; + fitMode fitMode_; // Declare the enum variable + const std::string filename_; + double minNrEntries; + std::vector wantedmods; + + std::unique_ptr ratio_map; + std::unique_ptr old_payload_map; + std::unique_ptr new_payload_map; + std::unique_ptr mpv_map; + std::unique_ptr mpv_err_map; + std::unique_ptr entries_map; + std::unique_ptr fitChi2_map; + + std::map hControl; +}; + +// +// constructors and destructor +// +SiStripApvGainInspector::SiStripApvGainInspector(const edm::ParameterSet& iConfig) + : gainToken_(esConsumes()), + qualityToken_(esConsumes()), + tkGeomToken_(esConsumes()), + 
tTopoToken_(esConsumes()), + bareTkGeomPtr_(nullptr), + tTopo_(nullptr), + GOOD(0), + BAD(0), + filename_(iConfig.getUntrackedParameter("inputFile")), + minNrEntries(iConfig.getUntrackedParameter("minNrEntries", 20)), + wantedmods(iConfig.getUntrackedParameter>("selectedModules")) { + usesResource(TFileService::kSharedResource); + usesResource(cond::service::PoolDBOutputService::kSharedResource); + + sort(wantedmods.begin(), wantedmods.end()); + + edm::LogInfo("SelectedModules") << "Selected module list"; + for (std::vector::const_iterator mod = wantedmods.begin(); mod != wantedmods.end(); mod++) { + edm::LogVerbatim("SelectedModules") << *mod; + } + + int modeValue = iConfig.getParameter("fitMode"); + if (!isValidMode(modeValue)) { + throw std::invalid_argument("Invalid value provided for 'fitMode'"); + } else { + edm::LogPrint("SiStripApvGainInspector") << "Chosen fitting mode: " << fitModeStrings[modeValue]; + } + + fitMode_ = static_cast(modeValue); + + //now do what ever initialization is needed + fin = TFile::Open(filename_.c_str(), "READ"); + Charge_Vs_Index = (TH2F*)fin->Get("DQMData/Run 999999/AlCaReco/Run summary/SiStripGainsAAG/Charge_Vs_Index_AagBunch"); + + ratio_map = std::make_unique("ratio"); + ratio_map->setTitle("Average by module of the G2 Gain payload ratio (new/old)"); + ratio_map->setPalette(1); + + new_payload_map = std::make_unique("new_payload"); + new_payload_map->setTitle("Tracker Map of Updated G2 Gain payload averaged by module"); + new_payload_map->setPalette(1); + + old_payload_map = std::make_unique("old_payload"); + old_payload_map->setTitle("Tracker Map of Starting G2 Gain Payload averaged by module"); + old_payload_map->setPalette(1); + + // fit quality maps + + mpv_map = std::make_unique("MPV"); + mpv_map->setTitle("Landau Fit MPV average value per module [ADC counts/mm]"); + mpv_map->setPalette(1); + + mpv_err_map = std::make_unique("MPVerr"); + mpv_err_map->setTitle("Landau Fit MPV average error per module [ADC counts/mm]"); + 
mpv_err_map->setPalette(1); + + entries_map = std::make_unique("Entries"); + entries_map->setTitle("log_{10}(entries) average per module"); + entries_map->setPalette(1); + + fitChi2_map = std::make_unique("FitChi2"); + fitChi2_map->setTitle("log_{10}(Fit #chi^{2}/ndf) average per module"); + fitChi2_map->setPalette(1); +} + +// do anything here that needs to be done at desctruction time +// (e.g. close files, deallocate resources etc.) +SiStripApvGainInspector::~SiStripApvGainInspector() { + fin->Close(); + delete fin; +} + +// +// member functions +// + +// ------------ method called for each event ------------ +void SiStripApvGainInspector::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + using namespace edm; + this->checkBookAPVColls(iSetup); // check whether APV colls are booked and do so if not yet done + this->checkAndRetrieveTopology(iSetup); + + edm::ESHandle gainHandle = iSetup.getHandle(gainToken_); + if (!gainHandle.isValid()) { + edm::LogError("SiStripApvGainInspector") << "gainHandle is not valid\n"; + exit(0); + } + + edm::ESHandle SiStripQuality_ = iSetup.getHandle(qualityToken_); + + for (unsigned int a = 0; a < APVsCollOrdered.size(); a++) { + std::shared_ptr APV = APVsCollOrdered[a]; + + if (APV->SubDet == PixelSubdetector::PixelBarrel || APV->SubDet == PixelSubdetector::PixelEndcap) + continue; + + APV->isMasked = SiStripQuality_->IsApvBad(APV->DetId, APV->APVId); + + if (gainHandle->getNumberOfTags() != 2) { + edm::LogError("SiStripApvGainInspector") << "NUMBER OF GAIN TAG IS EXPECTED TO BE 2\n"; + fflush(stdout); + exit(0); + }; + float newPreviousGain = gainHandle->getApvGain(APV->APVId, gainHandle->getRange(APV->DetId, 1), 1); + if (APV->PreviousGain != 1 and newPreviousGain != APV->PreviousGain) + edm::LogWarning("SiStripApvGainInspector") << "WARNING: ParticleGain in the global tag changed\n"; + APV->PreviousGain = newPreviousGain; + + float newPreviousGainTick = gainHandle->getApvGain(APV->APVId, 
gainHandle->getRange(APV->DetId, 0), 0); + if (APV->PreviousGainTick != 1 and newPreviousGainTick != APV->PreviousGainTick) { + edm::LogWarning("SiStripApvGainInspector") + << "WARNING: TickMarkGain in the global tag changed\n" + << std::endl + << " APV->SubDet: " << APV->SubDet << " APV->APVId:" << APV->APVId << std::endl + << " APV->PreviousGainTick: " << APV->PreviousGainTick << " newPreviousGainTick: " << newPreviousGainTick + << std::endl; + } + APV->PreviousGainTick = newPreviousGainTick; + } + + unsigned int I = 0; + TH1F* Proj = nullptr; + double FitResults[6]; + double MPVmean = 300; + + if (Charge_Vs_Index == nullptr) { + edm::LogError("SiStripGainsPCLHarvester") << "Harvesting: could not find input histogram " << std::endl; + return; + } + + printf("Progressing Bar :0%% 20%% 40%% 60%% 80%% 100%%\n"); + printf("Fitting Charge Distribution :"); + int TreeStep = APVsColl.size() / 50; + + for (auto it = APVsColl.begin(); it != APVsColl.end(); it++, I++) { + if (I % TreeStep == 0) { + printf("."); + fflush(stdout); + } + std::shared_ptr APV = it->second; + if (APV->Bin < 0) + APV->Bin = Charge_Vs_Index->GetXaxis()->FindBin(APV->Index); + + Proj = (TH1F*)(Charge_Vs_Index->ProjectionY( + "", Charge_Vs_Index->GetXaxis()->FindBin(APV->Index), Charge_Vs_Index->GetXaxis()->FindBin(APV->Index), "e")); + if (!Proj) + continue; + + switch (fitMode_) { + case landau: + getPeakOfLandau(Proj, FitResults); + break; + case landauAroundMax: + getPeakOfLandauAroundMax(Proj, FitResults); + break; + case landauGauss: + getPeakOfLanGau(Proj, FitResults); + break; + case fake: + doFakeFit(Proj, FitResults); + break; + default: + throw std::invalid_argument("Invalid value provided for 'fitMode'"); + } + + APV->FitMPV = FitResults[0]; + APV->FitMPVErr = FitResults[1]; + APV->FitWidth = FitResults[2]; + APV->FitWidthErr = FitResults[3]; + APV->FitChi2 = FitResults[4]; + APV->FitNorm = FitResults[5]; + APV->NEntries = Proj->GetEntries(); + + for (const auto& mod : wantedmods) { + if 
(mod == APV->DetId) { + edm::LogInfo("ModuleFound") << " module " << mod << " found! Storing... " << std::endl; + histoMap_[std::make_pair(APV->APVId, APV->DetId)] = (TH1F*)Proj->Clone(Form("hClone_%s", Proj->GetName())); + } + } + + if (isGoodLandauFit(FitResults)) { + APV->Gain = APV->FitMPV / MPVmean; + if (APV->SubDet > 2) + GOOD++; + } else { + APV->Gain = APV->PreviousGain; + if (APV->SubDet > 2) + BAD++; + } + if (APV->Gain <= 0) + APV->Gain = 1; + + delete Proj; + } + printf("\n"); +} + +//********************************************************************************// +// ------------ method called once each job just before starting event loop ------------ +void SiStripApvGainInspector::checkBookAPVColls(const edm::EventSetup& es) { + tkGeom_ = es.getHandle(tkGeomToken_); + const TrackerGeometry* newBareTkGeomPtr = &(*tkGeom_); + if (newBareTkGeomPtr == bareTkGeomPtr_) + return; // already filled APVColls, nothing changed + + if (!bareTkGeomPtr_) { // pointer not yet set: called the first time => fill the APVColls + auto const& Det = newBareTkGeomPtr->dets(); + + unsigned int Index = 0; + + for (unsigned int i = 0; i < Det.size(); i++) { + DetId Detid = Det[i]->geographicalId(); + int SubDet = Detid.subdetId(); + + if (SubDet == StripSubdetector::TIB || SubDet == StripSubdetector::TID || SubDet == StripSubdetector::TOB || + SubDet == StripSubdetector::TEC) { + auto DetUnit = dynamic_cast(Det[i]); + if (!DetUnit) + continue; + + const StripTopology& Topo = DetUnit->specificTopology(); + unsigned int NAPV = Topo.nstrips() / 128; + + for (unsigned int j = 0; j < NAPV; j++) { + auto APV = std::make_shared(); + APV->Index = Index; + APV->Bin = -1; + APV->DetId = Detid.rawId(); + APV->APVId = j; + APV->SubDet = SubDet; + APV->FitMPV = -1; + APV->FitMPVErr = -1; + APV->FitWidth = -1; + APV->FitWidthErr = -1; + APV->FitChi2 = -1; + APV->FitNorm = -1; + APV->Gain = -1; + APV->PreviousGain = 1; + APV->PreviousGainTick = 1; + APV->x = 
DetUnit->position().basicVector().x(); + APV->y = DetUnit->position().basicVector().y(); + APV->z = DetUnit->position().basicVector().z(); + APV->Eta = DetUnit->position().basicVector().eta(); + APV->Phi = DetUnit->position().basicVector().phi(); + APV->R = DetUnit->position().basicVector().transverse(); + APV->Thickness = DetUnit->surface().bounds().thickness(); + APV->NEntries = 0; + APV->isMasked = false; + + APVsCollOrdered.push_back(APV); + APVsColl[(APV->DetId << 4) | APV->APVId] = APV; + Index++; + NStripAPVs++; + } // loop on APVs + } // if is Strips + } // loop on dets + + for (unsigned int i = 0; i < Det.size(); + i++) { //Make two loop such that the Pixel information is added at the end --> make transition simpler + DetId Detid = Det[i]->geographicalId(); + int SubDet = Detid.subdetId(); + if (SubDet == PixelSubdetector::PixelBarrel || SubDet == PixelSubdetector::PixelEndcap) { + auto DetUnit = dynamic_cast(Det[i]); + if (!DetUnit) + continue; + + const PixelTopology& Topo = DetUnit->specificTopology(); + unsigned int NROCRow = Topo.nrows() / (80.); + unsigned int NROCCol = Topo.ncolumns() / (52.); + + for (unsigned int j = 0; j < NROCRow; j++) { + for (unsigned int i = 0; i < NROCCol; i++) { + auto APV = std::make_shared(); + APV->Index = Index; + APV->Bin = -1; + APV->DetId = Detid.rawId(); + APV->APVId = (j << 3 | i); + APV->SubDet = SubDet; + APV->FitMPV = -1; + APV->FitMPVErr = -1; + APV->FitWidth = -1; + APV->FitWidthErr = -1; + APV->FitChi2 = -1; + APV->Gain = -1; + APV->PreviousGain = 1; + APV->PreviousGainTick = 1; + APV->x = DetUnit->position().basicVector().x(); + APV->y = DetUnit->position().basicVector().y(); + APV->z = DetUnit->position().basicVector().z(); + APV->Eta = DetUnit->position().basicVector().eta(); + APV->Phi = DetUnit->position().basicVector().phi(); + APV->R = DetUnit->position().basicVector().transverse(); + APV->Thickness = DetUnit->surface().bounds().thickness(); + APV->isMasked = false; 
//SiPixelQuality_->IsModuleBad(Detid.rawId()); + APV->NEntries = 0; + + APVsCollOrdered.push_back(APV); + APVsColl[(APV->DetId << 4) | APV->APVId] = APV; + Index++; + NPixelDets++; + + } // loop on ROC cols + } // loop on ROC rows + } // if Pixel + } // loop on Dets + } //if (!bareTkGeomPtr_) ... + bareTkGeomPtr_ = newBareTkGeomPtr; +} + +void SiStripApvGainInspector::storeOnTree(TFileService* tfs) { + unsigned int tree_Index; + unsigned int tree_Bin; + unsigned int tree_DetId; + unsigned char tree_APVId; + unsigned char tree_SubDet; + float tree_x; + float tree_y; + float tree_z; + float tree_Eta; + float tree_R; + float tree_Phi; + float tree_Thickness; + float tree_FitMPV; + float tree_FitMPVErr; + float tree_FitWidth; + float tree_FitWidthErr; + float tree_FitChi2NDF; + float tree_FitNorm; + double tree_Gain; + double tree_PrevGain; + double tree_PrevGainTick; + double tree_NEntries; + bool tree_isMasked; + + TTree* MyTree; + MyTree = tfs->make("APVGain", "APVGain"); + MyTree->Branch("Index", &tree_Index, "Index/i"); + MyTree->Branch("Bin", &tree_Bin, "Bin/i"); + MyTree->Branch("DetId", &tree_DetId, "DetId/i"); + MyTree->Branch("APVId", &tree_APVId, "APVId/b"); + MyTree->Branch("SubDet", &tree_SubDet, "SubDet/b"); + MyTree->Branch("x", &tree_x, "x/F"); + MyTree->Branch("y", &tree_y, "y/F"); + MyTree->Branch("z", &tree_z, "z/F"); + MyTree->Branch("Eta", &tree_Eta, "Eta/F"); + MyTree->Branch("R", &tree_R, "R/F"); + MyTree->Branch("Phi", &tree_Phi, "Phi/F"); + MyTree->Branch("Thickness", &tree_Thickness, "Thickness/F"); + MyTree->Branch("FitMPV", &tree_FitMPV, "FitMPV/F"); + MyTree->Branch("FitMPVErr", &tree_FitMPVErr, "FitMPVErr/F"); + MyTree->Branch("FitWidth", &tree_FitWidth, "FitWidth/F"); + MyTree->Branch("FitWidthErr", &tree_FitWidthErr, "FitWidthErr/F"); + MyTree->Branch("FitChi2NDF", &tree_FitChi2NDF, "FitChi2NDF/F"); + MyTree->Branch("FitNorm", &tree_FitNorm, "FitNorm/F"); + MyTree->Branch("Gain", &tree_Gain, "Gain/D"); + MyTree->Branch("PrevGain", 
&tree_PrevGain, "PrevGain/D"); + MyTree->Branch("PrevGainTick", &tree_PrevGainTick, "PrevGainTick/D"); + MyTree->Branch("NEntries", &tree_NEntries, "NEntries/D"); + MyTree->Branch("isMasked", &tree_isMasked, "isMasked/O"); + + uint32_t cachedId(0); + SiStripMiscalibrate::Entry gain_ratio; + SiStripMiscalibrate::Entry o_gain; + SiStripMiscalibrate::Entry n_gain; + SiStripMiscalibrate::Entry mpv; + SiStripMiscalibrate::Entry mpv_err; + SiStripMiscalibrate::Entry entries; + SiStripMiscalibrate::Entry fitChi2; + + for (unsigned int a = 0; a < APVsCollOrdered.size(); a++) { + std::shared_ptr APV = APVsCollOrdered[a]; + if (APV == nullptr) + continue; + // printf( "%i | %i | PreviousGain = %7.5f NewGain = %7.5f (#clusters=%8.0f)\n", APV->DetId,APV->APVId,APV->PreviousGain,APV->Gain, APV->NEntries); + //fprintf(Gains,"%i | %i | PreviousGain = %7.5f(tick) x %7.5f(particle) NewGain (particle) = %7.5f (#clusters=%8.0f)\n", APV->DetId,APV->APVId,APV->PreviousGainTick, APV->PreviousGain,APV->Gain, APV->NEntries); + + // do not fill the Pixel + if (APV->SubDet == PixelSubdetector::PixelBarrel || APV->SubDet == PixelSubdetector::PixelEndcap) + continue; + + tree_Index = APV->Index; + tree_Bin = Charge_Vs_Index->GetXaxis()->FindBin(APV->Index); + tree_DetId = APV->DetId; + tree_APVId = APV->APVId; + tree_SubDet = APV->SubDet; + tree_x = APV->x; + tree_y = APV->y; + tree_z = APV->z; + tree_Eta = APV->Eta; + tree_R = APV->R; + tree_Phi = APV->Phi; + tree_Thickness = APV->Thickness; + tree_FitMPV = APV->FitMPV; + tree_FitMPVErr = APV->FitMPVErr; + tree_FitWidth = APV->FitWidth; + tree_FitWidthErr = APV->FitWidthErr; + tree_FitChi2NDF = APV->FitChi2; + tree_FitNorm = APV->FitNorm; + tree_Gain = APV->Gain; + tree_PrevGain = APV->PreviousGain; + tree_PrevGainTick = APV->PreviousGainTick; + tree_NEntries = APV->NEntries; + tree_isMasked = APV->isMasked; + + // flush the counters + if (cachedId != 0 && tree_DetId != cachedId) { + //ratio_map->fill(cachedId,gain_ratio.mean()); + 
ratio_map->fill(cachedId, o_gain.mean() / n_gain.mean()); + old_payload_map->fill(cachedId, o_gain.mean()); + new_payload_map->fill(cachedId, n_gain.mean()); + + if (entries.mean() > 0) { + mpv_map->fill(cachedId, mpv.mean()); + mpv_err_map->fill(cachedId, mpv_err.mean()); + entries_map->fill(cachedId, log10(entries.mean())); + if (fitChi2.mean() > 0) { + fitChi2_map->fill(cachedId, log10(fitChi2.mean())); + } else { + fitChi2_map->fill(cachedId, -1); + } + } + + gain_ratio.reset(); + o_gain.reset(); + n_gain.reset(); + + mpv.reset(); + mpv_err.reset(); + entries.reset(); + fitChi2.reset(); + } + + cachedId = tree_DetId; + gain_ratio.add(tree_PrevGain / tree_Gain); + o_gain.add(tree_PrevGain); + n_gain.add(tree_Gain); + mpv.add(tree_FitMPV); + mpv_err.add(tree_FitMPVErr); + entries.add(tree_NEntries); + fitChi2.add(tree_FitChi2NDF); + + if (tree_DetId == 402673324) { + printf("%i | %i : %f --> %f (%f)\n", tree_DetId, tree_APVId, tree_PrevGain, tree_Gain, tree_NEntries); + } + + MyTree->Fill(); + } +} + +//********************************************************************************// +void SiStripApvGainInspector::checkAndRetrieveTopology(const edm::EventSetup& setup) { + if (!tTopo_) { + edm::ESHandle TopoHandle = setup.getHandle(tTopoToken_); + tTopo_ = TopoHandle.product(); + } +} + +//********************************************************************************// +void SiStripApvGainInspector::getPeakOfLandau(TH1* InputHisto, double* FitResults, double LowRange, double HighRange) { + FitResults[0] = -0.5; //MPV + FitResults[1] = 0; //MPV error + FitResults[2] = -0.5; //Width + FitResults[3] = 0; //Width error + FitResults[4] = -0.5; //Fit Chi2/NDF + FitResults[5] = 0; //Normalization + + if (InputHisto->GetEntries() < minNrEntries) + return; + + // perform fit with standard landau + TF1 MyLandau("MyLandau", "landau", LowRange, HighRange); + MyLandau.SetParameter(1, 300); + InputHisto->Fit(&MyLandau, "QR WW"); + + // MPV is parameter 1 (0=constant, 1=MPV, 
2=Sigma) + FitResults[0] = MyLandau.GetParameter(1); //MPV + FitResults[1] = MyLandau.GetParError(1); //MPV error + FitResults[2] = MyLandau.GetParameter(2); //Width + FitResults[3] = MyLandau.GetParError(2); //Width error + FitResults[4] = MyLandau.GetChisquare() / MyLandau.GetNDF(); //Fit Chi2/NDF + FitResults[5] = MyLandau.GetParameter(0); +} + +void SiStripApvGainInspector::doFakeFit(TH1* InputHisto, double* FitResults) { + FitResults[0] = -0.5; //MPV + FitResults[1] = 0; //MPV error + FitResults[2] = -0.5; //Width + FitResults[3] = 0; //Width error + FitResults[4] = -0.5; //Fit Chi2/NDF + FitResults[5] = 0; //Normalization +} + +//********************************************************************************// +double SiStripApvGainInspector::langaufun(Double_t* x, Double_t* par) +//********************************************************************************// +{ + //Fit parameters: + //par[0]=Width (scale) parameter of Landau density + //par[1]=Most Probable (MP, location) parameter of Landau density + //par[2]=Total area (integral -inf to inf, normalization constant) + //par[3]=Width (sigma) of convoluted Gaussian function + // + //In the Landau distribution (represented by the CERNLIB approximation), + //the maximum is located at x=-0.22278298 with the location parameter=0. + //This shift is corrected within this function, so that the actual + //maximum is identical to the MP parameter. 
+ + // Numeric constants + Double_t invsq2pi = 0.3989422804014; // (2 pi)^(-1/2) + Double_t mpshift = -0.22278298; // Landau maximum location + + // Control constants + Double_t np = 100.0; // number of convolution steps + Double_t sc = 5.0; // convolution extends to +-sc Gaussian sigmas + + // Variables + Double_t xx; + Double_t mpc; + Double_t fland; + Double_t sum = 0.0; + Double_t xlow, xupp; + Double_t step; + Double_t i; + + // MP shift correction + mpc = par[1] - mpshift * par[0]; + + // Range of convolution integral + xlow = x[0] - sc * par[3]; + xupp = x[0] + sc * par[3]; + + step = (xupp - xlow) / np; + + // Convolution integral of Landau and Gaussian by sum + for (i = 1.0; i <= np / 2; i++) { + xx = xlow + (i - .5) * step; + fland = TMath::Landau(xx, mpc, par[0]) / par[0]; + sum += fland * TMath::Gaus(x[0], xx, par[3]); + + xx = xupp - (i - .5) * step; + fland = TMath::Landau(xx, mpc, par[0]) / par[0]; + sum += fland * TMath::Gaus(x[0], xx, par[3]); + } + + return (par[2] * step * sum * invsq2pi / par[3]); +} + +//********************************************************************************// +void SiStripApvGainInspector::getPeakOfLanGau(TH1* InputHisto, double* FitResults, double LowRange, double HighRange) { + FitResults[0] = -0.5; //MPV + FitResults[1] = 0; //MPV error + FitResults[2] = -0.5; //Width + FitResults[3] = 0; //Width error + FitResults[4] = -0.5; //Fit Chi2/NDF + FitResults[5] = 0; //Normalization + + if (InputHisto->GetEntries() < minNrEntries) + return; + + // perform fit with standard landau + TF1 MyLandau("MyLandau", "landau", LowRange, HighRange); + MyLandau.SetParameter(1, 300); + InputHisto->Fit(&MyLandau, "QR WW"); + + double startvalues[4] = {100, 300, 10000, 100}; + double parlimitslo[4] = {0, 250, 10, 0}; + double parlimitshi[4] = {200, 350, 1000000, 200}; + + TF1 MyLangau("MyLanGau", langaufun, LowRange, HighRange, 4); + + MyLangau.SetParameters(startvalues); + MyLangau.SetParNames("Width", "MP", "Area", "GSigma"); + + for 
(unsigned int i = 0; i < 4; i++) { + MyLangau.SetParLimits(i, parlimitslo[i], parlimitshi[i]); + } + + InputHisto->Fit("MyLanGau", "QRB0"); // fit within specified range, use ParLimits, do not plot + + // MPV is parameter 1 (0=constant, 1=MPV, 2=Sigma) + FitResults[0] = MyLangau.GetParameter(1); //MPV + FitResults[1] = MyLangau.GetParError(1); //MPV error + FitResults[2] = MyLangau.GetParameter(0); //Width + FitResults[3] = MyLangau.GetParError(0); //Width error + FitResults[4] = MyLangau.GetChisquare() / MyLangau.GetNDF(); //Fit Chi2/NDF + FitResults[5] = MyLangau.GetParameter(3); +} + +//********************************************************************************// +void SiStripApvGainInspector::getPeakOfLandauAroundMax(TH1* InputHisto, + double* FitResults, + double LowRange, + double HighRange) { + FitResults[0] = -0.5; //MPV + FitResults[1] = 0; //MPV error + FitResults[2] = -0.5; //Width + FitResults[3] = 0; //Width error + FitResults[4] = -0.5; //Fit Chi2/NDF + FitResults[5] = 0; //Normalization + + if (InputHisto->GetEntries() < minNrEntries) + return; + + int maxbin = InputHisto->GetMaximumBin(); + int maxbin2 = -9999.; + + if (InputHisto->GetBinContent(maxbin - 1) > InputHisto->GetBinContent(maxbin + 1)) { + maxbin2 = maxbin - 1; + } else { + maxbin2 = maxbin + 1; + } + + float maxbincenter = (InputHisto->GetBinCenter(maxbin) + InputHisto->GetBinCenter(maxbin2)) / 2; + + TF1 MyLandau("MyLandau", "[2]*TMath::Landau(x,[0],[1],0)", maxbincenter - LowRange, maxbincenter + HighRange); + // TF1 MyLandau("MyLandau", "landau", LowRange, HighRange); + // MyLandau.SetParameter(1, 300); + InputHisto->Fit(&MyLandau, "QR WW"); + + MyLandau.SetParameter(0, maxbincenter); + MyLandau.SetParameter(1, maxbincenter / 10.); + MyLandau.SetParameter(2, InputHisto->GetMaximum()); + + float mpv = MyLandau.GetParameter(1); + MyLandau.SetParameter(1, mpv); + //InputHisto->Rebin(3); + InputHisto->Fit(&MyLandau, "QOR", "", mpv - 50, mpv + 100); + + InputHisto->Fit(&MyLandau, 
"QOR", "", maxbincenter - LowRange, maxbincenter + HighRange); + InputHisto->Fit(&MyLandau, "QOR", "", maxbincenter - LowRange, maxbincenter + HighRange); + + // MPV is parameter 1 (0=constant, 1=MPV, 2=Sigma) + FitResults[0] = MyLandau.GetParameter(1); //MPV + FitResults[1] = MyLandau.GetParError(1); //MPV error + FitResults[2] = MyLandau.GetParameter(2); //Width + FitResults[3] = MyLandau.GetParError(2); //Width error + FitResults[4] = MyLandau.GetChisquare() / MyLandau.GetNDF(); //Fit Chi2/NDF + FitResults[5] = MyLandau.GetParameter(0); +} + +//********************************************************************************// +bool SiStripApvGainInspector::isGoodLandauFit(double* FitResults) { + if (FitResults[0] <= 0) + return false; + // if(FitResults[1] > MaxMPVError )return false; + // if(FitResults[4] > MaxChi2OverNDF)return false; + return true; +} + +/*--------------------------------------------------------------------*/ +void SiStripApvGainInspector::makeNicePlotStyle(TH1F* plot) +/*--------------------------------------------------------------------*/ +{ + plot->GetXaxis()->CenterTitle(true); + plot->GetYaxis()->CenterTitle(true); + plot->GetXaxis()->SetTitleFont(42); + plot->GetYaxis()->SetTitleFont(42); + plot->GetXaxis()->SetTitleSize(0.05); + plot->GetYaxis()->SetTitleSize(0.05); + plot->GetXaxis()->SetTitleOffset(0.9); + plot->GetYaxis()->SetTitleOffset(1.3); + plot->GetXaxis()->SetLabelFont(42); + plot->GetYaxis()->SetLabelFont(42); + plot->GetYaxis()->SetLabelSize(.05); + plot->GetXaxis()->SetLabelSize(.05); +} + +//********************************************************************************// +std::unique_ptr SiStripApvGainInspector::getNewObject() { + std::unique_ptr obj = std::make_unique(); + + std::vector theSiStripVector; + unsigned int PreviousDetId = 0; + for (unsigned int a = 0; a < APVsCollOrdered.size(); a++) { + std::shared_ptr APV = APVsCollOrdered[a]; + if (APV == nullptr) { + printf("Bug\n"); + continue; + } + if (APV->SubDet 
<= 2) + continue; + if (APV->DetId != PreviousDetId) { + if (!theSiStripVector.empty()) { + SiStripApvGain::Range range(theSiStripVector.begin(), theSiStripVector.end()); + if (!obj->put(PreviousDetId, range)) + printf("Bug to put detId = %i\n", PreviousDetId); + } + theSiStripVector.clear(); + PreviousDetId = APV->DetId; + } + theSiStripVector.push_back(APV->Gain); + + LogDebug("SiStripGainsPCLHarvester") << " DetId: " << APV->DetId << " APV: " << APV->APVId + << " Gain: " << APV->Gain << std::endl; + } + if (!theSiStripVector.empty()) { + SiStripApvGain::Range range(theSiStripVector.begin(), theSiStripVector.end()); + if (!obj->put(PreviousDetId, range)) + printf("Bug to put detId = %i\n", PreviousDetId); + } + + return obj; +} + +// ------------ method called once each job just before starting event loop ------------ +void SiStripApvGainInspector::beginJob() { + TFileDirectory control_dir = tfs->mkdir("Control"); + //DA.cd(); + hControl = this->bookQualityMonitor(control_dir); +} + +// ------------ method called once each job just after ending the event loop ------------ +void SiStripApvGainInspector::endJob() { + edm::LogVerbatim("SelectedModules") << "Selected APVs:" << histoMap_.size() << std::endl; + for (const auto& plot : histoMap_) { + TCanvas* c1 = new TCanvas(Form("c1_%i_%i", plot.first.second, plot.first.first), + Form("c1_%i_%i", plot.first.second, plot.first.first), + 800, + 600); + // Define common things for the different fits + + gStyle->SetOptFit(1011); + c1->Clear(); + + c1->SetLeftMargin(0.15); + c1->SetRightMargin(0.10); + plot.second->SetTitle(Form("Cluster Charge (%i,%i)", plot.first.second, plot.first.first)); + plot.second->GetXaxis()->SetTitle("Normalized Cluster Charge [ADC counts/mm]"); + plot.second->GetYaxis()->SetTitle("On-track clusters"); + plot.second->GetXaxis()->SetRangeUser(0., 1000.); + + this->makeNicePlotStyle(plot.second); + plot.second->Draw(); + edm::LogVerbatim("SelectedModules") << " DetId: " << plot.first.second << " 
(" << plot.first.first << ")" + << std::endl; + ; + + c1->Print(Form("c1_%i_%i.png", plot.first.second, plot.first.first)); + c1->Print(Form("c1_%i_%i.pdf", plot.first.second, plot.first.first)); + } + + tfs = edm::Service().operator->(); + storeOnTree(tfs); + + auto range = SiStripMiscalibrate::getTruncatedRange(ratio_map.get()); + + ratio_map->save(true, range.first, range.second, "G2_gain_ratio_map.pdf"); + ratio_map->save(true, range.first, range.second, "G2_gain_ratio_map.png"); + + range = SiStripMiscalibrate::getTruncatedRange(old_payload_map.get()); + + old_payload_map->save(true, range.first, range.second, "starting_G2_gain_payload_map.pdf"); + old_payload_map->save(true, range.first, range.second, "starting_G2_gain_payload_map.png"); + + range = SiStripMiscalibrate::getTruncatedRange(new_payload_map.get()); + + new_payload_map->save(true, range.first, range.second, "new_G2_gain_payload_map.pdf"); + new_payload_map->save(true, range.first, range.second, "new_G2_gain_payload_map.png"); + + mpv_map->save(true, 250, 350., "mpv_map.pdf"); + mpv_map->save(true, 250, 350., "mpv_map.png"); + + mpv_err_map->save(true, 0., 3., "mpv_err_map.pdf"); + mpv_err_map->save(true, 0., 3., "mpv_err_map.png"); + + entries_map->save(true, 0, 0, "entries_map.pdf"); + entries_map->save(true, 0, 0, "entries_map.png"); + + fitChi2_map->save(true, 0., 0., "fitChi2_map.pdf"); + fitChi2_map->save(true, 0., 0., "fitChi2_map.png"); + + fillQualityMonitor(); + + std::unique_ptr theAPVGains = this->getNewObject(); + + // write out the APVGains record + edm::Service poolDbService; + + if (poolDbService.isAvailable()) + poolDbService->writeOneIOV(theAPVGains.get(), poolDbService->currentTime(), "SiStripApvGainRcd"); + else + throw std::runtime_error("PoolDBService required."); +} + +std::map SiStripApvGainInspector::bookQualityMonitor(const TFileDirectory& dir) { + int MPVbin = 300; + float MPVmin = 0.; + float MPVmax = 600.; + + TH1F::SetDefaultSumw2(kTRUE); + std::map h; + + 
h["MPV_Vs_EtaTIB"] = dir.make("MPVvsEtaTIB", "MPV vs Eta TIB", 50, -3.0, 3.0, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_EtaTID"] = dir.make("MPVvsEtaTID", "MPV vs Eta TID", 50, -3.0, 3.0, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_EtaTOB"] = dir.make("MPVvsEtaTOB", "MPV vs Eta TOB", 50, -3.0, 3.0, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_EtaTEC"] = dir.make("MPVvsEtaTEC", "MPV vs Eta TEC", 50, -3.0, 3.0, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_EtaTECthin"] = dir.make("MPVvsEtaTEC1", "MPV vs Eta TEC-thin", 50, -3.0, 3.0, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_EtaTECthick"] = + dir.make("MPVvsEtaTEC2", "MPV vs Eta TEC-thick", 50, -3.0, 3.0, MPVbin, MPVmin, MPVmax); + + h["MPV_Vs_PhiTIB"] = dir.make("MPVvsPhiTIB", "MPV vs Phi TIB", 50, -3.4, 3.4, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_PhiTID"] = dir.make("MPVvsPhiTID", "MPV vs Phi TID", 50, -3.4, 3.4, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_PhiTOB"] = dir.make("MPVvsPhiTOB", "MPV vs Phi TOB", 50, -3.4, 3.4, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_PhiTEC"] = dir.make("MPVvsPhiTEC", "MPV vs Phi TEC", 50, -3.4, 3.4, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_PhiTECthin"] = + dir.make("MPVvsPhiTEC1", "MPV vs Phi TEC-thin ", 50, -3.4, 3.4, MPVbin, MPVmin, MPVmax); + h["MPV_Vs_PhiTECthick"] = + dir.make("MPVvsPhiTEC2", "MPV vs Phi TEC-thick", 50, -3.4, 3.4, MPVbin, MPVmin, MPVmax); + + h["NoMPVfit"] = dir.make("NoMPVfit", "Modules with bad Landau Fit", 350, -350, 350, 240, 0, 120); + h["NoMPVmasked"] = dir.make("NoMPVmasked", "Masked Modules", 350, -350, 350, 240, 0, 120); + + h["Gains"] = dir.make("Gains", "Gains", 300, 0, 2); + h["MPVs"] = dir.make("MPVs", "MPVs", MPVbin, MPVmin, MPVmax); + h["MPVs320"] = dir.make("MPV_320", "MPV 320 thickness", MPVbin, MPVmin, MPVmax); + h["MPVs500"] = dir.make("MPV_500", "MPV 500 thickness", MPVbin, MPVmin, MPVmax); + h["MPVsTIB"] = dir.make("MPV_TIB", "MPV TIB", MPVbin, MPVmin, MPVmax); + h["MPVsTID"] = dir.make("MPV_TID", "MPV TID", MPVbin, MPVmin, MPVmax); + h["MPVsTIDP"] = dir.make("MPV_TIDP", "MPV TIDP", MPVbin, 
MPVmin, MPVmax); + h["MPVsTIDM"] = dir.make("MPV_TIDM", "MPV TIDM", MPVbin, MPVmin, MPVmax); + h["MPVsTOB"] = dir.make("MPV_TOB", "MPV TOB", MPVbin, MPVmin, MPVmax); + h["MPVsTEC"] = dir.make("MPV_TEC", "MPV TEC", MPVbin, MPVmin, MPVmax); + h["MPVsTECP"] = dir.make("MPV_TECP", "MPV TECP", MPVbin, MPVmin, MPVmax); + h["MPVsTECM"] = dir.make("MPV_TECM", "MPV TECM", MPVbin, MPVmin, MPVmax); + h["MPVsTECthin"] = dir.make("MPV_TEC1", "MPV TEC thin", MPVbin, MPVmin, MPVmax); + h["MPVsTECthick"] = dir.make("MPV_TEC2", "MPV TEC thick", MPVbin, MPVmin, MPVmax); + h["MPVsTECP1"] = dir.make("MPV_TECP1", "MPV TECP thin ", MPVbin, MPVmin, MPVmax); + h["MPVsTECP2"] = dir.make("MPV_TECP2", "MPV TECP thick", MPVbin, MPVmin, MPVmax); + h["MPVsTECM1"] = dir.make("MPV_TECM1", "MPV TECM thin", MPVbin, MPVmin, MPVmax); + h["MPVsTECM2"] = dir.make("MPV_TECM2", "MPV TECM thick", MPVbin, MPVmin, MPVmax); + + h["MPVError"] = dir.make("MPVError", "MPV Error", 150, 0, 150); + h["MPVErrorVsMPV"] = dir.make("MPVErrorVsMPV", "MPV Error vs MPV", 300, 0, 600, 150, 0, 150); + h["MPVErrorVsEta"] = dir.make("MPVErrorVsEta", "MPV Error vs Eta", 50, -3.0, 3.0, 150, 0, 150); + h["MPVErrorVsPhi"] = dir.make("MPVErrorVsPhi", "MPV Error vs Phi", 50, -3.4, 3.4, 150, 0, 150); + h["MPVErrorVsN"] = dir.make("MPVErrorVsN", "MPV Error vs N", 500, 0, 1000, 150, 0, 150); + + h["DiffWRTPrevGainTIB"] = dir.make("DiffWRTPrevGainTIB", "Diff w.r.t. PrevGain TIB", 250, 0.5, 1.5); + h["DiffWRTPrevGainTID"] = dir.make("DiffWRTPrevGainTID", "Diff w.r.t. PrevGain TID", 250, 0.5, 1.5); + h["DiffWRTPrevGainTOB"] = dir.make("DiffWRTPrevGainTOB", "Diff w.r.t. PrevGain TOB", 250, 0.5, 1.5); + h["DiffWRTPrevGainTEC"] = dir.make("DiffWRTPrevGainTEC", "Diff w.r.t. 
PrevGain TEC", 250, 0.5, 1.5); + + h["GainVsPrevGainTIB"] = dir.make("GainVsPrevGainTIB", "Gain vs PrevGain TIB", 100, 0, 2, 100, 0, 2); + h["GainVsPrevGainTID"] = dir.make("GainVsPrevGainTID", "Gain vs PrevGain TID", 100, 0, 2, 100, 0, 2); + h["GainVsPrevGainTOB"] = dir.make("GainVsPrevGainTOB", "Gain vs PrevGain TOB", 100, 0, 2, 100, 0, 2); + h["GainVsPrevGainTEC"] = dir.make("GainVsPrevGainTEC", "Gain vs PrevGain TEC", 100, 0, 2, 100, 0, 2); + + return h; +} + +void SiStripApvGainInspector::fillQualityMonitor() { + for (unsigned int a = 0; a < APVsCollOrdered.size(); a++) { + std::shared_ptr APV = APVsCollOrdered[a]; + if (APV == nullptr) + continue; + + //unsigned int Index = APV->Index; + //unsigned int DetId = APV->DetId; + unsigned int SubDet = APV->SubDet; + float z = APV->z; + float Eta = APV->Eta; + float R = APV->R; + float Phi = APV->Phi; + float Thickness = APV->Thickness; + double FitMPV = APV->FitMPV; + double FitMPVErr = APV->FitMPVErr; + double Gain = APV->Gain; + double NEntries = APV->NEntries; + double PreviousGain = APV->PreviousGain; + + if (SubDet < 3) + continue; // avoid to loop over Pixel det id + + if (FitMPV <= 0.) { // No fit of MPV + if (APV->isMasked) + fill2D(hControl, "NoMPVmasked", z, R); + else + fill2D(hControl, "NoMPVfit", z, R); + } else { // Fit of MPV + if (FitMPV > 0.) 
+ fill1D(hControl, "Gains", Gain); + + fill1D(hControl, "MPVs", FitMPV); + if (Thickness < 0.04) + fill1D(hControl, "MPVs320", FitMPV); + if (Thickness > 0.04) + fill1D(hControl, "MPVs500", FitMPV); + + fill1D(hControl, "MPVError", FitMPVErr); + fill2D(hControl, "MPVErrorVsMPV", FitMPV, FitMPVErr); + fill2D(hControl, "MPVErrorVsEta", Eta, FitMPVErr); + fill2D(hControl, "MPVErrorVsPhi", Phi, FitMPVErr); + fill2D(hControl, "MPVErrorVsN", NEntries, FitMPVErr); + + if (SubDet == 3) { + fill2D(hControl, "MPV_Vs_EtaTIB", Eta, FitMPV); + fill2D(hControl, "MPV_Vs_PhiTIB", Phi, FitMPV); + fill1D(hControl, "MPVsTIB", FitMPV); + + } else if (SubDet == 4) { + fill2D(hControl, "MPV_Vs_EtaTID", Eta, FitMPV); + fill2D(hControl, "MPV_Vs_PhiTID", Phi, FitMPV); + fill1D(hControl, "MPVsTID", FitMPV); + if (Eta < 0.) + fill1D(hControl, "MPVsTIDM", FitMPV); + if (Eta > 0.) + fill1D(hControl, "MPVsTIDP", FitMPV); + + } else if (SubDet == 5) { + fill2D(hControl, "MPV_Vs_EtaTOB", Eta, FitMPV); + fill2D(hControl, "MPV_Vs_PhiTOB", Phi, FitMPV); + fill1D(hControl, "MPVsTOB", FitMPV); + + } else if (SubDet == 6) { + fill2D(hControl, "MPV_Vs_EtaTEC", Eta, FitMPV); + fill2D(hControl, "MPV_Vs_PhiTEC", Phi, FitMPV); + fill1D(hControl, "MPVsTEC", FitMPV); + if (Eta < 0.) + fill1D(hControl, "MPVsTECM", FitMPV); + if (Eta > 0.) + fill1D(hControl, "MPVsTECP", FitMPV); + if (Thickness < 0.04) { + fill2D(hControl, "MPV_Vs_EtaTECthin", Eta, FitMPV); + fill2D(hControl, "MPV_Vs_PhiTECthin", Phi, FitMPV); + fill1D(hControl, "MPVsTECthin", FitMPV); + if (Eta > 0.) + fill1D(hControl, "MPVsTECP1", FitMPV); + if (Eta < 0.) + fill1D(hControl, "MPVsTECM1", FitMPV); + } + if (Thickness > 0.04) { + fill2D(hControl, "MPV_Vs_EtaTECthick", Eta, FitMPV); + fill2D(hControl, "MPV_Vs_PhiTECthick", Phi, FitMPV); + fill1D(hControl, "MPVsTECthick", FitMPV); + if (Eta > 0.) + fill1D(hControl, "MPVsTECP2", FitMPV); + if (Eta < 0.) + fill1D(hControl, "MPVsTECM2", FitMPV); + } + } + } + + if (SubDet == 3 && PreviousGain != 0.) 
+ fill1D(hControl, "DiffWRTPrevGainTIB", Gain / PreviousGain); + else if (SubDet == 4 && PreviousGain != 0.) + fill1D(hControl, "DiffWRTPrevGainTID", Gain / PreviousGain); + else if (SubDet == 5 && PreviousGain != 0.) + fill1D(hControl, "DiffWRTPrevGainTOB", Gain / PreviousGain); + else if (SubDet == 6 && PreviousGain != 0.) + fill1D(hControl, "DiffWRTPrevGainTEC", Gain / PreviousGain); + + if (SubDet == 3) + fill2D(hControl, "GainVsPrevGainTIB", PreviousGain, Gain); + else if (SubDet == 4) + fill2D(hControl, "GainVsPrevGainTID", PreviousGain, Gain); + else if (SubDet == 5) + fill2D(hControl, "GainVsPrevGainTOB", PreviousGain, Gain); + else if (SubDet == 6) + fill2D(hControl, "GainVsPrevGainTEC", PreviousGain, Gain); + + } // loop on the APV collections +} + +// ------------ method fills 'descriptions' with the allowed parameters for the module ------------ +void SiStripApvGainInspector::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.addUntracked("inputFile", {}); + desc.addUntracked("minNrEntries", 20); + desc.add("fitMode", 2) + ->setComment("fit mode. 
Available options: 1: landau\n 2: landau around max\n 3:landau&gaus convo\n 4: fake"); + desc.addUntracked>("selectedModules", {}); + descriptions.addWithDefaultLabel(desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(SiStripApvGainInspector); diff --git a/CondTools/SiStrip/plugins/SiStripGainPayloadCopyAndExclude.cc b/CondTools/SiStrip/plugins/SiStripGainPayloadCopyAndExclude.cc new file mode 100644 index 0000000000000..bac00aa99d3b0 --- /dev/null +++ b/CondTools/SiStrip/plugins/SiStripGainPayloadCopyAndExclude.cc @@ -0,0 +1,195 @@ +// -*- C++ -*- +// +// Package: CondTools/SiStrip +// Class: SiStripGainPayloadCopyAndExclude +// +/* + *\class SiStripGainPayloadCopyAndExclude SiStripGainPayloadCopyAndExclude.cc CondTools/SiStrip/plugins/SiStripGainPayloadCopyAndExclude.cc + + Description: This module is meant to copy the content of a SiStrip APV Gain payload (either G1 or G2) from a local sqlite file (that should be feeded to the Event Setup via the SiStripApvGain3Rcd and put in another local sqlite file, excepted for the modules specified in the excludedModules parameter. If the doReverse parameter is true, the opposite action is performed. + + Implementation: The implemenation takes advantage of the convenience record SiStripApvGain3Rcd in the EventSetup to be able to hold at the same time two instances of the Strip Gains in the same job. 
+ +*/ +// +// Original Author: Marco Musich +// Created: Fri, 08 Jun 2018 08:28:01 GMT +// +// + +// system include files +#include +#include + +// user include files +#include "CLHEP/Random/RandGauss.h" +#include "CalibFormats/SiStripObjects/interface/SiStripGain.h" +#include "CalibTracker/Records/interface/SiStripGainRcd.h" +#include "CommonTools/TrackerMap/interface/TrackerMap.h" +#include "CondCore/DBOutputService/interface/PoolDBOutputService.h" +#include "CondFormats/DataRecord/interface/SiStripApvGainRcd.h" +#include "CondFormats/SiStripObjects/interface/SiStripApvGain.h" +#include "CondFormats/SiStripObjects/interface/SiStripSummary.h" +#include "DataFormats/SiStripDetId/interface/SiStripDetId.h" +#include "DataFormats/SiStripDetId/interface/StripSubdetector.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" + +// +// class declaration +// +class SiStripGainPayloadCopyAndExclude : public edm::one::EDAnalyzer { +public: + explicit SiStripGainPayloadCopyAndExclude(const edm::ParameterSet&); + ~SiStripGainPayloadCopyAndExclude() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void analyze(const edm::Event&, const edm::EventSetup&) override; + std::unique_ptr getNewObject(const std::map, float>& theMap); + + // ----------member data --------------------------- + const edm::ESGetToken m_gainToken; + const edm::ESGetToken m_gain3Token; + std::vector m_excludedMods; + const std::string m_Record; + const 
uint32_t m_gainType; + const bool m_reverseSelect; +}; + +// +// constructors and destructor +// +SiStripGainPayloadCopyAndExclude::SiStripGainPayloadCopyAndExclude(const edm::ParameterSet& iConfig) + : m_gainToken{esConsumes()}, + m_gain3Token{esConsumes()}, + m_excludedMods{iConfig.getUntrackedParameter>("excludedModules")}, + m_Record{iConfig.getUntrackedParameter("record", "SiStripApvGainRcd")}, + m_gainType{iConfig.getUntrackedParameter("gainType", 1)}, + m_reverseSelect{iConfig.getUntrackedParameter("reverseSelection", false)} { + usesResource(cond::service::PoolDBOutputService::kSharedResource); + + //now do what ever initialization is needed + sort(m_excludedMods.begin(), m_excludedMods.end()); + + edm::LogInfo("ExcludedModules") << "Selected module list"; + for (std::vector::const_iterator mod = m_excludedMods.begin(); mod != m_excludedMods.end(); mod++) { + edm::LogVerbatim("ExcludedModules") << *mod; + } +} + +// +// member functions +// + +// ------------ method called for each event ------------ +void SiStripGainPayloadCopyAndExclude::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + using namespace edm; + + // gain to be validated + edm::ESHandle gNew = iSetup.getHandle(m_gain3Token); + edm::ESHandle gOld = iSetup.getHandle(m_gainToken); + + std::map, float> theMap, oldPayloadMap; + + std::vector detid; + gNew->getDetIds(detid); + for (const auto& d : detid) { + SiStripApvGain::Range range_new = gNew->getRange(d); + SiStripApvGain::Range range_old = gOld->getRange(d, m_gainType); + float nAPV = 0; + + for (int it = 0; it < range_new.second - range_new.first; it++) { + nAPV += 1; + float Gain = gNew->getApvGain(it, range_new); + float patchGain = gOld->getApvGain(it, range_old); + std::pair index = std::make_pair(d, nAPV); + + oldPayloadMap[index] = Gain; + + bool found(false); + for (const auto& mod : m_excludedMods) { + if (d == mod) { + edm::LogInfo("ModuleFound") << " module " << mod << " found! Excluded... 
" << std::endl; + found = true; + break; + } + } + + if (m_reverseSelect) + found = (!found); + + if (!found) { + theMap[index] = Gain; + } else { + theMap[index] = patchGain; + } + + } // loop over APVs + } // loop over DetIds + + std::unique_ptr theAPVGains = this->getNewObject(theMap); + + // write out the APVGains record + edm::Service poolDbService; + + if (poolDbService.isAvailable()) + poolDbService->writeOneIOV(theAPVGains.get(), poolDbService->currentTime(), m_Record); + else + throw std::runtime_error("PoolDBService required."); +} + +//********************************************************************************// +std::unique_ptr SiStripGainPayloadCopyAndExclude::getNewObject( + const std::map, float>& theMap) { + std::unique_ptr obj = std::make_unique(); + + std::vector theSiStripVector; + uint32_t PreviousDetId = 0; + for (const auto& element : theMap) { + uint32_t DetId = element.first.first; + if (DetId != PreviousDetId) { + if (!theSiStripVector.empty()) { + SiStripApvGain::Range range(theSiStripVector.begin(), theSiStripVector.end()); + if (!obj->put(PreviousDetId, range)) + printf("Bug to put detId = %i\n", PreviousDetId); + } + theSiStripVector.clear(); + PreviousDetId = DetId; + } + theSiStripVector.push_back(element.second); + + edm::LogInfo("SiStripGainPayloadCopyAndExclude") + << " DetId: " << DetId << " APV: " << element.first.second << " Gain: " << element.second << std::endl; + } + + if (!theSiStripVector.empty()) { + SiStripApvGain::Range range(theSiStripVector.begin(), theSiStripVector.end()); + if (!obj->put(PreviousDetId, range)) + printf("Bug to put detId = %i\n", PreviousDetId); + } + + return obj; +} + +// ------------ method fills 'descriptions' with the allowed parameters for the module ------------ +void SiStripGainPayloadCopyAndExclude::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.addUntracked>("excludedModules", {}); + desc.addUntracked("record", 
"SiStripApvGainRcd"); + desc.addUntracked("gainType", 1); + desc.addUntracked("reverseSelection", false); + descriptions.addWithDefaultLabel(desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(SiStripGainPayloadCopyAndExclude); diff --git a/CondTools/SiStrip/test/BuildFile.xml b/CondTools/SiStrip/test/BuildFile.xml index fd0ab2a0a150e..571b47f8e6c9e 100644 --- a/CondTools/SiStrip/test/BuildFile.xml +++ b/CondTools/SiStrip/test/BuildFile.xml @@ -4,3 +4,4 @@ + diff --git a/CondTools/SiStrip/test/SiStripApvGainInspector_cfg.py b/CondTools/SiStrip/test/SiStripApvGainInspector_cfg.py new file mode 100644 index 0000000000000..75d4490a4ed26 --- /dev/null +++ b/CondTools/SiStrip/test/SiStripApvGainInspector_cfg.py @@ -0,0 +1,52 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("Demo") +process.load("FWCore.MessageService.MessageLogger_cfi") + +process.source = cms.Source("EmptyIOVSource", + firstValue = cms.uint64(317340), + lastValue = cms.uint64(317340), + timetype = cms.string('runnumber'), + interval = cms.uint64(1) + ) + + +process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) + +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, '101X_dataRun2_Express_v7', '') + +process.load("Configuration.Geometry.GeometryRecoDB_cff") + +#################################################################### +# Output file +#################################################################### +process.TFileService = cms.Service("TFileService", + fileName=cms.string("APVGainsTree.root") + ) + + +## +## Output database (in this case local sqlite file) +## +process.load("CondCore.CondDB.CondDB_cfi") +process.CondDB.connect = "sqlite_file:updatedGains.db" +process.PoolDBOutputService = cms.Service("PoolDBOutputService", + process.CondDB, + timetype = cms.untracked.string('runnumber'), + toPut = 
cms.VPSet(cms.PSet(record = cms.string('SiStripApvGainRcd'), + tag = cms.string('modifiedGains')) + )) + +process.demo = cms.EDAnalyzer('SiStripApvGainInspector', + fitMode = cms.int32(2), # landau around max + #inputFile = cms.untracked.string("DQM_V0001_R000999999__StreamExpress__Run2018B-PromptCalibProdSiStripGainsAAG-Express-v1-317279-317340__ALCAPROMPT.root"), + inputFile = cms.untracked.string("root://eoscms.cern.ch//eos/cms/store/group/alca_global/multiruns/results/prod//slc6_amd64_gcc630/CMSSW_10_1_5/86791_1p_0f/DQM_V0001_R000999999__StreamExpress__Run2018B-PromptCalibProdSiStripGainsAAG-Express-v1-317382-317488__ALCAPROMPT.root"), + ### FED 387 + selectedModules = cms.untracked.vuint32(436281608,436281604,436281592,436281624,436281620,436281644,436281640,436281648,436281668,436281680,436281684,436281688,436281720,436281700,436281708,436281556,436281552,436281704,436281764,436281768,436281572,436281576,436281748,436281744,436281740,436281780,436281784,436281612,436281616,436281588,436281580,436281584,436281636,436281656,436281652,436281676,436281672,436281732,436281736,436281716,436281712,436281776,436281772,436281548,436281544,436281540,436281752,436281560) + #### FED 434 + ###selectedModules = cms.untracked.vuint32(436266168,436266028,436266020,436266024,436266160,436266164,436266000,436266004,436266008,436265976,436265972,436266064,436266060,436266068,436265964,436265960,436265968,436265988,436266088,436266084,436266040,436266128,436266116,436266132,436266136,436266156,436266152,436266100,436266032,436266036,436266096,436266052,436266056,436265956,436266092,436265992,436265996,436266104,436266072,436266124,436266120,436266148) + ) + +process.p = cms.Path(process.demo) diff --git a/CondTools/SiStrip/test/SiStripGainPayloadCopyAndExclude_cfg.py b/CondTools/SiStrip/test/SiStripGainPayloadCopyAndExclude_cfg.py new file mode 100644 index 0000000000000..63a7b129e5183 --- /dev/null +++ b/CondTools/SiStrip/test/SiStripGainPayloadCopyAndExclude_cfg.py @@ -0,0 
+1,98 @@ +''' +This file is an example configuration of the SiStripPayloadCopyAndExclude module. +This module is meant to copy the content of a SiStrip APV Gain payload (either G1 or G2) +from a local sqlite file (that should be fed to the Event Setup via the SiStripApvGain3Rcd) +and put in another local sqlite file, except for the modules specified in the excludedModules +parameter. If the doReverse parameter is true, the opposite action is performed. +''' + +import FWCore.ParameterSet.Config as cms +import FWCore.ParameterSet.VarParsing as VarParsing + +process = cms.Process("SiStripPayloadCopyAndExclude") + +options = VarParsing.VarParsing("analysis") + +options.register ('globalTag', + "101X_dataRun2_Express_v7", + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.string, # string, int, or float + "GlobalTag") + +options.register ('runNumber', + 317478, + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.int, # string, int, or float + "run number") + +options.register ('doReverse', + False, + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.bool, # string, int, or float + "reverse the selection") + +options.parseArguments() + + +if(options.doReverse): + print("====================================================================================================================================") + print("%MSG-i DoReverse: : Going to revert the selection. 
All modules will be taken from GT, unless they are specified in the modules list!") + print("====================================================================================================================================") + +## +## Messages +## +process.load("FWCore.MessageService.MessageLogger_cfi") + +## +## Event Source +## +process.source = cms.Source("EmptyIOVSource", + firstValue = cms.uint64(options.runNumber), + lastValue = cms.uint64(options.runNumber), + timetype = cms.string('runnumber'), + interval = cms.uint64(1) + ) + +process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) + +## +## Conditions inputs +## +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag,options.globalTag, '') +process.GlobalTag.toGet = cms.VPSet( + cms.PSet(record = cms.string("SiStripApvGain3Rcd"), + tag = cms.string("SiStripApvGainAAG_pcl"), + #connect = cms.string("sqlite_file:/eos/cms/store/group/alca_global/multiruns/results/prod//slc6_amd64_gcc630/CMSSW_10_1_5/86791_1p_0f/promptCalibConditions86791.db") + connect = cms.string("sqlite_file:promptCalibConditions86791.db") # locally copied file for unit test + ) + ) + +## +## Worker module +## +process.SiStripGainPayloadCopyAndExclude = cms.EDAnalyzer('SiStripGainPayloadCopyAndExclude', + ### FED 387 + excludedModules = cms.untracked.vuint32(436281608,436281604,436281592,436281624,436281620,436281644,436281640,436281648,436281668,436281680,436281684,436281688,436281720,436281700,436281708,436281556,436281552,436281704,436281764,436281768,436281572,436281576,436281748,436281744,436281740,436281780,436281784,436281612,436281616,436281588,436281580,436281584,436281636,436281656,436281652,436281676,436281672,436281732,436281736,436281716,436281712,436281776,436281772,436281548,436281544,436281540,436281752,436281560), + reverseSelection = cms.untracked.bool(options.doReverse), # if 
True it will take everything from GT, but the execludedModules from the Gain3 tag + record = cms.untracked.string("SiStripApvGainRcd"), + gainType = cms.untracked.uint32(1) # 0 for G1, 1 for G2 +) + +## +## Output database (in this case local sqlite file) +## +process.load("CondCore.CondDB.CondDB_cfi") +process.CondDB.connect = "sqlite_file:modifiedGains_"+process.GlobalTag.globaltag._value+'_IOV_'+str(options.runNumber)+("_reverse.db" if options.doReverse else ".db") +process.PoolDBOutputService = cms.Service("PoolDBOutputService", + process.CondDB, + timetype = cms.untracked.string('runnumber'), + toPut = cms.VPSet(cms.PSet(record = cms.string('SiStripApvGainRcd'), + tag = cms.string('modifiedGains') + ) + ) +) + +process.p = cms.Path(process.SiStripGainPayloadCopyAndExclude) diff --git a/CondTools/SiStrip/test/testSiStripGainManipulation.sh b/CondTools/SiStrip/test/testSiStripGainManipulation.sh new file mode 100755 index 0000000000000..270f7a9aff942 --- /dev/null +++ b/CondTools/SiStrip/test/testSiStripGainManipulation.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +function die { echo $1: status $2 ; exit $2; } + +if [ "${SCRAM_TEST_NAME}" != "" ] ; then + mkdir ${SCRAM_TEST_NAME} + cd ${SCRAM_TEST_NAME} +fi + +echo -e "===== testing SiStripApV Gain manipulations =====\n\n" + +entries=("SiStripGainPayloadCopyAndExclude_cfg.py" "SiStripApvGainInspector_cfg.py") + +echo -e "===== copying IOV 317478 from tag SiStripApvGainAfterAbortGap_PCL_multirun_v0_prompt on dev DB =====" + +conddb --yes --db dev copy SiStripApvGainAfterAbortGap_PCL_multirun_v0_prompt SiStripApvGainAAG_pcl --from 317478 --to 317478 --destdb promptCalibConditions86791.db + +for entry in "${entries[@]}"; +do + echo -e "===== executing cmsRun "${SCRAM_TEST_PATH}/$entry" ======\n" + (cmsRun "${SCRAM_TEST_PATH}/"$entry) || die "Failure using cmsRun $entry" $? + echo -e "===== executed $entry test ======\n" +done + +echo -e "\n\n ===== Done with the Gain manipulations tests! 
=====\n\n" diff --git a/Configuration/AlCa/python/autoAlca.py b/Configuration/AlCa/python/autoAlca.py index 57063a214637f..3e5441087ffea 100644 --- a/Configuration/AlCa/python/autoAlca.py +++ b/Configuration/AlCa/python/autoAlca.py @@ -11,11 +11,11 @@ "DoubleMuon" : "TkAlZMuMu+TkAlDiMuonAndVertex+MuAlCalIsolatedMu", "DoubleMuonLowMass" : "TkAlJpsiMuMu+TkAlUpsilonMuMu", "EGamma" : "EcalESAlign+EcalUncalWElectron+EcalUncalZElectron+HcalCalIsoTrkProducerFilter+HcalCalIterativePhiSym", - "Express" : "SiStripCalZeroBias+TkAlMinBias+SiStripPCLHistos+SiStripCalMinBias+SiStripCalMinBiasAAG+Hotline+SiPixelCalZeroBias", + "Express" : "SiStripCalZeroBias+TkAlMinBias+TkAlZMuMu+SiStripPCLHistos+SiStripCalMinBias+SiStripCalMinBiasAAG+Hotline+SiPixelCalZeroBias", "ExpressAlignment" : "TkAlMinBias", "ExpressCosmics" : "SiStripPCLHistos+SiStripCalZeroBias+TkAlCosmics0T+SiPixelCalZeroBias+SiPixelCalCosmics+SiStripCalCosmics", "HcalNZS" : "HcalCalMinBias", - "HLTPhysics" : "TkAlMinBias", + "HLTPhysics" : "TkAlMinBias+TkAlV0s", "JetHT" : "HcalCalIsoTrkProducerFilter+TkAlJetHT", "JetMET" : "HcalCalIsoTrkProducerFilter+TkAlJetHT+HcalCalNoise", "MinimumBias" : "SiStripCalZeroBias+SiStripCalMinBias+TkAlMinBias", @@ -25,7 +25,7 @@ "ParkingDoubleMuonLowMass" : "TkAlJpsiMuMu+TkAlUpsilonMuMu", "SingleMuon" : "SiPixelCalSingleMuonLoose+SiPixelCalSingleMuonTight+TkAlMuonIsolated+MuAlCalIsolatedMu+HcalCalHO+HcalCalIterativePhiSym+HcalCalHBHEMuonProducerFilter", "SpecialHLTPhysics" : "LumiPixelsMinBias", - "StreamExpress" : "SiStripCalZeroBias+TkAlMinBias+SiStripPCLHistos+SiStripCalMinBias+SiStripCalMinBiasAAG+Hotline+SiPixelCalZeroBias+SiPixelCalSingleMuon", + "StreamExpress" : "SiStripCalZeroBias+TkAlMinBias+TkAlZMuMu+SiStripPCLHistos+SiStripCalMinBias+SiStripCalMinBiasAAG+Hotline+SiPixelCalZeroBias+SiPixelCalSingleMuon", "StreamExpressHI" : "SiStripCalZeroBias+TkAlMinBiasHI+SiStripPCLHistos+SiStripCalMinBias+SiStripCalMinBiasAAG+SiPixelCalZeroBias", # These (TestEnablesTracker, 
TestEnablesEcalHcal) are in the AlCaRecoMatrix, but no RelVals are produced # 'TestEnablesTracker' : 'TkAlLAS' @@ -61,6 +61,8 @@ def buildList(pdList, matrix): AlCaNoConcurrentLumis = [ 'PromptCalibProd', # AlcaBeamSpotProducer 'PromptCalibProdSiPixelAli', # AlignmentProducerAsAnalyzer, MillePedeFileConverter + 'PromptCalibProdSiPixelAliHG', # AlignmentProducerAsAnalyzer, MillePedeFileConverter + 'PromptCalibProdSiPixelAliHGComb', # AlignmentProducerAsAnalyzer, MillePedeFileConverter 'PromptCalibProdBeamSpotHP', # AlcaBeamSpotProducer 'PromptCalibProdBeamSpotHPLowPU', # AlcaBeamSpotProducer ] diff --git a/Configuration/AlCa/python/autoCond.py b/Configuration/AlCa/python/autoCond.py index 60645f6835929..9754faba412a5 100644 --- a/Configuration/AlCa/python/autoCond.py +++ b/Configuration/AlCa/python/autoCond.py @@ -24,23 +24,23 @@ # GlobalTag for MC production (p-Pb collisions) with realistic alignment and calibrations for Run2 'run2_mc_pa' : '131X_mcRun2_pA_v3', # GlobalTag for Run2 data reprocessing - 'run2_data' : '133X_dataRun2_v2', + 'run2_data' : '140X_dataRun2_v1', # GlobalTag for Run2 data 2018B relvals only: HEM-15-16 fail - 'run2_data_HEfail' : '133X_dataRun2_HEfail_v2', + 'run2_data_HEfail' : '140X_dataRun2_HEfail_v1', # GlobalTag for Run2 HI data - 'run2_data_promptlike_hi' : '133X_dataRun2_PromptLike_HI_v2', + 'run2_data_promptlike_hi' : '140X_dataRun2_PromptLike_HI_v1', # GlobalTag with fixed snapshot time for Run2 HLT RelVals: customizations to run with fixed L1 Menu - 'run2_hlt_relval' : '133X_dataRun2_HLT_relval_v2', - # GlobalTag for Run3 HLT: identical to the online GT (132X_dataRun3_HLT_v2) but with snapshot at 2023-10-04 21:27:37 (UTC) - 'run3_hlt' : '133X_dataRun3_HLT_frozen_v2', - # GlobalTag for Run3 data relvals (express GT) - 132X_dataRun3_Express_v4 with Ecal CC timing tags and snapshot at 2023-10-04 21:27:37 (UTC) - 'run3_data_express' : '133X_dataRun3_Express_frozen_v2', - # GlobalTag for Run3 data relvals (prompt GT) - 
132X_dataRun3_Prompt_v4 with Ecal CC timing tags and snapshot at 2023-10-04 21:27:37 (UTC) - 'run3_data_prompt' : '133X_dataRun3_Prompt_frozen_v2', - # GlobalTag for Run3 offline data reprocessing - snapshot at 2023-10-19 12:00:00 (UTC) - 'run3_data' : '133X_dataRun3_v4', - # GlobalTag for Run3 offline data reprocessing with Prompt GT, currenlty for 2022FG - snapshot at 2023-10-19 12:00:00 (UTC) - 'run3_data_PromptAnalysis' : '133X_dataRun3_PromptAnalysis_v3', + 'run2_hlt_relval' : '140X_dataRun2_HLT_relval_v1', + # GlobalTag for Run3 HLT: identical to the online GT - 140X_dataRun3_HLT_v1 but with snapshot at 2024-01-20 12:00:00 (UTC) + 'run3_hlt' : '140X_dataRun3_HLT_frozen_v1', + # GlobalTag for Run3 data relvals (express GT) - 140X_dataRun3_Express_v1 but snapshot at 2024-01-20 12:00:00 (UTC) + 'run3_data_express' : '140X_dataRun3_Express_frozen_v1', + # GlobalTag for Run3 data relvals (prompt GT) - 140X_dataRun3_Prompt_v1 but snapshot at 2024-01-20 12:00:00 (UTC) + 'run3_data_prompt' : '140X_dataRun3_Prompt_frozen_v1', + # GlobalTag for Run3 offline data reprocessing - snapshot at 2024-01-19 05:46:14 (UTC) + 'run3_data' : '140X_dataRun3_v1', + # GlobalTag for Run3 offline data reprocessing with Prompt GT, currenlty for 2022FG - snapshot at 2024-01-19 05:46:14 (UTC) + 'run3_data_PromptAnalysis' : '140X_dataRun3_PromptAnalysis_v1', # GlobalTag for MC production with perfectly aligned and calibrated detector for Phase1 2017 (and 0,0,~0-centred beamspot) 'phase1_2017_design' : '131X_mc2017_design_v3', # GlobalTag for MC production with realistic conditions for Phase1 2017 detector @@ -64,33 +64,41 @@ # GlobalTag for MC production (cosmics) with realistic conditions for full Phase1 2018 detector, Strip tracker in PEAK mode 'phase1_2018_cosmics_peak' : '131X_upgrade2018cosmics_realistic_peak_v4', # GlobalTag for MC production with perfectly aligned and calibrated detector for Phase1 2022 - 'phase1_2022_design' : '133X_mcRun3_2022_design_v3', + 'phase1_2022_design' : 
'140X_mcRun3_2022_design_v1', # GlobalTag for MC production with realistic conditions for Phase1 2022 - 'phase1_2022_realistic' : '133X_mcRun3_2022_realistic_v3', + 'phase1_2022_realistic' : '140X_mcRun3_2022_realistic_v1', # GlobalTag for MC production with realistic conditions for Phase1 2022 post-EE+ leak - 'phase1_2022_realistic_postEE' : '133X_mcRun3_2022_realistic_postEE_v4', - # GlobalTag for MC production (cosmics) with realistic conditions for Phase1 2022, Strip tracker in DECO mode - 'phase1_2022_cosmics' : '133X_mcRun3_2022cosmics_realistic_deco_v3', + 'phase1_2022_realistic_postEE' : '140X_mcRun3_2022_realistic_postEE_v1', + # GlobalTag for MC production (cosmics) with realistic conditions for Phase1 2022, Strip tracker in DECO mode + 'phase1_2022_cosmics' : '140X_mcRun3_2022cosmics_realistic_deco_v1', # GlobalTag for MC production (cosmics) with perfectly aligned and calibrated detector for Phase1 2022, Strip tracker in DECO mode - 'phase1_2022_cosmics_design' : '133X_mcRun3_2022cosmics_design_deco_v3', + 'phase1_2022_cosmics_design' : '140X_mcRun3_2022cosmics_design_deco_v1', # GlobalTag for MC production with realistic conditions for Phase1 2022 detector for Heavy Ion - 'phase1_2022_realistic_hi' : '133X_mcRun3_2022_realistic_HI_v3', + 'phase1_2022_realistic_hi' : '140X_mcRun3_2022_realistic_HI_v1', # GlobalTag for MC production with perfectly aligned and calibrated detector for Phase1 2023 - 'phase1_2023_design' : '133X_mcRun3_2023_design_v3', + 'phase1_2023_design' : '140X_mcRun3_2023_design_v1', # GlobalTag for MC production with realistic conditions for Phase1 2023 - 'phase1_2023_realistic' : '133X_mcRun3_2023_realistic_v3', - # GlobalTag for MC production with realistic conditions for Phase1 post BPix issue 2023 - 'phase1_2023_realistic_postBPix' : '133X_mcRun3_2023_realistic_postBPix_v3', - # GlobalTag for MC production (cosmics) with realistic conditions for Phase1 2023, Strip tracker in DECO mode - 'phase1_2023_cosmics' : 
'133X_mcRun3_2023cosmics_realistic_deco_v3', + 'phase1_2023_realistic' : '140X_mcRun3_2023_realistic_v1', + # GlobalTag for MC production with realistic conditions for Phase1 postBPix issue 2023 + 'phase1_2023_realistic_postBPix' : '140X_mcRun3_2023_realistic_postBPix_v1', + # GlobalTag for MC production (cosmics) with realistic conditions for Phase1 preBPix 2023, Strip tracker in DECO mode + 'phase1_2023_cosmics' : '140X_mcRun3_2023cosmics_realistic_deco_v1', + # GlobalTag for MC production (cosmics) with realistic conditions for Phase1 postBPix 2023, Strip tracker in DECO mode + 'phase1_2023_cosmics_postBPix' : '140X_mcRun3_2023cosmics_realistic_postBPix_deco_v1', # GlobalTag for MC production (cosmics) with perfectly aligned and calibrated detector for Phase1 2023, Strip tracker in DECO mode - 'phase1_2023_cosmics_design' : '133X_mcRun3_2023cosmics_design_deco_v3', + 'phase1_2023_cosmics_design' : '140X_mcRun3_2023cosmics_design_deco_v1', # GlobalTag for MC production with realistic conditions for Phase1 2023 detector for Heavy Ion - 'phase1_2023_realistic_hi' : '133X_mcRun3_2023_realistic_HI_v7', + 'phase1_2023_realistic_hi' : '140X_mcRun3_2023_realistic_HI_v1', + # GlobalTag for MC production with perfectly aligned and calibrated detector for Phase1 2024 + 'phase1_2024_design' : '140X_mcRun3_2024_design_v1', # GlobalTag for MC production with realistic conditions for Phase1 2024 - 'phase1_2024_realistic' : '133X_mcRun3_2024_realistic_v5', + 'phase1_2024_realistic' : '140X_mcRun3_2024_realistic_v1', + # GlobalTag for MC production (cosmics) with realistic conditions for Phase1 2024, Strip tracker in DECO mode + 'phase1_2024_cosmics' : '140X_mcRun3_2024cosmics_realistic_deco_v1', + # GlobalTag for MC production (cosmics) with perfectly aligned and calibrated detector for Phase1 2024, Strip tracker in DECO mode + 'phase1_2024_cosmics_design' : '140X_mcRun3_2024cosmics_design_deco_v1', # GlobalTag for MC production with realistic conditions for Phase2 - 
'phase2_realistic' : '133X_mcRun4_realistic_v1' + 'phase2_realistic' : '140X_mcRun4_realistic_v1' } aliases = { diff --git a/Configuration/AlCa/python/autoCondPhase2.py b/Configuration/AlCa/python/autoCondPhase2.py index 4e2844156d2c9..2d1d763de88e1 100644 --- a/Configuration/AlCa/python/autoCondPhase2.py +++ b/Configuration/AlCa/python/autoCondPhase2.py @@ -35,12 +35,14 @@ 'T21' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_T15_mc_forWidthEmpty' ,SiPixelLARecord,connectionString, "forWidth", "2019-11-05 20:00:00.000"] ), ), # uH=0.0/T (fall-back to offset) 'T25' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_T19_mc_forWidthEmpty' ,SiPixelLARecord,connectionString, "forWidth", "2020-02-23 14:00:00.000"] ), ), # uH=0.0/T (fall-back to offset) 'T30' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_IT_v6.4.0_25x100_empty_mc' ,SiPixelLARecord,connectionString, "forWidth", "2021-11-29 20:00:00.000"] ), ), # uH=0.0/T (fall-back to offset) + 'T33' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_IT_v7.1.1_25x100_empty_mc' ,SiPixelLARecord,connectionString, "forWidth", "2023-12-02 15:55:00.000"] ), ), # uH=0.0/T (fall-back to offset) } allTags["LAfromAlignment"] = { 'T21' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_T15_mc_forWidthEmpty' ,SiPixelLARecord,connectionString, "fromAlignment", "2019-11-05 20:00:00.000"] ), ), # uH=0.0/T (not in use) 'T25' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_T19_mc_forWidthEmpty' ,SiPixelLARecord,connectionString, "fromAlignment", "2020-02-23 14:00:00.000"] ), ), # uH=0.0/T (not in use) 'T30' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_IT_v6.4.0_25x100_empty_mc' ,SiPixelLARecord,connectionString, "fromAlignment", "2021-11-29 20:00:00.000"] ), ), # uH=0.0/T (fall-back to offset) + 'T33' : ( ','.join( [ 'SiPixelLorentzAngle_phase2_IT_v7.1.1_25x100_empty_mc' ,SiPixelLARecord,connectionString, "fromAlignment", "2023-12-02 15:55:00.000"] ), ), # uH=0.0/T (fall-back to offset) } allTags["SimLA"] = { diff --git a/Configuration/AlCa/python/autoPCL.py 
b/Configuration/AlCa/python/autoPCL.py index 0005c8439e31f..b212d5494c1a9 100644 --- a/Configuration/AlCa/python/autoPCL.py +++ b/Configuration/AlCa/python/autoPCL.py @@ -2,22 +2,23 @@ # due to the limitations of the DBS database schema, as described in # https://cms-talk.web.cern.ch/t/alcaprompt-datasets-not-loaded-in-dbs/11146/2, # the keys of the dict (i.e. the "PromptCalib*") MUST be shorter than 31 characters -autoPCL = {'PromptCalibProd' : 'BeamSpotByRun+BeamSpotByLumi', - 'PromptCalibProdBeamSpotHP' : 'BeamSpotHPByRun+BeamSpotHPByLumi', +autoPCL = {'PromptCalibProd' : 'BeamSpotByRun+BeamSpotByLumi', + 'PromptCalibProdBeamSpotHP' : 'BeamSpotHPByRun+BeamSpotHPByLumi', 'PromptCalibProdBeamSpotHPLowPU' : 'BeamSpotHPLowPUByRun+BeamSpotHPLowPUByLumi', - 'PromptCalibProdSiStrip' : 'SiStripQuality', - 'PromptCalibProdSiStripGains' : 'SiStripGains', + 'PromptCalibProdSiStrip' : 'SiStripQuality', + 'PromptCalibProdSiStripGains' : 'SiStripGains', 'PromptCalibProdSiStripGainsAAG' : 'SiStripGainsAAG', - 'PromptCalibProdSiStripHitEff' : 'SiStripHitEff', - 'PromptCalibProdSiStripLA' : 'SiStripLA', - 'PromptCalibProdSiPixelAli' : 'SiPixelAli', - 'PromptCalibProdSiPixelAliHG' : 'SiPixelAliHG', - 'PromptCalibProdSiPixel' : 'SiPixelQuality', - 'PromptCalibProdSiPixelLA' : 'SiPixelLA', - 'PromptCalibProdSiPixelLAMCS' : 'SiPixelLAMCS', - 'PromptCalibProdEcalPedestals': 'EcalPedestals', - 'PromptCalibProdLumiPCC': 'LumiPCC', - 'PromptCalibProdPPSTimingCalib' : 'PPSTimingCalibration', - 'PromptCalibProdPPSDiamondSampic' : 'PPSDiamondSampicTimingCalibration', - 'PromptCalibProdPPSAlignment' : 'PPSAlignment' + 'PromptCalibProdSiStripHitEff' : 'SiStripHitEff', + 'PromptCalibProdSiStripLA' : 'SiStripLA', + 'PromptCalibProdSiPixelAli' : 'SiPixelAli', + 'PromptCalibProdSiPixelAliHG' : 'SiPixelAliHG', + 'PromptCalibProdSiPixelAliHGComb': 'SiPixelAliHGCombined', + 'PromptCalibProdSiPixel' : 'SiPixelQuality', + 'PromptCalibProdSiPixelLA' : 'SiPixelLA', + 'PromptCalibProdSiPixelLAMCS' : 
'SiPixelLAMCS', + 'PromptCalibProdEcalPedestals' : 'EcalPedestals', + 'PromptCalibProdLumiPCC' : 'LumiPCC', + 'PromptCalibProdPPSTimingCalib' : 'PPSTimingCalibration', + 'PromptCalibProdPPSDiamondSampic': 'PPSDiamondSampicTimingCalibration', + 'PromptCalibProdPPSAlignment' : 'PPSAlignment' } diff --git a/Configuration/Applications/python/ConfigBuilder.py b/Configuration/Applications/python/ConfigBuilder.py index 9862e90d937ce..bf1d7f5089ed4 100644 --- a/Configuration/Applications/python/ConfigBuilder.py +++ b/Configuration/Applications/python/ConfigBuilder.py @@ -771,6 +771,12 @@ def addStandardSequences(self): if self._options.pileup: pileupSpec=self._options.pileup.split(',')[0] + #make sure there is a set of pileup files specified when needed + pileups_without_input=[defaultOptions.pileup,"Cosmics","default","HiMixNoPU",None] + if self._options.pileup not in pileups_without_input and self._options.pileup_input==None: + message = "Pileup scenerio requires input files. Please add an appropriate --pileup_input option" + raise Exception(message) + # Does the requested pile-up scenario exist? from Configuration.StandardSequences.Mixing import Mixing,defineMixing if not pileupSpec in Mixing and '.' 
not in pileupSpec and 'file:' not in pileupSpec: @@ -1463,7 +1469,7 @@ def prepare_GEN(self, stepSpec = None): elif isinstance(theObject, cms.Sequence) or isinstance(theObject, cmstypes.ESProducer): self._options.inlineObjects+=','+name - if stepSpec == self.GENDefaultSeq or stepSpec == 'pgen_genonly' or stepSpec == 'pgen_smear': + if stepSpec == self.GENDefaultSeq or stepSpec == 'pgen_genonly': if 'ProductionFilterSequence' in genModules and ('generator' in genModules): self.productionFilterSequence = 'ProductionFilterSequence' elif 'generator' in genModules: @@ -1494,7 +1500,7 @@ def prepare_GEN(self, stepSpec = None): #register to the genstepfilter the name of the path (static right now, but might evolve) self.executeAndRemember('process.genstepfilter.triggerConditions=cms.vstring("generation_step")') - if 'reGEN' in self.stepMap: + if 'reGEN' in self.stepMap or stepSpec == 'pgen_smear': #stop here return @@ -1830,7 +1836,15 @@ def prepare_NANO(self, stepSpec = '' ): # build and inject the sequence if len(_nanoSeq) < 1 and '@' in stepSpec: raise Exception(f'The specified mapping: {stepSpec} generates an empty NANO sequence. Please provide a valid mappign') - self.scheduleSequence('+'.join(_nanoSeq), 'nanoAOD_step') + _seqToSchedule = [] + for _subSeq in _nanoSeq: + if '.' 
in _subSeq: + _cff,_seq = _subSeq.split('.') + self.loadAndRemember(_cff) + _seqToSchedule.append(_seq) + else: + _seqToSchedule.append(_subSeq) + self.scheduleSequence('+'.join(_seqToSchedule), 'nanoAOD_step') # add the customisations for custom in _nanoCustoms: diff --git a/Configuration/DataProcessing/python/Merge.py b/Configuration/DataProcessing/python/Merge.py index 4dc9b8af57609..c031f5f8ac33c 100644 --- a/Configuration/DataProcessing/python/Merge.py +++ b/Configuration/DataProcessing/python/Merge.py @@ -73,7 +73,6 @@ def mergeProcess(*inputFiles, **options): elif mergeNANO: import Configuration.EventContent.EventContent_cff outMod = OutputModule("NanoAODOutputModule",Configuration.EventContent.EventContent_cff.NANOAODEventContent.clone()) - process.add_(Service("InitRootHandlers", EnableIMT = CfgTypes.untracked.bool(False))) else: outMod = OutputModule("PoolOutputModule") outMod.mergeJob = CfgTypes.untracked.bool(True) diff --git a/Configuration/DataProcessing/test/BuildFile.xml b/Configuration/DataProcessing/test/BuildFile.xml index 42f55518a3b4e..4ce1d364c06b6 100644 --- a/Configuration/DataProcessing/test/BuildFile.xml +++ b/Configuration/DataProcessing/test/BuildFile.xml @@ -1 +1,12 @@ - + + + + + + + + + + + + diff --git a/Configuration/DataProcessing/test/run_CfgTest.sh b/Configuration/DataProcessing/test/run_CfgTest.sh deleted file mode 100755 index 52f2a79d81829..0000000000000 --- a/Configuration/DataProcessing/test/run_CfgTest.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash - -# Test suite for various ConfigDP scenarios -# run using: scram build runtests -# feel free to contribute with your favourite configuration - - -# Pass in name and status -function die { echo $1: status $2 ; exit $2; } - -function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } - -runTest "${SCRAM_TEST_PATH}/RunRepack.py --select-events HLT:path1,HLT:path2 --lfn /store/whatever" - -declare -a arr=("cosmicsEra_Run2_2018" "ppEra_Run2_2018" 
"ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3" "ppEra_Run3") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --fevt --dqmio --alcareco TkAlMinBias+SiStripCalMinBias " - runTest "${SCRAM_TEST_PATH}/RunVisualizationProcessing.py --scenario $scenario --lfn /store/whatever --global-tag GLOBALTAG --fevt" - runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --workflows=BeamSpotByRun,BeamSpotByLumi,SiStripQuality" -done - -declare -a arr=("cosmicsEra_Run2_2018" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --fevt --dqmio --alcareco SiStripCalCosmicsNano " -done - -declare -a arr=("HeavyIonsEra_Run2_2018") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --fevt --dqmio --alcareco TkAlMinBiasHI+SiStripCalMinBias " - runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --workflows=BeamSpotByRun,BeamSpotByLumi,SiStripQuality" - runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn=/store/whatever --global-tag GLOBALTAG --skims SiStripCalZeroBias,SiStripCalMinBias,PromptCalibProd" - runTest "${SCRAM_TEST_PATH}/RunDQMHarvesting.py --scenario $scenario --lfn /store/whatever --run 12345 --dataset /A/B/C --global-tag GLOBALTAG" - runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBiasHI+SiStripCalMinBias --PhysicsSkim=DiJet+Photon+ZEE+ZMM" -done - - -declare -a arr=("AlCaLumiPixels" 
"AlCaTestEnable" "cosmicsEra_Run2_2018" "hcalnzsEra_Run2_2018" "ppEra_Run2_2018" "hcalnzsEra_Run2_2018_highBetaStar" "hcalnzsEra_Run2_2018_pp_on_AA" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3" "hcalnzsEra_Run3" "ppEra_Run3" "AlCaLumiPixels_Run3" "AlCaPhiSymEcal_Nano" "AlCaPPS_Run3" "ppEra_Run3_pp_on_PbPb" "hcalnzsEra_Run3_pp_on_PbPb" "ppEra_Run3_pp_on_PbPb_approxSiStripClusters" "ppEra_Run3_2023" "ppEra_Run3_pp_on_PbPb_2023" "ppEra_Run3_pp_on_PbPb_approxSiStripClusters_2023") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias" -done - -declare -a arr=("AlCaLumiPixels" "cosmicsEra_Run2_2018" "ppEra_Run2_2018" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3" "ppEra_Run3" "AlCaLumiPixels_Run3") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn=/store/whatever --global-tag GLOBALTAG --skims SiStripCalZeroBias,SiStripCalMinBias,PromptCalibProd" - runTest "${SCRAM_TEST_PATH}/RunDQMHarvesting.py --scenario $scenario --lfn /store/whatever --run 12345 --dataset /A/B/C --global-tag GLOBALTAG" -done - -declare -a arr=("ppEra_Run2_2018" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --miniaod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias" - runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias --PhysicsSkim=@SingleMuon" -done - -declare -a arr=("ppEra_Run3" "ppEra_Run3_2023" "ppEra_Run3_2023_repacked") -for scenario in "${arr[@]}" -do - runTest 
"${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --miniaod --nanoaod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias" - runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias --PhysicsSkim=@Muon0" -done - - -runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario AlCaTestEnable --global-tag GLOBALTAG --lfn /store/whatever --alcareco PromptCalibProdEcalPedestals " -runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario AlCaTestEnable --lfn=/store/whatever --global-tag GLOBALTAG --skims PromptCalibProdEcalPedestals" -runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario AlCaTestEnable --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdEcalPedestals" - -runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario AlCaLumiPixels --global-tag GLOBALTAG --lfn /store/whatever --alcareco AlCaPCCRandom+PromptCalibProdLumiPCC" -runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario AlCaLumiPixels --lfn=/store/whatever --global-tag GLOBALTAG --skims AlCaPCCRandom,PromptCalibProdLumiPCC" -runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario AlCaLumiPixels --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdLumiPCC" - -runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario AlCaLumiPixels_Run3 --global-tag GLOBALTAG --lfn /store/whatever --alcareco AlCaPCCRandom+PromptCalibProdLumiPCC" -runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario AlCaLumiPixels_Run3 --lfn=/store/whatever --global-tag GLOBALTAG --skims AlCaPCCRandom,PromptCalibProdLumiPCC" -runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario AlCaLumiPixels_Run3 --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdLumiPCC" - -declare -a 
arr=("trackingOnlyEra_Run2_2018" "trackingOnlyEra_Run2_2018_highBetaStar" "trackingOnlyEra_Run2_2018_pp_on_AA" "trackingOnlyEra_Run3" "trackingOnlyEra_Run3_pp_on_PbPb") -for scenario in "${arr[@]}" -do - runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --alcarecos=TkAlMinBias+PromptCalibProdBeamSpotHP" - runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn /store/whatever --global-tag GLOBALTAG --skims TkAlMinBias,PromptCalibProdBeamSpotHP" - runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn /store/whatever --global-tag GLOBALTAG --skims TkAlMinBias,PromptCalibProdBeamSpotHPLowPU" - runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdBeamSpotHP" - runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdBeamSpotHPLowPU" -done diff --git a/Configuration/DataProcessing/test/run_CfgTest_1.sh b/Configuration/DataProcessing/test/run_CfgTest_1.sh new file mode 100755 index 0000000000000..0a791822fbca1 --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_1.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +runTest "${SCRAM_TEST_PATH}/RunRepack.py --select-events HLT:path1,HLT:path2 --lfn /store/whatever" + diff --git a/Configuration/DataProcessing/test/run_CfgTest_10.sh b/Configuration/DataProcessing/test/run_CfgTest_10.sh new file mode 100755 index 0000000000000..b278e1490c621 --- /dev/null +++ 
b/Configuration/DataProcessing/test/run_CfgTest_10.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario AlCaLumiPixels --global-tag GLOBALTAG --lfn /store/whatever --alcareco AlCaPCCRandom+PromptCalibProdLumiPCC" +runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario AlCaLumiPixels --lfn=/store/whatever --global-tag GLOBALTAG --skims AlCaPCCRandom,PromptCalibProdLumiPCC" +runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario AlCaLumiPixels --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdLumiPCC" + diff --git a/Configuration/DataProcessing/test/run_CfgTest_11.sh b/Configuration/DataProcessing/test/run_CfgTest_11.sh new file mode 100755 index 0000000000000..539c3756e5b6d --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_11.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario AlCaLumiPixels_Run3 --global-tag GLOBALTAG --lfn /store/whatever --alcareco AlCaPCCRandom+PromptCalibProdLumiPCC" +runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario AlCaLumiPixels_Run3 --lfn=/store/whatever --global-tag GLOBALTAG --skims AlCaPCCRandom,PromptCalibProdLumiPCC" +runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario AlCaLumiPixels_Run3 --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG 
--alcapromptdataset=PromptCalibProdLumiPCC" + diff --git a/Configuration/DataProcessing/test/run_CfgTest_12.sh b/Configuration/DataProcessing/test/run_CfgTest_12.sh new file mode 100755 index 0000000000000..746ecdd6a4f2e --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_12.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("trackingOnlyEra_Run2_2018" "trackingOnlyEra_Run2_2018_highBetaStar" "trackingOnlyEra_Run2_2018_pp_on_AA" "trackingOnlyEra_Run3" "trackingOnlyEra_Run3_pp_on_PbPb") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --alcarecos=TkAlMinBias+PromptCalibProdBeamSpotHP" + runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn /store/whatever --global-tag GLOBALTAG --skims TkAlMinBias,PromptCalibProdBeamSpotHP" + runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn /store/whatever --global-tag GLOBALTAG --skims TkAlMinBias,PromptCalibProdBeamSpotHPLowPU" + runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdBeamSpotHP" + runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdBeamSpotHPLowPU" +done diff --git a/Configuration/DataProcessing/test/run_CfgTest_2.sh b/Configuration/DataProcessing/test/run_CfgTest_2.sh new file mode 100755 index 0000000000000..10ec8c3fcfe97 --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_2.sh @@ -0,0 +1,20 @@ +#!/bin/bash + 
+# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("cosmicsEra_Run2_2018" "ppEra_Run2_2018" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3" "ppEra_Run3") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --fevt --dqmio --alcareco TkAlMinBias+SiStripCalMinBias " + runTest "${SCRAM_TEST_PATH}/RunVisualizationProcessing.py --scenario $scenario --lfn /store/whatever --global-tag GLOBALTAG --fevt" + runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --workflows=BeamSpotByRun,BeamSpotByLumi,SiStripQuality" +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_3.sh b/Configuration/DataProcessing/test/run_CfgTest_3.sh new file mode 100755 index 0000000000000..fb9bfc62110fa --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_3.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("cosmicsEra_Run2_2018" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --fevt --dqmio --alcareco SiStripCalCosmicsNano " +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_4.sh 
b/Configuration/DataProcessing/test/run_CfgTest_4.sh new file mode 100755 index 0000000000000..8303ef4668bfd --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_4.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("HeavyIonsEra_Run2_2018") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario $scenario --global-tag GLOBALTAG --lfn /store/whatever --fevt --dqmio --alcareco TkAlMinBiasHI+SiStripCalMinBias " + runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario $scenario --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --workflows=BeamSpotByRun,BeamSpotByLumi,SiStripQuality" + runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn=/store/whatever --global-tag GLOBALTAG --skims SiStripCalZeroBias,SiStripCalMinBias,PromptCalibProd" + runTest "${SCRAM_TEST_PATH}/RunDQMHarvesting.py --scenario $scenario --lfn /store/whatever --run 12345 --dataset /A/B/C --global-tag GLOBALTAG" + runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBiasHI+SiStripCalMinBias --PhysicsSkim=DiJet+Photon+ZEE+ZMM" +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_5.sh b/Configuration/DataProcessing/test/run_CfgTest_5.sh new file mode 100755 index 0000000000000..552fbf82c123f --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_5.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; 
exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("AlCaLumiPixels" "AlCaTestEnable" "cosmicsEra_Run2_2018" "hcalnzsEra_Run2_2018" "ppEra_Run2_2018" "hcalnzsEra_Run2_2018_highBetaStar" "hcalnzsEra_Run2_2018_pp_on_AA" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3" "hcalnzsEra_Run3" "ppEra_Run3" "AlCaLumiPixels_Run3" "AlCaPhiSymEcal_Nano" "AlCaPPS_Run3" "ppEra_Run3_pp_on_PbPb" "hcalnzsEra_Run3_pp_on_PbPb" "ppEra_Run3_pp_on_PbPb_approxSiStripClusters" "ppEra_Run3_2023" "ppEra_Run3_pp_on_PbPb_2023" "ppEra_Run3_pp_on_PbPb_approxSiStripClusters_2023") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias" +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_6.sh b/Configuration/DataProcessing/test/run_CfgTest_6.sh new file mode 100755 index 0000000000000..a2cddf2baa042 --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_6.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("AlCaLumiPixels" "cosmicsEra_Run2_2018" "ppEra_Run2_2018" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA" "cosmicsHybridEra_Run2_2018" "cosmicsEra_Run3" "ppEra_Run3" "AlCaLumiPixels_Run3") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario $scenario --lfn=/store/whatever --global-tag GLOBALTAG --skims SiStripCalZeroBias,SiStripCalMinBias,PromptCalibProd" + runTest "${SCRAM_TEST_PATH}/RunDQMHarvesting.py --scenario $scenario --lfn /store/whatever 
--run 12345 --dataset /A/B/C --global-tag GLOBALTAG" +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_7.sh b/Configuration/DataProcessing/test/run_CfgTest_7.sh new file mode 100755 index 0000000000000..86360b330f11d --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_7.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("ppEra_Run2_2018" "ppEra_Run2_2018_highBetaStar" "ppEra_Run2_2018_pp_on_AA") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --miniaod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias" + runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias --PhysicsSkim=@SingleMuon" +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_8.sh b/Configuration/DataProcessing/test/run_CfgTest_8.sh new file mode 100755 index 0000000000000..a5a2acb0f8623 --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_8.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +declare -a arr=("ppEra_Run3" "ppEra_Run3_2023" "ppEra_Run3_2023_repacked") +for scenario in "${arr[@]}" +do + runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --miniaod --nanoaod --dqmio --global-tag GLOBALTAG 
--lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias" + runTest "${SCRAM_TEST_PATH}/RunPromptReco.py --scenario $scenario --reco --aod --dqmio --global-tag GLOBALTAG --lfn=/store/whatever --alcareco TkAlMinBias+SiStripCalMinBias --PhysicsSkim=@Muon0" +done + diff --git a/Configuration/DataProcessing/test/run_CfgTest_9.sh b/Configuration/DataProcessing/test/run_CfgTest_9.sh new file mode 100755 index 0000000000000..9e22ea7810316 --- /dev/null +++ b/Configuration/DataProcessing/test/run_CfgTest_9.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Test suite for various ConfigDP scenarios +# run using: scram build runtests +# feel free to contribute with your favourite configuration + + +# Pass in name and status +function die { echo $1: status $2 ; exit $2; } + +function runTest { echo $1 ; python3 $1 || die "Failure for configuration: $1" $?; } + +runTest "${SCRAM_TEST_PATH}/RunExpressProcessing.py --scenario AlCaTestEnable --global-tag GLOBALTAG --lfn /store/whatever --alcareco PromptCalibProdEcalPedestals " +runTest "${SCRAM_TEST_PATH}/RunAlcaSkimming.py --scenario AlCaTestEnable --lfn=/store/whatever --global-tag GLOBALTAG --skims PromptCalibProdEcalPedestals" +runTest "${SCRAM_TEST_PATH}/RunAlcaHarvesting.py --scenario AlCaTestEnable --lfn /store/whatever --dataset /A/B/C --global-tag GLOBALTAG --alcapromptdataset=PromptCalibProdEcalPedestals" + diff --git a/Configuration/Eras/python/Era_Phase2_cff.py b/Configuration/Eras/python/Era_Phase2_cff.py index c345cc8145a4f..7ce7293175a3d 100644 --- a/Configuration/Eras/python/Era_Phase2_cff.py +++ b/Configuration/Eras/python/Era_Phase2_cff.py @@ -11,6 +11,8 @@ from Configuration.Eras.Modifier_phase2_GEM_cff import phase2_GEM from Configuration.Eras.Modifier_phase1Pixel_cff import phase1Pixel from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1 +from Configuration.ProcessModifiers.seedingDeepCore_cff import seedingDeepCore +from Configuration.ProcessModifiers.displacedRegionalTracking_cff import 
displacedRegionalTracking from Configuration.Eras.Modifier_hcalHardcodeConditions_cff import hcalHardcodeConditions from Configuration.Eras.Modifier_phase2_timing_cff import phase2_timing from Configuration.Eras.Modifier_phase2_timing_layer_cff import phase2_timing_layer @@ -18,4 +20,5 @@ from Configuration.Eras.Modifier_ctpps_2022_cff import ctpps_2022 from Configuration.ProcessModifiers.dd4hep_cff import dd4hep -Phase2 = cms.ModifierChain(Run3_noMkFit.copyAndExclude([phase1Pixel,trackingPhase1,ctpps_2022,dd4hep]), phase2_common, phase2_tracker, trackingPhase2PU140, phase2_ecal, phase2_hcal, phase2_hgcal, phase2_muon, phase2_GEM, hcalHardcodeConditions, phase2_timing, phase2_timing_layer, phase2_trigger) +Phase2 = cms.ModifierChain(Run3_noMkFit.copyAndExclude([phase1Pixel,trackingPhase1,seedingDeepCore,displacedRegionalTracking,ctpps_2022,dd4hep]), + phase2_common, phase2_tracker, trackingPhase2PU140, phase2_ecal, phase2_hcal, phase2_hgcal, phase2_muon, phase2_GEM, hcalHardcodeConditions, phase2_timing, phase2_timing_layer, phase2_trigger) diff --git a/Configuration/Eras/python/Era_Run2_2017_cff.py b/Configuration/Eras/python/Era_Run2_2017_cff.py index 5b418ba6d64ee..506ba6aa500f9 100644 --- a/Configuration/Eras/python/Era_Run2_2017_cff.py +++ b/Configuration/Eras/python/Era_Run2_2017_cff.py @@ -10,6 +10,9 @@ from Configuration.Eras.Modifier_run2_HEPlan1_2017_cff import run2_HEPlan1_2017 from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1 from Configuration.Eras.ModifierChain_trackingMkFitProd_cff import trackingMkFitProd +from Configuration.ProcessModifiers.trackingParabolicMf_cff import trackingParabolicMf +from Configuration.ProcessModifiers.seedingDeepCore_cff import seedingDeepCore +from Configuration.ProcessModifiers.displacedRegionalTracking_cff import displacedRegionalTracking from Configuration.Eras.Modifier_run2_GEM_2017_cff import run2_GEM_2017 from Configuration.Eras.Modifier_stage2L1Trigger_2017_cff import stage2L1Trigger_2017 
from Configuration.Eras.Modifier_run2_HLTconditions_2017_cff import run2_HLTconditions_2017 @@ -27,5 +30,6 @@ Run2_2017 = cms.ModifierChain(Run2_2016.copyAndExclude([run2_muon_2016, run2_HLTconditions_2016, run2_ECAL_2016, run2_egamma_2016,pixel_2016,run2_jme_2016, strips_vfp30_2016, ctpps_2016]), phase1Pixel, run2_ECAL_2017, run2_HF_2017, run2_HCAL_2017, run2_HE_2017, run2_HEPlan1_2017, - trackingPhase1, trackdnn, trackingMkFitProd, run2_GEM_2017, stage2L1Trigger_2017, run2_HLTconditions_2017, run2_muon_2017,run2_egamma_2017, ctpps_2017, run2_jme_2017) + trackingPhase1, trackdnn, trackingMkFitProd, trackingParabolicMf, seedingDeepCore, displacedRegionalTracking, + run2_GEM_2017, stage2L1Trigger_2017, run2_HLTconditions_2017, run2_muon_2017,run2_egamma_2017, ctpps_2017, run2_jme_2017) diff --git a/Configuration/Eras/python/Era_Run3_2023_UPC_cff.py b/Configuration/Eras/python/Era_Run3_2023_UPC_cff.py new file mode 100644 index 0000000000000..fe22754461aa8 --- /dev/null +++ b/Configuration/Eras/python/Era_Run3_2023_UPC_cff.py @@ -0,0 +1,8 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.Eras.Era_Run3_2023_cff import Run3_2023 +from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive +from Configuration.Eras.Modifier_highBetaStar_2018_cff import highBetaStar_2018 +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc + +Run3_2023_UPC = cms.ModifierChain(Run3_2023, egamma_lowPt_exclusive, highBetaStar_2018, run3_upc) diff --git a/Configuration/Eras/python/Era_Run3_pp_on_PbPb_2023_cff.py b/Configuration/Eras/python/Era_Run3_pp_on_PbPb_2023_cff.py index 4ca176860878b..b4c900f49e0c4 100644 --- a/Configuration/Eras/python/Era_Run3_pp_on_PbPb_2023_cff.py +++ b/Configuration/Eras/python/Era_Run3_pp_on_PbPb_2023_cff.py @@ -2,5 +2,6 @@ from Configuration.Eras.Era_Run3_pp_on_PbPb_cff import Run3_pp_on_PbPb from Configuration.Eras.Modifier_run3_egamma_2023_cff import run3_egamma_2023 +from 
Configuration.Eras.Modifier_pp_on_PbPb_run3_2023_cff import pp_on_PbPb_run3_2023 -Run3_pp_on_PbPb_2023 = cms.ModifierChain(Run3_pp_on_PbPb, run3_egamma_2023) +Run3_pp_on_PbPb_2023 = cms.ModifierChain(Run3_pp_on_PbPb, run3_egamma_2023, pp_on_PbPb_run3_2023) diff --git a/Configuration/Eras/python/ModifierChain_run2_2017_noTrackingModifier_cff.py b/Configuration/Eras/python/ModifierChain_run2_2017_noTrackingModifier_cff.py index 1fb8676d4e31e..ec54890989029 100644 --- a/Configuration/Eras/python/ModifierChain_run2_2017_noTrackingModifier_cff.py +++ b/Configuration/Eras/python/ModifierChain_run2_2017_noTrackingModifier_cff.py @@ -3,5 +3,7 @@ from Configuration.Eras.Era_Run2_2017_cff import Run2_2017 from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1 from Configuration.Eras.ModifierChain_trackingMkFitProd_cff import trackingMkFitProd +from Configuration.ProcessModifiers.seedingDeepCore_cff import seedingDeepCore +from Configuration.ProcessModifiers.displacedRegionalTracking_cff import displacedRegionalTracking -run2_2017_noTrackingModifier = Run2_2017.copyAndExclude([trackingPhase1,trackingMkFitProd]) +run2_2017_noTrackingModifier = Run2_2017.copyAndExclude([trackingPhase1,trackingMkFitProd,seedingDeepCore,displacedRegionalTracking]) diff --git a/Configuration/Eras/python/Modifier_ctpps_directSim_cff.py b/Configuration/Eras/python/Modifier_ctpps_directSim_cff.py new file mode 100644 index 0000000000000..6d010cdcb308e --- /dev/null +++ b/Configuration/Eras/python/Modifier_ctpps_directSim_cff.py @@ -0,0 +1,3 @@ +import FWCore.ParameterSet.Config as cms + +ctpps_directSim = cms.Modifier() diff --git a/Configuration/Eras/python/Modifier_phase2_ecalTP_devel_cff.py b/Configuration/Eras/python/Modifier_phase2_ecalTP_devel_cff.py new file mode 100644 index 0000000000000..cd60a1aa06ca6 --- /dev/null +++ b/Configuration/Eras/python/Modifier_phase2_ecalTP_devel_cff.py @@ -0,0 +1,4 @@ +import FWCore.ParameterSet.Config as cms + +phase2_ecalTP_devel = 
cms.Modifier() + diff --git a/Configuration/Eras/python/Modifier_phase2_hgcalOnly_cff.py b/Configuration/Eras/python/Modifier_phase2_hgcalOnly_cff.py new file mode 100644 index 0000000000000..045d5ec1ea9d6 --- /dev/null +++ b/Configuration/Eras/python/Modifier_phase2_hgcalOnly_cff.py @@ -0,0 +1,3 @@ +import FWCore.ParameterSet.Config as cms + +phase2_hgcalOnly = cms.Modifier() diff --git a/Configuration/Eras/python/Modifier_pp_on_PbPb_run3_2023_cff.py b/Configuration/Eras/python/Modifier_pp_on_PbPb_run3_2023_cff.py new file mode 100644 index 0000000000000..a642fc427bdfa --- /dev/null +++ b/Configuration/Eras/python/Modifier_pp_on_PbPb_run3_2023_cff.py @@ -0,0 +1,4 @@ +import FWCore.ParameterSet.Config as cms + +pp_on_PbPb_run3_2023 = cms.Modifier() + diff --git a/Configuration/Eras/python/Modifier_run3_upc_cff.py b/Configuration/Eras/python/Modifier_run3_upc_cff.py new file mode 100644 index 0000000000000..00be48032a84b --- /dev/null +++ b/Configuration/Eras/python/Modifier_run3_upc_cff.py @@ -0,0 +1,3 @@ +import FWCore.ParameterSet.Config as cms + +run3_upc =cms.Modifier() diff --git a/Configuration/EventContent/python/AlCaRecoOutput_cff.py b/Configuration/EventContent/python/AlCaRecoOutput_cff.py index 961c13dea5722..4f1eaa6cff3dc 100644 --- a/Configuration/EventContent/python/AlCaRecoOutput_cff.py +++ b/Configuration/EventContent/python/AlCaRecoOutput_cff.py @@ -41,6 +41,8 @@ from Alignment.CommonAlignmentProducer.ALCARECOTkAlMinBiasHI_Output_cff import * # AlCaReco for track based alignment using JetHT events from Alignment.CommonAlignmentProducer.ALCARECOTkAlJetHT_Output_cff import * +# AlCaReco for track based alignment using V0s +from Alignment.CommonAlignmentProducer.ALCARECOTkAlV0s_Output_cff import * # AlCaReco for pixel calibration using muons from Calibration.TkAlCaRecoProducers.ALCARECOSiPixelCalSingleMuon_Output_cff import * @@ -170,6 +172,7 @@ from Calibration.TkAlCaRecoProducers.ALCARECOSiStripPCLHistos_Output_cff import * from 
Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAli_Output_cff import * from Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAliHG_Output_cff import * +from Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAliHGCombined_Output_cff import * from CalibTracker.SiPixelQuality.ALCARECOPromptCalibProdSiPixel_Output_cff import * @@ -201,6 +204,4 @@ eventAutoFlushCompressedSize=cms.untracked.int32(5*1024*1024) ) - - ALCARECOEventContent.outputCommands.append('drop *_MEtoEDMConverter_*_*') diff --git a/Configuration/EventContent/python/EventContentCosmics_cff.py b/Configuration/EventContent/python/EventContentCosmics_cff.py index 3beaa0f9c1a67..95f2f49aeb621 100644 --- a/Configuration/EventContent/python/EventContentCosmics_cff.py +++ b/Configuration/EventContent/python/EventContentCosmics_cff.py @@ -155,6 +155,17 @@ RAWSIMEventContent.outputCommands.extend(DigiToRawFEVT.outputCommands) RAWSIMEventContent.outputCommands.extend(MEtoEDMConverterFEVT.outputCommands) RAWSIMEventContent.outputCommands.extend(IOMCRAW.outputCommands) + +# +# Temporary collections needed for Phase-2 RECO using RAWSIM as input in Prod-like workflow +# They are until packer/unpackers are done. 
+# +from Configuration.Eras.Modifier_phase2_common_cff import phase2_common +phase2_common.toModify(RAWSIMEventContent, + outputCommands = RAWSIMEventContent.outputCommands+[ + 'keep *_sim*Digis_*_*', + 'keep *Phase2TrackerDigi*_*_*_*']) + # # # RECOSIM Data Tier definition @@ -205,6 +216,14 @@ FEVTEventContent.outputCommands.extend(OnlineMetaDataContent.outputCommands) FEVTEventContent.outputCommands.extend(TcdsEventContent.outputCommands) +phase2_tracker.toModify(FEVTEventContent, + outputCommands = FEVTEventContent.outputCommands + [ + 'keep Phase2TrackerDigiedmDetSetVector_mix_*_*', + 'keep *_TTClustersFromPhase2TrackerDigis_*_*', + 'keep *_TTStubsFromPhase2TrackerDigis_*_*', + 'keep *_TrackerDTC_*_*', + 'keep *_*_Level1TTTracks_*']) + #replace FEVTEventContent.outputCommands += HLTriggerFEVT.outputCommands FEVTHLTALLEventContent = cms.PSet( outputCommands = cms.untracked.vstring('drop *'), diff --git a/Configuration/Generator/python/Py8Eta2MuGammaPtExpGun_cfi.py b/Configuration/Generator/python/Py8Eta2MuGammaPtExpGun_cfi.py new file mode 100644 index 0000000000000..03febe3e40912 --- /dev/null +++ b/Configuration/Generator/python/Py8Eta2MuGammaPtExpGun_cfi.py @@ -0,0 +1,47 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.Generator.Pythia8CommonSettings_cfi import * +from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import * + +generator = cms.EDFilter("Pythia8PtExpGun", + + maxEventsToPrint = cms.untracked.int32(1), + pythiaPylistVerbosity = cms.untracked.int32(1), + pythiaHepMCVerbosity = cms.untracked.bool(True), + + PGunParameters = cms.PSet( + ParticleID = cms.vint32(221), + AddAntiParticle = cms.bool(False), + MinPhi = cms.double(-3.14159265359), + MaxPhi = cms.double(3.14159265359), + #MinPt = cms.double(5.0), + MinPt = cms.double(10.0), + #MaxPt = cms.double(65.0), + #MaxPt = cms.double(25.0), + MaxPt = cms.double(65.0), + MinEta = cms.double(-2.4), + MaxEta = cms.double(2.4) + ), + + PythiaParameters = cms.PSet( + 
pythia8CommonSettingsBlock, + pythia8CP5SettingsBlock, + processParameters = cms.vstring( + #'SLHA:keepSM = on', + #'SLHA:minMassSM = 10.', + # Very important to enable override! + 'SLHA:allowUserOverride = on', + 'RHadrons:allow = on', + 'RHadrons:allowDecay = on', + #'32:mayDecay = true', + '221:mayDecay = true', + # Set decay channels of eta (mumugamma) + '221:oneChannel = 1 1.0 0 13 -13 22' + ), + parameterSets = cms.vstring( + 'pythia8CommonSettings', + 'pythia8CP5Settings', + 'processParameters', + ) + ) +) diff --git a/Configuration/Geometry/README.md b/Configuration/Geometry/README.md index 1e9da7b40dfb2..fd2e7b862e145 100644 --- a/Configuration/Geometry/README.md +++ b/Configuration/Geometry/README.md @@ -42,7 +42,8 @@ The script also handles the common and forward elements of the geometry: * O5: as O6, but with trackermaterial removed (they are in T5, T6, T7, T8) * O6: 2021 baseline * F1: 2021 baseline -* F2: same as F1 with modified file from ZDC group +* F2: same as F1 with modified file zdc.xmlfrom ZDC group +* F3: same as F2 with added simulti geometry for RPD Several detector combinations have been generated: * 2021 = T3+C1+M1+P7+O6+F1 @@ -51,7 +52,7 @@ Several detector combinations have been generated: * 2021FlatMinus10Percent = T6+C1+M1+P7+O5+F1 * 2021FlatPlus05Percent = T7+C1+M1+P7+O5+F1 * 2021FlatPlus10Percent = T8+C1+M1+P7+O5+F1 -* 2023 = T3+C1+M2+P7+O6+F2 +* 2023 = T3+C1+M2+P7+O6+F3 * 2023ZeroMaterial = T4+C1+M1+P7+O4+F2 * 2023FlatMinus05Percent = T5+C1+M1+P7+O5+F2 * 2023FlatMinus10Percent = T6+C1+M1+P7+O5+F2 @@ -91,6 +92,8 @@ Calorimeters: * C19: HGCal (v17 version of HGCal geometry as in C18 but without internal cells in the Geant4 geometry definition) + Phase2 HCAL and EB (used in 2026D93) * C20: HGCal (v17 version of HGCal geometry as in C18) + HFNose with corrected wafer size + Phase2 HCAL and EB (used in 2026D93) * C21: HGCal (v17 version of HGCal geometry as in C19 but turning off all dead areas and gaps) + Phase2 HCAL and EB (used in 
2026D101) +* C22: HGCal (v18 version of HGCal geometry as in C18 with calibration cells, nonzero cssette retraction, correct mousebite, guard ring, proper cell size) + Phase2 HCAL and EB (used in 2026D104) +* C23: HGCal (same as the v18 version which is in C22 but without internal cells in the Geant4 geometry defintiion) + Phase2 HCAL and EB (used in 2026D106) Muon system: * M4: Phase2 muon system for TDR w/ GE2/1, ME0, RE3/1, RE4/1 (incl. granularity in ME0, staggered GE2/1), 96 iRPC strips, no overlaps, MB4Shields @@ -109,6 +112,7 @@ Fast Timing system: * I14: Same as I13, updated sensor structure, disc z location and passive materials * I15: Same as I14, addition of notch and revision of envelope * I16: Starting from I15, revised BTL with complete passive material description, it needs Tracker T31 or newer +* I17: Same as I16, BTL with one crystal thickness (type) only, ETL with LGAD split into two sensors The script also handles the common and forward elements of the geometry: * O4: detailed cavern description, changes for modified CALO region for endcap part, no overlaps inside the Muon System @@ -141,5 +145,9 @@ Several detector combinations have been generated: * D100 = T34+C17+M11+I16+O9+F8 * D101 = T34+C18+M11+I16+O9+F8 * D102 = T35+C17+M11+I16+O9+F8 -* D103 = T34+C21+M11+I16+O9+F8 - +* D103 = T35+C21+M11+I17+O9+F8 +* D104 = T35+C22+M11+I16+O9+F8 +* D105 = T35+C17+M11+I17+O9+F8 +* D106 = T35+C23+M11+I17+O9+F8 +* D107 = T32+C17+M11+I17+O9+F8 +* D108 = T35+C19+M11+I17+O9+F8 diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D104Reco_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D104Reco_cff.py new file mode 100644 index 0000000000000..26e7fa1ae997d --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D104Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the 
generating script, not just this config + +from Configuration.Geometry.GeometryDD4hepExtended2026D104_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from 
RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D104_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D104_cff.py new file mode 100644 index 0000000000000..15e07147063a4 --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D104_cff.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hep_cff import * +DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D104.xml") + +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D105Reco_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D105Reco_cff.py new file mode 100644 index 0000000000000..b8378ca30614b --- /dev/null +++ 
b/Configuration/Geometry/python/GeometryDD4hepExtended2026D105Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hepExtended2026D105_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * 
+from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D105_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D105_cff.py new file mode 100644 index 0000000000000..6a59004b7209b --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D105_cff.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hep_cff import * +DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D105.xml") + +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git 
a/Configuration/Geometry/python/GeometryDD4hepExtended2026D106Reco_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D106Reco_cff.py new file mode 100644 index 0000000000000..7cd751a1d0f71 --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D106Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hepExtended2026D106_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from 
Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D106_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D106_cff.py new file mode 100644 index 0000000000000..bb29835215c1d --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D106_cff.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hep_cff import * +DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D106.xml") + +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from 
Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D107Reco_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D107Reco_cff.py new file mode 100644 index 0000000000000..9b4f501814b0a --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D107Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hepExtended2026D107_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from 
Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D107_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D107_cff.py new file mode 100644 index 0000000000000..59a5c7249f773 --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D107_cff.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hep_cff import * +DDDetectorESProducer.confGeomXMLFiles = cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D107.xml") + +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from 
Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D108Reco_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D108Reco_cff.py new file mode 100644 index 0000000000000..925a43d98484a --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D108Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hepExtended2026D108_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from 
Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026D108_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026D108_cff.py new file mode 100644 index 0000000000000..79e677f7d91d5 --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026D108_cff.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryDD4hep_cff import * +DDDetectorESProducer.confGeomXMLFiles = 
cms.FileInPath("Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D108.xml") + +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026DefaultReco_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026DefaultReco_cff.py new file mode 100644 index 0000000000000..beff59e29780a --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026DefaultReco_cff.py @@ -0,0 +1,3 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.Geometry.GeometryDD4hepExtended2026D98Reco_cff import * diff --git a/Configuration/Geometry/python/GeometryDD4hepExtended2026Default_cff.py b/Configuration/Geometry/python/GeometryDD4hepExtended2026Default_cff.py new file mode 100644 index 0000000000000..c1e4cd3c76aa5 --- /dev/null +++ b/Configuration/Geometry/python/GeometryDD4hepExtended2026Default_cff.py @@ -0,0 +1,3 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.Geometry.GeometryDD4hepExtended2026D98_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026D104Reco_cff.py b/Configuration/Geometry/python/GeometryExtended2026D104Reco_cff.py new file mode 100644 index 0000000000000..3b6f6ef4f3e81 --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D104Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated 
automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryExtended2026D104_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from 
Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Geometry/TrackerCommonData/python/GeometryExtended2026D100V1_cff.py b/Configuration/Geometry/python/GeometryExtended2026D104_cff.py similarity index 74% rename from Geometry/TrackerCommonData/python/GeometryExtended2026D100V1_cff.py rename to Configuration/Geometry/python/GeometryExtended2026D104_cff.py index e397849a9eb01..21b389af810e6 100644 --- a/Geometry/TrackerCommonData/python/GeometryExtended2026D100V1_cff.py +++ b/Configuration/Geometry/python/GeometryExtended2026D104_cff.py @@ -1,6 +1,9 @@ import FWCore.ParameterSet.Config as cms -from Geometry.TrackerCommonData.cmsExtendedGeometry2026D100V1XML_cfi import * +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Geometry.CMSCommonData.cmsExtendedGeometry2026D104XML_cfi import * from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * from Geometry.EcalCommonData.ecalSimulationParameters_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026D105Reco_cff.py b/Configuration/Geometry/python/GeometryExtended2026D105Reco_cff.py new file mode 100644 index 0000000000000..4b2725ec2be78 --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D105Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py 
+# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryExtended2026D105_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# 
timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Geometry/TrackerCommonData/python/GeometryExtended2026D100V2_cff.py b/Configuration/Geometry/python/GeometryExtended2026D105_cff.py similarity index 74% rename from Geometry/TrackerCommonData/python/GeometryExtended2026D100V2_cff.py rename to Configuration/Geometry/python/GeometryExtended2026D105_cff.py index ad6cf46fbd0df..ec9939d2a8250 100644 --- a/Geometry/TrackerCommonData/python/GeometryExtended2026D100V2_cff.py +++ b/Configuration/Geometry/python/GeometryExtended2026D105_cff.py @@ -1,6 +1,9 @@ import FWCore.ParameterSet.Config as cms -from Geometry.TrackerCommonData.cmsExtendedGeometry2026D100V2XML_cfi import * +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Geometry.CMSCommonData.cmsExtendedGeometry2026D105XML_cfi import * from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * from Geometry.EcalCommonData.ecalSimulationParameters_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026D106Reco_cff.py b/Configuration/Geometry/python/GeometryExtended2026D106Reco_cff.py new file mode 100644 index 0000000000000..0f038291451ec --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D106Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating 
script, not just this config + +from Configuration.Geometry.GeometryExtended2026D106_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi 
import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryExtended2026D106_cff.py b/Configuration/Geometry/python/GeometryExtended2026D106_cff.py new file mode 100644 index 0000000000000..345f92c4142e0 --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D106_cff.py @@ -0,0 +1,15 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Geometry.CMSCommonData.cmsExtendedGeometry2026D106XML_cfi import * +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026D107Reco_cff.py b/Configuration/Geometry/python/GeometryExtended2026D107Reco_cff.py new file mode 100644 index 0000000000000..5f7bb251564a9 --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D107Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# 
If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryExtended2026D107_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# 
timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryExtended2026D107_cff.py b/Configuration/Geometry/python/GeometryExtended2026D107_cff.py new file mode 100644 index 0000000000000..ede48066e90af --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D107_cff.py @@ -0,0 +1,15 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Geometry.CMSCommonData.cmsExtendedGeometry2026D107XML_cfi import * +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026D108Reco_cff.py b/Configuration/Geometry/python/GeometryExtended2026D108Reco_cff.py new file mode 100644 index 0000000000000..e64122a492495 --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D108Reco_cff.py @@ -0,0 +1,59 @@ +import FWCore.ParameterSet.Config as cms + +# This config was 
generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Configuration.Geometry.GeometryExtended2026D108_cff import * + +# tracker +from Geometry.CommonTopologies.globalTrackingGeometry_cfi import * +from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import * +from Geometry.TrackerGeometryBuilder.trackerParameters_cff import * +from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import * +from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import * +trackerGeometry.applyAlignment = True + +# calo +from Geometry.CaloEventSetup.HGCalTopology_cfi import * +from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import * +from Geometry.CaloEventSetup.CaloTopology_cfi import * +from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import * +CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder", + SelectedCalos = cms.vstring("HCAL", + "ZDC", + "EcalBarrel", + "TOWER", + "HGCalEESensitive", + "HGCalHESiliconSensitive", + "HGCalHEScintillatorSensitive" + ) +) +from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import * +from Geometry.HcalEventSetup.HcalGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import * +from Geometry.HcalEventSetup.CaloTowerTopology_cfi import * +from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import * +from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import * +from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import * +from Geometry.EcalMapping.EcalMapping_cfi import * +from Geometry.EcalMapping.EcalMappingRecord_cfi import * + +# muon +from Geometry.MuonNumbering.muonNumberingInitialization_cfi import * +from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import * +from Geometry.GEMGeometryBuilder.gemGeometry_cff import * +from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import * +from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import * + +# forward +from 
Geometry.ForwardGeometry.ZdcGeometry_cfi import * + +# timing +from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import * +from Geometry.MTDGeometryBuilder.mtdParameters_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * +from Geometry.MTDNumberingBuilder.mtdTopology_cfi import * +from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import * +from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import * +mtdGeometry.applyAlignment = False + diff --git a/Configuration/Geometry/python/GeometryExtended2026D108_cff.py b/Configuration/Geometry/python/GeometryExtended2026D108_cff.py new file mode 100644 index 0000000000000..6109dc7300bea --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026D108_cff.py @@ -0,0 +1,15 @@ +import FWCore.ParameterSet.Config as cms + +# This config was generated automatically using generate2026Geometry.py +# If you notice a mistake, please update the generating script, not just this config + +from Geometry.CMSCommonData.cmsExtendedGeometry2026D108XML_cfi import * +from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cff import * +from SLHCUpgradeSimulations.Geometry.fakePhase2OuterTrackerConditions_cff import * +from Geometry.EcalCommonData.ecalSimulationParameters_cff import * +from Geometry.HcalCommonData.hcalDDDSimConstants_cff import * +from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import * +from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import * +from Geometry.MuonNumbering.muonGeometryConstants_cff import * +from Geometry.MuonNumbering.muonOffsetESProducer_cff import * +from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026DefaultReco_cff.py b/Configuration/Geometry/python/GeometryExtended2026DefaultReco_cff.py new file mode 100644 index 0000000000000..2144415e04a9b --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026DefaultReco_cff.py @@ -0,0 +1,3 @@ 
+import FWCore.ParameterSet.Config as cms + +from Configuration.Geometry.GeometryExtended2026D98Reco_cff import * diff --git a/Configuration/Geometry/python/GeometryExtended2026Default_cff.py b/Configuration/Geometry/python/GeometryExtended2026Default_cff.py new file mode 100644 index 0000000000000..ae40639051777 --- /dev/null +++ b/Configuration/Geometry/python/GeometryExtended2026Default_cff.py @@ -0,0 +1,3 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.Geometry.GeometryExtended2026D98_cff import * diff --git a/Configuration/Geometry/python/dict2021Geometry.py b/Configuration/Geometry/python/dict2021Geometry.py index c0cd29377ac71..41292064a94a3 100644 --- a/Configuration/Geometry/python/dict2021Geometry.py +++ b/Configuration/Geometry/python/dict2021Geometry.py @@ -1484,7 +1484,7 @@ forwardDict = { "abbrev" : "F", "name" : "forward", - "default" : 2, + "default" : 3, "F1" : { 2 : [ 'Geometry/ForwardCommonData/data/forward/2021/v1/forward.xml', @@ -1551,6 +1551,39 @@ 'from Geometry.ForwardGeometry.ForwardGeometry_cfi import *', ] }, + "F3" : { + 2 : [ + 'Geometry/ForwardCommonData/data/forward/2021/v1/forward.xml', + 'Geometry/ForwardCommonData/data/totemt2/2021/v1/totemt2.xml', + 'Geometry/ForwardCommonData/data/forwardshield/2021/v1/forwardshield.xml', + 'Geometry/ForwardCommonData/data/bhm.xml', + 'Geometry/ForwardCommonData/data/pltbcm/2021/v1/pltbcm.xml', + 'Geometry/ForwardCommonData/data/bcm1f/2021/v1/bcm1f.xml', + 'Geometry/ForwardCommonData/data/plt/2021/v1/plt.xml', + 'Geometry/ForwardCommonData/data/zdcmaterials/2021/v1/zdcmaterials.xml', + 'Geometry/ForwardCommonData/data/lumimaterials.xml', + 'Geometry/ForwardCommonData/data/zdcrotations.xml', + 'Geometry/ForwardCommonData/data/lumirotations.xml', + 'Geometry/ForwardCommonData/data/zdc/2021/v3/zdc.xml', + 'Geometry/ForwardCommonData/data/rpd/2021/v1/rpd.xml', + 'Geometry/ForwardCommonData/data/cmszdc.xml', + ], + 3 : [ + 
'Geometry/ForwardSimData/data/totemsensT2/2021/totemsensT2.xml', + 'Geometry/ForwardCommonData/data/bhmsens.xml', + 'Geometry/ForwardSimData/data/pltsens.xml', + 'Geometry/ForwardSimData/data/bcm1fsens.xml', + 'Geometry/ForwardSimData/data/zdcsens/2021/v1/zdcsens.xml', + ], + 4 : [ + 'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml', + 'Geometry/ForwardSimData/data/bhmProdCuts/2021/v1/bhmProdCuts.xml', + 'Geometry/ForwardSimData/data/zdcProdCuts/2021/v3/zdcProdCuts.xml', + ], + "reco" :[ + 'from Geometry.ForwardGeometry.ForwardGeometry_cfi import *', + ] + }, } ppsDict = { @@ -1666,7 +1699,7 @@ ("O5","T6","C1","M1","F1","P7") : "2021FlatMinus10Percent", ("O5","T7","C1","M1","F1","P7") : "2021FlatPlus05Percent", ("O5","T8","C1","M1","F1","P7") : "2021FlatPlus10Percent", - ("O6","T3","C1","M2","F2","P7") : "2023", + ("O6","T3","C1","M2","F3","P7") : "2023", ("O4","T4","C1","M2","F2","P7") : "2023ZeroMaterial", ("O5","T5","C1","M2","F2","P7") : "2023FlatMinus05Percent", ("O5","T6","C1","M2","F2","P7") : "2023FlatMinus10Percent", diff --git a/Configuration/Geometry/python/dict2026Geometry.py b/Configuration/Geometry/python/dict2026Geometry.py index afc0b72502cec..5aeb2860e3864 100644 --- a/Configuration/Geometry/python/dict2026Geometry.py +++ b/Configuration/Geometry/python/dict2026Geometry.py @@ -251,7 +251,7 @@ 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_2021_02/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/OuterTracker616_2020_04/otst.xml', - 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_IT711_2023_05V1/pixel.xml', + 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_IT711_2023_05/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 
'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_2021_02/trackerStructureTopology.xml', @@ -322,7 +322,7 @@ 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_2021_03/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/OuterTracker616_2020_04/otst.xml', - 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_IT711_2023_05V1/pixel.xml', + 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_IT711_2023_05/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/Tracker_DD4hep_compatible_2021_02/trackerStructureTopology.xml', @@ -736,6 +736,155 @@ ], "era" : "phase2_ecal, phase2_hcal, phase2_hgcal, hcalHardcodeConditions, phase2_hgcalV10, phase2_hgcalV11, phase2_hgcalV16, phase2_hfnose", }, + "C22" : { + 1 : [ + 'Geometry/EcalCommonData/data/eregalgo/2026/v2/eregalgo.xml', + 'Geometry/EcalCommonData/data/ectkcable/2026/v1/ectkcable.xml', + 'Geometry/EcalCommonData/data/ectkcablemat/2026/v2/ectkcablemat.xml', + 'Geometry/EcalCommonData/data/ebalgo.xml', + 'Geometry/EcalCommonData/data/ebcon/2021/v1/ebcon.xml', + 'Geometry/EcalCommonData/data/ebrot.xml', + 'Geometry/HcalCommonData/data/hcalrotations.xml', + 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', + 'Geometry/HcalCommonData/data/hcal/v2/hcalalgo.xml', + 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', + 'Geometry/HcalCommonData/data/hcalcablealgo/v2/hcalcablealgo.xml', + 'Geometry/HcalCommonData/data/hcalouteralgo/v1/hcalouteralgo.xml', + 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', + 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/v1/hcalSimNumbering.xml', + 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/v2/hcalRecNumbering.xml', + 'Geometry/HGCalCommonData/data/hgcalMaterial/v2/hgcalMaterial.xml', + 
'Geometry/HGCalCommonData/data/hgcal/v18/hgcal.xml', + 'Geometry/HGCalCommonData/data/hgcalcell/v17/hgcalcell.xml', + 'Geometry/HGCalCommonData/data/hgcalwafer/v18/hgcalwafer.xml', + 'Geometry/HGCalCommonData/data/hgcalPassive/v18/hgcalPassive.xml', + 'Geometry/HGCalCommonData/data/hgcalEE/v18/hgcalEE.xml', + 'Geometry/HGCalCommonData/data/hgcalHEsil/v18/hgcalHEsil.xml', + 'Geometry/HGCalCommonData/data/hgcalHEmix/v18/hgcalHEmix.xml', + 'Geometry/HGCalCommonData/data/hgcalCons/v18/hgcalCons.xml', + 'Geometry/HGCalCommonData/data/hgcalConsData/v17/hgcalConsData.xml', + ], + 3 : [ + 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', + 'Geometry/HcalCommonData/data/hcalsens/NoHE/v1/hcalsenspmf.xml', + 'Geometry/HcalSimData/data/hf.xml', + 'Geometry/HcalSimData/data/hfpmt.xml', + 'Geometry/HcalSimData/data/hffibrebundle.xml', + 'Geometry/HcalSimData/data/CaloUtil/2026/v2c/CaloUtil.xml', + 'Geometry/HGCalSimData/data/hgcsensv15.xml', + ], + 4 : [ + 'Geometry/HcalSimData/data/HcalProdCuts/2026/v1/HcalProdCuts.xml', + 'Geometry/EcalSimData/data/EcalProdCuts.xml', + 'Geometry/HGCalSimData/data/hgcProdCutsv15.xml', + ], + "sim" : [ + 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', + 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', + 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', + 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *' + ], + "reco" : [ + 'from Geometry.CaloEventSetup.HGCalTopology_cfi import *', + 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', + 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', + 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', + 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', + ' SelectedCalos = cms.vstring("HCAL",', + ' "ZDC",', + ' "EcalBarrel",', + ' "TOWER",', + ' "HGCalEESensitive",', + ' "HGCalHESiliconSensitive",', + ' "HGCalHEScintillatorSensitive"', + ' )', + ')', + 'from 
Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', + 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', + 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', + 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', + 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', + 'from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', + 'from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', + 'from Geometry.EcalMapping.EcalMapping_cfi import *', + 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', + ], + "era" : "phase2_ecal, phase2_hcal, phase2_hgcal, hcalHardcodeConditions, phase2_hgcalV10, phase2_hgcalV11, phase2_hgcalV16, phase2_hfnose", + }, + "C23" : { + 1 : [ + 'Geometry/EcalCommonData/data/eregalgo/2026/v2/eregalgo.xml', + 'Geometry/EcalCommonData/data/ectkcable/2026/v1/ectkcable.xml', + 'Geometry/EcalCommonData/data/ectkcablemat/2026/v2/ectkcablemat.xml', + 'Geometry/EcalCommonData/data/ebalgo.xml', + 'Geometry/EcalCommonData/data/ebcon/2021/v1/ebcon.xml', + 'Geometry/EcalCommonData/data/ebrot.xml', + 'Geometry/HcalCommonData/data/hcalrotations.xml', + 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', + 'Geometry/HcalCommonData/data/hcal/v2/hcalalgo.xml', + 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', + 'Geometry/HcalCommonData/data/hcalcablealgo/v2/hcalcablealgo.xml', + 'Geometry/HcalCommonData/data/hcalouteralgo/v1/hcalouteralgo.xml', + 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', + 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/v1/hcalSimNumbering.xml', + 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/v2/hcalRecNumbering.xml', + 'Geometry/HGCalCommonData/data/hgcalMaterial/v2/hgcalMaterial.xml', + 'Geometry/HGCalCommonData/data/hgcal/v18/hgcal.xml', + 'Geometry/HGCalCommonData/data/hgcalwafer/v18n/hgcalwafer.xml', + 'Geometry/HGCalCommonData/data/hgcalPassive/v18/hgcalPassive.xml', + 'Geometry/HGCalCommonData/data/hgcalEE/v18/hgcalEE.xml', + 
'Geometry/HGCalCommonData/data/hgcalHEsil/v18/hgcalHEsil.xml', + 'Geometry/HGCalCommonData/data/hgcalHEmix/v18/hgcalHEmix.xml', + 'Geometry/HGCalCommonData/data/hgcalCons/v18n/hgcalCons.xml', + 'Geometry/HGCalCommonData/data/hgcalConsData/v17/hgcalConsData.xml', + ], + 3 : [ + 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', + 'Geometry/HcalCommonData/data/hcalsens/NoHE/v1/hcalsenspmf.xml', + 'Geometry/HcalSimData/data/hf.xml', + 'Geometry/HcalSimData/data/hfpmt.xml', + 'Geometry/HcalSimData/data/hffibrebundle.xml', + 'Geometry/HcalSimData/data/CaloUtil/2026/v2c/CaloUtil.xml', + 'Geometry/HGCalSimData/data/hgcsensv17n.xml', + ], + 4 : [ + 'Geometry/HcalSimData/data/HcalProdCuts/2026/v1/HcalProdCuts.xml', + 'Geometry/EcalSimData/data/EcalProdCuts.xml', + 'Geometry/HGCalSimData/data/hgcProdCutsv15.xml', + ], + "sim" : [ + 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', + 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', + 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', + 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *' + ], + "reco" : [ + 'from Geometry.CaloEventSetup.HGCalTopology_cfi import *', + 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', + 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', + 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', + 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', + ' SelectedCalos = cms.vstring("HCAL",', + ' "ZDC",', + ' "EcalBarrel",', + ' "TOWER",', + ' "HGCalEESensitive",', + ' "HGCalHESiliconSensitive",', + ' "HGCalHEScintillatorSensitive"', + ' )', + ')', + 'from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', + 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', + 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', + 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', + 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', + 'from 
Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', + 'from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', + 'from Geometry.EcalMapping.EcalMapping_cfi import *', + 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', + ], + "era" : "phase2_ecal, phase2_hcal, phase2_hgcal, hcalHardcodeConditions, phase2_hgcalV10, phase2_hgcalV11, phase2_hgcalV16, phase2_hfnose", + }, } @@ -955,6 +1104,34 @@ ], "era" : "phase2_timing, phase2_timing_layer, phase2_etlV4", }, + "I17" : { + 1 : [ + 'Geometry/MTDCommonData/data/mtdMaterial/v3/mtdMaterial.xml', + 'Geometry/MTDCommonData/data/btl/v3/btl.xml', + 'Geometry/MTDCommonData/data/etl/v8/etl.xml', + 'Geometry/MTDCommonData/data/mtdParameters/v5/mtdStructureTopology.xml', + 'Geometry/MTDCommonData/data/mtdParameters/v5/mtdParameters.xml', + ], + 3 : [ + 'Geometry/MTDSimData/data/v5/mtdsens.xml' + ], + 4 : [ + 'Geometry/MTDSimData/data/v5/mtdProdCuts.xml' + ], + "sim" : [ + 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import *', + ], + "reco" :[ + 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', + 'from Geometry.MTDGeometryBuilder.mtdParameters_cff import *', + 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cff import *', + 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', + 'from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', + 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', + 'mtdGeometry.applyAlignment = False' + ], + "era" : "phase2_timing, phase2_timing_layer, phase2_etlV4", + }, } allDicts = [ commonDict, trackerDict, caloDict, muonDict, forwardDict, timingDict ] @@ -975,6 +1152,11 @@ ("O9","T34","C18","M11","F8","I16") : "D101", ("O9","T35","C17","M11","F8","I16") : "D102", ("O9","T34","C21","M11","F8","I16") : "D103", + ("O9","T35","C22","M11","F8","I17") : "D104", + ("O9","T35","C17","M11","F8","I17") : "D105", + ("O9","T35","C23","M11","F8","I17") : "D106", + ("O9","T32","C17","M11","F8","I17") : "D107", + 
("O9","T35","C19","M11","F8","I17") : "D108", } deprecatedDets = set([ "D1", "D2", "D3", "D5", "D6" , "D7", "D4", "D8" , "D9", "D12", "D13", "D15", "D10", "D11", "D14", "D16", "D17", "D18", "D19", "D20", "D21", "D22", "D23", "D24", "D25", "D26", "D27", "D28", "D29", "D30", "D31", "D32", "D33", "D34", "D36", "D37", "D38", "D39", "D40", "D42", "D35", "D41", "D43", "D44", "D45", "D46", "D48", "D47", "D50", "D51", "D52", "D53", "D54", "D55", "D56", "D57", "D58", "D59", "D61", "D62", "D63", "D64", "D65", "D66", "D67", "D69", "D71", "D72", "D73", "D74", "D75", "D78", "D79", "D87", "D89", "D90", "D49", "D60", "D68", "D70", "D76", "D77", "D80", "D81", "D82", "D83", "D84", "D85"]) diff --git a/Configuration/HLT/python/addOnTestsHLT.py b/Configuration/HLT/python/addOnTestsHLT.py index a174dfd6cf880..2fcd545b815fd 100644 --- a/Configuration/HLT/python/addOnTestsHLT.py +++ b/Configuration/HLT/python/addOnTestsHLT.py @@ -22,6 +22,9 @@ def addOnTestsHLT(): 'hlt_mc_PRef' : ['cmsDriver.py TTbar_13TeV_TuneCUETP8M1_cfi -s GEN,SIM,DIGI,L1,DIGI2RAW --mc --scenario=pp -n 10 --conditions auto:run3_mc_PRef --relval 9000,50 --datatier "GEN-SIM-RAW" --eventcontent RAWSIM --customise=HLTrigger/Configuration/CustomConfigs.L1T --era Run3_2023 --fileout file:RelVal_Raw_PRef_MC.root', 'HLTrigger/Configuration/test/OnLine_HLT_PRef.py', 'cmsDriver.py RelVal -s HLT:PRef,RAW2DIGI,L1Reco,RECO --mc --scenario=pp -n 10 --conditions auto:run3_mc_PRef --relval 9000,50 --datatier "RAW-HLT-RECO" --eventcontent FEVTDEBUGHLT --customise=HLTrigger/Configuration/CustomConfigs.L1THLT --customise=HLTrigger/Configuration/CustomConfigs.HLTRECO --era Run3_2023 --processName=HLTRECO --filein file:RelVal_Raw_PRef_MC.root --fileout file:RelVal_Raw_PRef_MC_HLT_RECO.root'], + 'hlt_mc_Special' : ['cmsDriver.py TTbar_13TeV_TuneCUETP8M1_cfi -s GEN,SIM,DIGI,L1,DIGI2RAW --mc --scenario=pp -n 10 --conditions auto:run3_mc_Special --relval 9000,50 --datatier "GEN-SIM-RAW" --eventcontent RAWSIM 
--customise=HLTrigger/Configuration/CustomConfigs.L1T --era Run3_2023 --fileout file:RelVal_Raw_Special_MC.root', + 'HLTrigger/Configuration/test/OnLine_HLT_Special.py', + 'cmsDriver.py RelVal -s HLT:GRun,RAW2DIGI,L1Reco,RECO --mc --scenario=pp -n 10 --conditions auto:run3_mc_Special --relval 9000,50 --datatier "RAW-HLT-RECO" --eventcontent FEVTDEBUGHLT --customise=HLTrigger/Configuration/CustomConfigs.L1THLT --customise=HLTrigger/Configuration/CustomConfigs.HLTRECO --era Run3_2023 --processName=HLTRECO --filein file:RelVal_Raw_Special_MC.root --fileout file:RelVal_Raw_Special_MC_HLT_RECO.root'], 'hlt_data_Fake' : ['cmsDriver.py RelVal -s L1REPACK:GT1 --data --scenario=pp -n 10 --conditions auto:run1_hlt_Fake --relval 9000,50 --datatier "RAW" --eventcontent RAW --customise=HLTrigger/Configuration/CustomConfigs.L1T --fileout file:RelVal_Raw_Fake_DATA.root --filein /store/data/Run2012A/MuEG/RAW/v1/000/191/718/14932935-E289-E111-830C-5404A6388697.root', 'HLTrigger/Configuration/test/OnLine_HLT_Fake.py', 'cmsDriver.py RelVal -s HLT:Fake,RAW2DIGI,L1Reco,RECO --data --scenario=pp -n 10 --conditions auto:run1_data_Fake --relval 9000,50 --datatier "RAW-HLT-RECO" --eventcontent FEVTDEBUGHLT --customise=HLTrigger/Configuration/CustomConfigs.L1THLT --customise=HLTrigger/Configuration/CustomConfigs.HLTRECO --processName=HLTRECO --filein file:RelVal_Raw_Fake_DATA.root --fileout file:RelVal_Raw_Fake_DATA_HLT_RECO.root'], @@ -43,6 +46,9 @@ def addOnTestsHLT(): 'hlt_data_PRef' : ['cmsDriver.py RelVal -s L1REPACK:Full --data --scenario=pp -n 10 --conditions auto:run3_hlt_PRef --relval 9000,50 --datatier "RAW" --customise=HLTrigger/Configuration/CustomConfigs.L1T --era Run3_2023 --eventcontent RAW --fileout file:RelVal_Raw_PRef_DATA.root --filein /store/data/Run2023D/EphemeralHLTPhysics0/RAW/v1/000/370/293/00000/2ef73d2a-1fb7-4dac-9961-149525f9e887.root', 'HLTrigger/Configuration/test/OnLine_HLT_PRef.py', 'cmsDriver.py RelVal -s HLT:PRef,RAW2DIGI,L1Reco,RECO --data --scenario=pp -n 
10 --conditions auto:run3_data_PRef --relval 9000,50 --datatier "RAW-HLT-RECO" --eventcontent FEVTDEBUGHLT --customise=HLTrigger/Configuration/CustomConfigs.L1THLT --customise=HLTrigger/Configuration/CustomConfigs.HLTRECO --customise=HLTrigger/Configuration/CustomConfigs.customiseGlobalTagForOnlineBeamSpot --era Run3_2023 --processName=HLTRECO --filein file:RelVal_Raw_PRef_DATA.root --fileout file:RelVal_Raw_PRef_DATA_HLT_RECO.root'], + 'hlt_data_Special' : ['cmsDriver.py RelVal -s L1REPACK:Full --data --scenario=pp -n 10 --conditions auto:run3_hlt_Special --relval 9000,50 --datatier "RAW" --eventcontent RAW --customise=HLTrigger/Configuration/CustomConfigs.L1T --era Run3_2023 --fileout file:RelVal_Raw_Special_DATA.root --filein /store/data/Run2023D/EphemeralHLTPhysics0/RAW/v1/000/370/293/00000/2ef73d2a-1fb7-4dac-9961-149525f9e887.root', + 'HLTrigger/Configuration/test/OnLine_HLT_Special.py', + 'cmsDriver.py RelVal -s HLT:Special,RAW2DIGI,L1Reco,RECO --data --scenario=pp -n 10 --conditions auto:run3_data_Special --relval 9000,50 --datatier "RAW-HLT-RECO" --eventcontent FEVTDEBUGHLT --customise=HLTrigger/Configuration/CustomConfigs.L1THLT --customise=HLTrigger/Configuration/CustomConfigs.HLTRECO --customise=HLTrigger/Configuration/CustomConfigs.customiseGlobalTagForOnlineBeamSpot --era Run3_2023 --processName=HLTRECO --filein file:RelVal_Raw_Special_DATA.root --fileout file:RelVal_Raw_Special_DATA_HLT_RECO.root'], } return addOnTestsHLT diff --git a/Configuration/HLT/python/autoCondHLT.py b/Configuration/HLT/python/autoCondHLT.py index 1189799600c7a..d1576114d8024 100644 --- a/Configuration/HLT/python/autoCondHLT.py +++ b/Configuration/HLT/python/autoCondHLT.py @@ -24,6 +24,7 @@ 'HIon' : ( ','.join( [ 'L1Menu_CollisionsHeavyIons2023_v1_1_5_xml' ,l1tMenuRecord,connectionString,l1tMenuLabel, "2023-10-24 16:59:55.000"] ), ), 'PIon' : ( ','.join( [ 'L1Menu_HeavyIons2016_v3_m2_xml' ,l1tMenuRecord,connectionString,l1tMenuLabel, "2016-11-22 11:11:00.000"] ), ), 'PRef' : ( 
','.join( [ 'L1Menu_CollisionsPPRef2023_v1_1_2_xml' ,l1tMenuRecord,connectionString,l1tMenuLabel, "2023-09-21 19:00:00.000"] ), ), + 'Special' : ( ','.join( [ 'L1Menu_Collisions2023_v1_3_0_xml' ,l1tMenuRecord,connectionString,l1tMenuLabel, "2023-07-11 15:33:37.000"] ), ), } hltGTs = { @@ -40,6 +41,7 @@ 'run3_mc_HIon' : ('phase1_2023_realistic_hi',l1Menus['HIon']), 'run3_mc_PIon' : ('phase1_2023_realistic' ,l1Menus['PIon']), 'run3_mc_PRef' : ('phase1_2023_realistic' ,l1Menus['PRef']), + 'run3_mc_Special' : ('phase1_2023_realistic' ,l1Menus['Special']), 'run1_hlt_Fake' : ('run2_hlt_relval' ,l1Menus['Fake']), 'run2_hlt_Fake' : ('run2_hlt_relval' ,l1Menus['Fake']), @@ -51,6 +53,7 @@ 'run3_hlt_HIon' : ('run3_hlt' ,l1Menus['HIon']), 'run3_hlt_PIon' : ('run3_hlt' ,l1Menus['PIon']), 'run3_hlt_PRef' : ('run3_hlt' ,l1Menus['PRef']), + 'run3_hlt_Special' : ('run3_hlt' ,l1Menus['Special']), 'run1_data_Fake' : ('run2_data' ,l1Menus['Fake']), 'run2_data_Fake' : ('run2_data' ,l1Menus['Fake']), @@ -62,6 +65,7 @@ 'run3_data_HIon' : ('run3_data_prompt' ,l1Menus['HIon']), 'run3_data_PIon' : ('run3_data_prompt' ,l1Menus['PIon']), 'run3_data_PRef' : ('run3_data_prompt' ,l1Menus['PRef']), + 'run3_data_Special' : ('run3_data_prompt' ,l1Menus['Special']), } diff --git a/Configuration/HLT/python/autoHLT.py b/Configuration/HLT/python/autoHLT.py index 89fa8e094c8cf..110e0de57ac68 100644 --- a/Configuration/HLT/python/autoHLT.py +++ b/Configuration/HLT/python/autoHLT.py @@ -11,7 +11,8 @@ 'relval2017' : 'Fake2', 'relval2018' : 'Fake2', 'relval2022' : 'Fake2', - 'relval2023' : 'GRun', + 'relval2023' : '2023v12', + 'relval2024' : 'GRun', 'relval2026' : '75e33', 'test' : 'GRun', } diff --git a/Configuration/ProcessModifiers/python/Era_Run3_CTPPS_directSim_cff.py b/Configuration/ProcessModifiers/python/Era_Run3_CTPPS_directSim_cff.py new file mode 100644 index 0000000000000..67c085ea3cc9b --- /dev/null +++ b/Configuration/ProcessModifiers/python/Era_Run3_CTPPS_directSim_cff.py @@ -0,0 +1,6 @@ 
+import FWCore.ParameterSet.Config as cms + +from Configuration.Eras.Era_Run3_cff import Run3 +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim + +Run3_CTPPS_directSim = cms.ModifierChain(Run3,ctpps_directSim) diff --git a/Configuration/ProcessModifiers/python/alpakaValidationPixel_cff.py b/Configuration/ProcessModifiers/python/alpakaValidationPixel_cff.py new file mode 100644 index 0000000000000..ebdb7d9e6981a --- /dev/null +++ b/Configuration/ProcessModifiers/python/alpakaValidationPixel_cff.py @@ -0,0 +1,6 @@ +import FWCore.ParameterSet.Config as cms + +# This modifier chain is for turning on DQM modules used for alpaka device/host validation for pixels + +alpakaValidationPixel = cms.Modifier() + diff --git a/Configuration/ProcessModifiers/python/alpakaValidation_cff.py b/Configuration/ProcessModifiers/python/alpakaValidation_cff.py new file mode 100644 index 0000000000000..3399bdda7c4df --- /dev/null +++ b/Configuration/ProcessModifiers/python/alpakaValidation_cff.py @@ -0,0 +1,11 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.ProcessModifiers.alpaka_cff import * +from Configuration.ProcessModifiers.alpakaValidationPixel_cff import * + +# This modifier chain is for turning on DQM modules used for alpaka device/host validation + +alpakaValidation = cms.ModifierChain( + alpaka, + alpakaValidationPixel +) diff --git a/Configuration/ProcessModifiers/python/vertex4DTrackSelMVA_cff.py b/Configuration/ProcessModifiers/python/vertex4DTrackSelMVA_cff.py new file mode 100644 index 0000000000000..4983c6b1fe0c3 --- /dev/null +++ b/Configuration/ProcessModifiers/python/vertex4DTrackSelMVA_cff.py @@ -0,0 +1,6 @@ +import FWCore.ParameterSet.Config as cms + +# Modifier to enable the use of the MVA selection on +# tracks for the 4D vertex reco + +vertex4DTrackSelMVA = cms.Modifier() diff --git a/Configuration/PyReleaseValidation/README.md b/Configuration/PyReleaseValidation/README.md index 9c9cb96cd7792..817a20e2c86a5 100644 --- 
a/Configuration/PyReleaseValidation/README.md +++ b/Configuration/PyReleaseValidation/README.md @@ -30,6 +30,7 @@ The offsets currently in use are: * 0.2: Tracking Run-2 era, `Run2_2017_trackingRun2` * 0.3: 0.1 + 0.2 * 0.4: LowPU tracking era, `Run2_2017_trackingLowPU` +* 0.411: Patatrack, ECAL only, Alpaka * 0.5: Pixel tracking only + 0.1 * 0.501: Patatrack, pixel only quadruplets, on CPU * 0.502: Patatrack, pixel only quadruplets, with automatic offload to GPU if available @@ -69,6 +70,9 @@ The offsets currently in use are: * 0.612: ECAL `phase2_ecal_devel` era, with automatic offload to GPU if available * 0.631: ECAL component-method based digis * 0.632: ECAL component-method based finely-sampled waveforms +* 0.633: ECAL phase2 Trigger Primitive +* 0.634: ECAL phase2 Trigger Primitive + component-method based digis +* 0.635: ECAL phase2 Trigger Primitive + component-method based finely-sampled waveforms * 0.75: Phase-2 HLT * 0.91: Track DNN modifier * 0.97: Premixing stage1 diff --git a/Configuration/PyReleaseValidation/python/relval_2017.py b/Configuration/PyReleaseValidation/python/relval_2017.py index 094f0bccbeabb..2f641e5005ff4 100644 --- a/Configuration/PyReleaseValidation/python/relval_2017.py +++ b/Configuration/PyReleaseValidation/python/relval_2017.py @@ -46,7 +46,10 @@ # (Patatrack HCAL-only: TTbar - on CPU) # (Patatrack pixel-only: ZMM - on CPU: quadruplets, triplets) # (TTbar FastSim, TTbar FastSim PU, MinBiasFS for mixing)) +# (ZEE) +# (Nu Gun) # 2024 (TTbar, TTbar PU, TTbar PU premix) + numWFIB = [10001.0,10002.0,10003.0,10004.0,10005.0,10006.0,10007.0,10008.0,10009.0,10059.0,10071.0, 10042.0,10024.0,10025.0,10026.0,10023.0,10224.0,10225.0,10424.0, 10024.1,10024.2,10024.3,10024.4,10024.5, @@ -83,7 +86,9 @@ 12434.521, 12450.501,12450.505, 14034.0,14234.0,14040.303, - 12834.0,13034.0,13034.99,] + 12446.0, + 12461.0, + 12834.0,13034.0,13034.99,] for numWF in numWFIB: if not numWF in _upgrade_workflows: diff --git 
a/Configuration/PyReleaseValidation/python/relval_2026.py b/Configuration/PyReleaseValidation/python/relval_2026.py index 4821d0832daea..3b3cde4884729 100644 --- a/Configuration/PyReleaseValidation/python/relval_2026.py +++ b/Configuration/PyReleaseValidation/python/relval_2026.py @@ -16,7 +16,6 @@ numWFIB = [] numWFIB.extend([20034.0]) #2026D86 numWFIB.extend([20834.0]) #2026D88 -numWFIB.extend([20834.501,20834.502]) #2026D88 Patatrack local reconstruction on CPU, Patatrack local reconstruction on GPU (to remove when available in D98) numWFIB.extend([22034.0]) #2026D91 numWFIB.extend([22434.0]) #2026D92 numWFIB.extend([22834.0]) #2026D93 @@ -27,6 +26,7 @@ numWFIB.extend([24834.0,24834.911,24834.103]) #2026D98 DDD XML, DD4hep XML, aging numWFIB.extend([25061.97]) #2026D98 premixing stage1 (NuGun+PU) numWFIB.extend([24834.5,24834.9]) #2026D98 pixelTrackingOnly, vector hits +numWFIB.extend([24834.501,24834.502]) #2026D98 Patatrack local reconstruction on CPU, Patatrack local reconstruction on GPU numWFIB.extend([25034.99,25034.999]) #2026D98 premixing combined stage1+stage2 (ttbar+PU200, ttbar+PU50 for PR test) numWFIB.extend([24834.21,25034.21,25034.9921]) #2026D98 prodlike, prodlike PU, prodlike premix stage1+stage2 numWFIB.extend([25034.114]) #2026D98 PU, with 10% OT ineffiency @@ -35,11 +35,18 @@ numWFIB.extend([26034.0]) #2026D101 numWFIB.extend([26434.0]) #2026D102 numWFIB.extend([26834.0]) #2026D103 +numWFIB.extend([27234.0]) #2026D104 +numWFIB.extend([27634.0]) #2026D105 +numWFIB.extend([28034.0]) #2026D106 +numWFIB.extend([28434.0]) #2026D107 +numWFIB.extend([28834.0]) #2026D108 #Additional sample for short matrix and IB #CloseByPGun for HGCAL numWFIB.extend([24896.0]) #CE_E_Front_120um D98 numWFIB.extend([24900.0]) #CE_H_Coarse_Scint D98 +# NuGun +numWFIB.extend([24861.0]) #Nu Gun 2026D98 for numWF in numWFIB: workflows[numWF] = _upgrade_workflows[numWF] diff --git a/Configuration/PyReleaseValidation/python/relval_gpu.py 
b/Configuration/PyReleaseValidation/python/relval_gpu.py index d53c326662370..3eeabf9d3c43a 100644 --- a/Configuration/PyReleaseValidation/python/relval_gpu.py +++ b/Configuration/PyReleaseValidation/python/relval_gpu.py @@ -23,6 +23,8 @@ # Patatrack pixel-only triplets, ECAL, HCAL: TTbar - on GPU (optional), GPU-vs-CPU validation, profiling (to be implemented) # full reco with Patatrack pixel-only quadruplets: TTbar - on GPU (optional), GPU-vs-CPU validation # full reco with Patatrack pixel-only triplets: TTbar - on GPU (optional), GPU-vs-CPU validation +# Patatrack Single Nu E10 on GPU (optional) +# mc 2026 Patatrack Single Nu E10 on GPU (optional) numWFIB = [ # 2023 12450.502, 12450.503, 12450.504, @@ -35,6 +37,8 @@ 12434.586, 12434.587, # 12434.588, 12434.592, 12434.593, 12434.596, 12434.597, + 12461.502, + 24861.502 ] for numWF in numWFIB: diff --git a/Configuration/PyReleaseValidation/python/relval_nano.py b/Configuration/PyReleaseValidation/python/relval_nano.py index ef88f824e14fc..30a127b7f7994 100644 --- a/Configuration/PyReleaseValidation/python/relval_nano.py +++ b/Configuration/PyReleaseValidation/python/relval_nano.py @@ -195,7 +195,8 @@ def subnext(self): '--datatier':'NANOAOD', '--eventcontent':'NANOAOD', '--filein':'/store/mc/Run3Summer22MiniAODv3/BulkGravitonToHH_MX1120_MH121_TuneCP5_13p6TeV_madgraph-pythia8/MINIAODSIM/124X_mcRun3_2022_realistic_v12-v3/2810000/f9cdd76c-faac-4f24-bf0c-2496c8fffe54.root', - '--secondfilein':'/store/mc/Run3Summer22DRPremix/BulkGravitonToHH_MX1120_MH121_TuneCP5_13p6TeV_madgraph-pythia8/AODSIM/124X_mcRun3_2022_realistic_v12-v3/2810000/ab09fc5d-859c-407f-b7ce-74b0bae9bb96.root'}]) + '--secondfilein':'/store/mc/Run3Summer22DRPremix/BulkGravitonToHH_MX1120_MH121_TuneCP5_13p6TeV_madgraph-pythia8/AODSIM/124X_mcRun3_2022_realistic_v12-v3/2810000/ab09fc5d-859c-407f-b7ce-74b0bae9bb96.root', + '--customise':'IOPool/Input/fixReading_12_4_X_Files.fixReading_12_4_X_Files'}]) _wfn=WFN(2500) ################ diff --git 
a/Configuration/PyReleaseValidation/python/relval_production.py b/Configuration/PyReleaseValidation/python/relval_production.py index 6b97967eea083..fca0bc0856e34 100644 --- a/Configuration/PyReleaseValidation/python/relval_production.py +++ b/Configuration/PyReleaseValidation/python/relval_production.py @@ -11,7 +11,7 @@ ## data production test workflows[1000] = [ '',['RunMinBias2011A','TIER0','SKIMD','HARVESTDfst2','ALCASPLIT']] workflows[1001] = [ '',['RunMinBias2011A','TIER0EXP','ALCAEXP','ALCAHARVDSIPIXELCALRUN1','ALCAHARVD1','ALCAHARVD2','ALCAHARVD3','ALCAHARVD4','ALCAHARVD5','ALCAHARVD7','ALCAHARVD8']] -workflows[1001.2] = [ '',['RunZeroBias2017F','TIER0EXPRUN2','ALCAEXPRUN2','ALCAHARVDSIPIXELCAL','ALCAHARVDSIPIXELCALLA','ALCAHARVD4','ALCAHARVDSIPIXELALIHG']] +workflows[1001.2] = [ '',['RunZeroBias2017F','TIER0EXPRUN2','ALCAEXPRUN2','ALCAHARVDSIPIXELCAL','ALCAHARVDSIPIXELCALLA','ALCAHARVD4','ALCAHARVDSIPIXELALIHG','ALCAHARVDSIPIXELALIHGCOMBINED']] workflows[1001.3] = [ '',['RunSingleMuon2022B','TIER0EXPRUN3','ALCAEXPRUN3','ALCAHARVDEXPRUN3']] workflows[1002.3] = [ '',['RunZeroBias2022B','TIER0PROMPTRUN3','ALCASPLITRUN3','ALCAHARVDEXPRUN3']] workflows[1002.4] = [ '',['RunDoubleMuon2022B','TIER0PROMPTRUN3','HARVESTPROMPTRUN3']] diff --git a/Configuration/PyReleaseValidation/python/relval_standard.py b/Configuration/PyReleaseValidation/python/relval_standard.py index 2054c7e6e8d89..eb0a677a42482 100644 --- a/Configuration/PyReleaseValidation/python/relval_standard.py +++ b/Configuration/PyReleaseValidation/python/relval_standard.py @@ -440,58 +440,58 @@ workflows[139.005] = ['',['AlCaPhiSym2021','RECOALCAECALPHISYMDR3','ALCAECALPHISYM']] ### run3 (2022) ### -workflows[140.001] = ['',['RunMinimumBias2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.002] = ['',['RunSingleMuon2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.001] = 
['',['RunMinimumBias2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.002] = ['',['RunSingleMuon2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] workflows[140.003] = ['',['RunZeroBias2022A','HLTDR3_2022','RECONANORUN3_ZB_reHLT_2022','HARVESTRUN3_ZB_2022']] -workflows[140.004] = ['',['RunBTagMu2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.005] = ['',['RunJetHT2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.006] = ['',['RunDisplacedJet2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.007] = ['',['RunMET2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.008] = ['',['RunEGamma2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.009] = ['',['RunTau2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.010] = ['',['RunDoubleMuon2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.011] = ['',['RunMuonEG2022A','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] - -workflows[140.021] = ['',['RunMinimumBias2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.022] = ['',['RunSingleMuon2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.004] = ['',['RunBTagMu2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.005] = ['',['RunJetHT2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.006] = ['',['RunDisplacedJet2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.007] = ['',['RunMET2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.008] = ['',['RunEGamma2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.009] = ['',['RunTau2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] 
+workflows[140.010] = ['',['RunDoubleMuon2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.011] = ['',['RunMuonEG2022A','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] + +workflows[140.021] = ['',['RunMinimumBias2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.022] = ['',['RunSingleMuon2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] workflows[140.023] = ['',['RunZeroBias2022B','HLTDR3_2022','RECONANORUN3_ZB_reHLT_2022','HARVESTRUN3_ZB_2022']] -workflows[140.024] = ['',['RunBTagMu2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.025] = ['',['RunJetHT2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.026] = ['',['RunDisplacedJet2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.027] = ['',['RunMET2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.028] = ['',['RunEGamma2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.029] = ['',['RunTau2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.030] = ['',['RunDoubleMuon2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.031] = ['',['RunMuonEG2022B','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] - -workflows[140.042] = ['',['RunSingleMuon2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.024] = ['',['RunBTagMu2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.025] = ['',['RunJetHT2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.026] = ['',['RunDisplacedJet2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.027] = ['',['RunMET2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.028] = 
['',['RunEGamma2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.029] = ['',['RunTau2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.030] = ['',['RunDoubleMuon2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.031] = ['',['RunMuonEG2022B','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] + +workflows[140.042] = ['',['RunSingleMuon2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] workflows[140.043] = ['',['RunZeroBias2022C','HLTDR3_2022','RECONANORUN3_ZB_reHLT_2022','HARVESTRUN3_ZB_2022']] -workflows[140.044] = ['',['RunBTagMu2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.045] = ['',['RunJetHT2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.046] = ['',['RunDisplacedJet2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.047] = ['',['RunMET2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.048] = ['',['RunEGamma2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.049] = ['',['RunTau2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.050] = ['',['RunDoubleMuon2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.051] = ['',['RunMuonEG2022C','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] - -workflows[140.062] = ['',['RunMuon2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.044] = ['',['RunBTagMu2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.045] = ['',['RunJetHT2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.046] = ['',['RunDisplacedJet2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.047] = ['',['RunMET2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] 
+workflows[140.048] = ['',['RunEGamma2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.049] = ['',['RunTau2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.050] = ['',['RunDoubleMuon2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.051] = ['',['RunMuonEG2022C','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] + +workflows[140.062] = ['',['RunMuon2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] workflows[140.063] = ['',['RunZeroBias2022D','HLTDR3_2022','RECONANORUN3_ZB_reHLT_2022','HARVESTRUN3_ZB_2022']] -workflows[140.064] = ['',['RunBTagMu2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.065] = ['',['RunJetMET2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.066] = ['',['RunDisplacedJet2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.067] = ['',['RunEGamma2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.068] = ['',['RunTau2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.069] = ['',['RunMuonEG2022D','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] - -workflows[140.071] = ['',['RunMuon2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.064] = ['',['RunBTagMu2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.065] = ['',['RunJetMET2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.066] = ['',['RunDisplacedJet2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.067] = ['',['RunEGamma2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.068] = ['',['RunTau2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.069] = ['',['RunMuonEG2022D','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] + 
+workflows[140.071] = ['',['RunMuon2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] workflows[140.072] = ['',['RunZeroBias2022E','HLTDR3_2022','RECONANORUN3_ZB_reHLT_2022','HARVESTRUN3_ZB_2022']] -workflows[140.073] = ['',['RunBTagMu2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.074] = ['',['RunJetMET2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.075] = ['',['RunDisplacedJet2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.076] = ['',['RunEGamma2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.077] = ['',['RunTau2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] -workflows[140.078] = ['',['RunMuonEG2022E','HLTDR3_2022','RECONANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.073] = ['',['RunBTagMu2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.074] = ['',['RunJetMET2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.075] = ['',['RunDisplacedJet2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.076] = ['',['RunEGamma2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.077] = ['',['RunTau2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] +workflows[140.078] = ['',['RunMuonEG2022E','HLTDR3_2022','AODNANORUN3_reHLT_2022','HARVESTRUN3_2022']] ### run3 (2022) skims ### workflows[140.101] = ['',['RunZeroBias2022D','HLTDR3_2022','SKIMZEROBIASRUN3_reHLT_2022','HARVESTRUN3_ZB_2022']] @@ -513,39 +513,39 @@ workflows[140.202] = ['',['RunJetMET2022D_reMINI', 'REMININANO_data2022']] ### run3 (2023) ### -workflows[141.001] = ['',['RunMuon2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.001] = ['',['RunMuon2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] workflows[141.002] = 
['',['RunZeroBias2023B','HLTDR3_2023B','RECONANORUN3_ZB_reHLT_2023B','HARVESTRUN3_ZB_2023B']] -workflows[141.003] = ['',['RunBTagMu2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.004] = ['',['RunNoBPTX2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.005] = ['',['RunHcalNZS2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.006] = ['',['RunHLTPhysics2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.007] = ['',['RunCommissioning2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.008] = ['',['RunJetMET2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.009] = ['',['RunCosmics2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.010] = ['',['RunDisplacedJet2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.011] = ['',['RunEGamma2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.012] = ['',['RunTau2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] -workflows[141.013] = ['',['RunMuonEG2023B','HLTDR3_2023B','RECONANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] - -workflows[141.031] = ['',['RunMuon2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.003] = ['',['RunBTagMu2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.004] = ['',['RunNoBPTX2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.005] = ['',['RunHcalNZS2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.006] = ['',['RunHLTPhysics2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.007] = ['',['RunCommissioning2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.008] = 
['',['RunJetMET2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.009] = ['',['RunCosmics2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.010] = ['',['RunDisplacedJet2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.011] = ['',['RunEGamma2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.012] = ['',['RunTau2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] +workflows[141.013] = ['',['RunMuonEG2023B','HLTDR3_2023B','AODNANORUN3_reHLT_2023B','HARVESTRUN3_2023B']] + +workflows[141.031] = ['',['RunMuon2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] workflows[141.032] = ['',['RunZeroBias2023C','HLTDR3_2023','RECONANORUN3_ZB_reHLT_2023','HARVESTRUN3_ZB_2023']] -workflows[141.033] = ['',['RunBTagMu2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.034] = ['',['RunJetMET2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.035] = ['',['RunDisplacedJet2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.036] = ['',['RunEGamma2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.037] = ['',['RunTau2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.038] = ['',['RunMuonEG2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.039] = ['',['RunParkingDoubleMuonLowMass2023C','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] - -workflows[141.041] = ['',['RunMuon2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.033] = ['',['RunBTagMu2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.034] = ['',['RunJetMET2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.035] = 
['',['RunDisplacedJet2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.036] = ['',['RunEGamma2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.037] = ['',['RunTau2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.038] = ['',['RunMuonEG2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.039] = ['',['RunParkingDoubleMuonLowMass2023C','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] + +workflows[141.041] = ['',['RunMuon2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] workflows[141.042] = ['',['RunZeroBias2023D','HLTDR3_2023','RECONANORUN3_ZB_reHLT_2023','HARVESTRUN3_ZB_2023']] -workflows[141.043] = ['',['RunBTagMu2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.044] = ['',['RunJetMET2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.045] = ['',['RunDisplacedJet2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.046] = ['',['RunEGamma2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.047] = ['',['RunTau2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.048] = ['',['RunMuonEG2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] -workflows[141.049] = ['',['RunParkingDoubleMuonLowMass2023D','HLTDR3_2023','RECONANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.043] = ['',['RunBTagMu2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.044] = ['',['RunJetMET2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.045] = ['',['RunDisplacedJet2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.046] = ['',['RunEGamma2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.047] = 
['',['RunTau2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.048] = ['',['RunMuonEG2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] +workflows[141.049] = ['',['RunParkingDoubleMuonLowMass2023D','HLTDR3_2023','AODNANORUN3_reHLT_2023','HARVESTRUN3_2023']] ### run3 (2023) skims ### workflows[141.101] = ['',['RunZeroBias2023C','HLTDR3_2023','SKIMZEROBIASRUN3_reHLT_2023','HARVESTRUN3_ZB_2023']] @@ -568,6 +568,10 @@ workflows[141.008511] = ['Run3-2023_JetMET2023B_RecoECALOnlyCPU',['RunJetMET2023B','HLTDR3_2023','RECODR3_reHLT_ECALOnlyCPU','HARVESTRUN3_ECALOnly']] workflows[141.008521] = ['Run3-2023_JetMET2023B_RecoHCALOnlyCPU',['RunJetMET2023B','HLTDR3_2023','RECODR3_reHLT_HCALOnlyCPU','HARVESTRUN3_HCALOnly']] +### run3-2023 (2023 HI UPC data) +workflows[141.901] = ['',['RunUPC2023','RECODR3_2023_UPC','HARVESTDPROMPTR3']] +workflows[141.902] = ['',['RunUPC2023','RECODR3_2023_HIN','HARVESTDPROMPTR3']] + ### run3-2023 (2023 HI data RawPrime with re-HLT) workflows[142.0] = ['',['RunHIPhysicsRawPrime2023A','HLTDR3_HI2023ARawprime','RECOHIRUN3_reHLT_2023','HARVESTRUN3_HI2023A']] @@ -684,6 +688,7 @@ workflows[7.24] = ['', ['Cosmics_UP21_0T','DIGICOS_UP21_0T','RECOCOS_UP21_0T','ALCACOS_UP21_0T','HARVESTCOS_UP21_0T']]#2021 0T workflows[7.3] = ['CosmicsSPLoose2018', ['CosmicsSPLoose_UP18','DIGICOS_UP18','RECOCOS_UP18','ALCACOS_UP18','HARVESTCOS_UP18']] workflows[7.4] = ['CosmicsPEAK2018', ['Cosmics_UP18','DIGICOSPEAK_UP18','RECOCOSPEAK_UP18','ALCACOS_UP18','HARVESTCOS_UP18']] +workflows[7.5] = ['', ['Cosmics_Phase2','DIGICOS_Phase2','RECOCOS_Phase2']] #,'ALCACOS_Phase2','HARVESTCOS_Phase2']] inactive at the moment workflows[8] = ['', ['BeamHalo','DIGICOS','RECOCOS','ALCABH','HARVESTCOS']] workflows[8.1] = ['', ['BeamHalo_UP18','DIGICOS_UP18','RECOCOS_UP18','ALCABH_UP18','HARVESTCOS_UP18']] diff --git a/Configuration/PyReleaseValidation/python/relval_steps.py b/Configuration/PyReleaseValidation/python/relval_steps.py index 
162272cebf54d..7dce8d60adb46 100644 --- a/Configuration/PyReleaseValidation/python/relval_steps.py +++ b/Configuration/PyReleaseValidation/python/relval_steps.py @@ -615,6 +615,9 @@ steps['RunMuonEG2023D']={'INPUT':InputInfo(dataSet='/MuonEG/Run2023D-v1/RAW',label='2023D',events=100000,location='STD', ls=Run2023D)} steps['RunParkingDoubleMuonLowMass2023D']={'INPUT':InputInfo(dataSet='/ParkingDoubleMuonLowMass0/Run2023D-v1/RAW',label='2023D',events=100000,location='STD', ls=Run2023D)} +Run2023UPC={375463: [[52,52]]} +steps['RunUPC2023']={'INPUT':InputInfo(dataSet='/HIForward1/HIRun2023A-v1/RAW',label='upc2023',events=10000,location='STD',ls=Run2023UPC)} + RunHI2023={375491: [[100, 100]]} steps['RunHIPhysicsRawPrime2023A']={'INPUT':InputInfo(dataSet='/HIPhysicsRawPrime0/HIRun2023A-v1/RAW',label='HI2023A',events=100000,location='STD', ls=RunHI2023)} @@ -1086,6 +1089,19 @@ def genS(fragment,howMuch): steps['Cosmics_UP21_0T']=merge([{'--magField':'0T','--conditions':'auto:phase1_2022_cosmics_0T'},steps['Cosmics_UP21']]) steps['CosmicsSPLoose_UP17']=merge([{'cfg':'UndergroundCosmicSPLooseMu_cfi.py','-n':'2000','--conditions':'auto:phase1_2017_cosmics','--scenario':'cosmics','--era':'Run2_2017'},Kby(5000,500000),step1Up2015Defaults]) steps['CosmicsSPLoose_UP18']=merge([{'cfg':'UndergroundCosmicSPLooseMu_cfi.py','-n':'2000','--conditions':'auto:phase1_2018_cosmics','--scenario':'cosmics','--era':'Run2_2018'},Kby(5000,500000),step1Up2015Defaults]) + +# Phase2 cosmics with geometry D98 +from Configuration.PyReleaseValidation.upgradeWorkflowComponents import upgradeProperties +phase2CosInfo=upgradeProperties[2026]['2026D98'] # so if the default changes, change wf only here + +steps['Cosmics_Phase2']=merge([{'cfg':'UndergroundCosmicMu_cfi.py', + '-n':'500', + '--conditions': phase2CosInfo['GT'], + '--scenario':'cosmics', + '--era': phase2CosInfo['Era'], + '--geometry': phase2CosInfo['Geom'], + '--beamspot':'HLLHC14TeV'},Kby(666,100000),step1Defaults]) + 
steps['BeamHalo']=merge([{'cfg':'BeamHalo_cfi.py','--scenario':'cosmics'},Kby(9,100),step1Defaults]) steps['BeamHalo_13']=merge([{'cfg':'BeamHalo_13TeV_cfi.py','--scenario':'cosmics'},Kby(9,100),step1Up2015Defaults]) steps['BeamHalo_UP18']=merge([{'cfg':'BeamHalo_13TeV_cfi.py','-n':'500','--conditions':'auto:phase1_2018_cosmics','--scenario':'cosmics','--era':'Run2_2018','--beamspot':'Realistic25ns13TeVEarly2018Collision'},Kby(666,100000),step1Defaults]) @@ -1879,6 +1895,14 @@ def lhegensim2018ml(fragment,howMuch): steps['DIGICOSPEAK_UP17']=merge([{'--conditions':'auto:phase1_2017_cosmics_peak','-s':'DIGI:pdigi_valid,L1,DIGI2RAW,HLT:@relval2017','--customise_commands': '"process.mix.digitizers.strip.APVpeakmode=cms.bool(True)"','--scenario':'cosmics','--eventcontent':'FEVTDEBUG','--datatier':'GEN-SIM-DIGI-RAW', '--era' : 'Run2_2017'},step2Upg2015Defaults]) steps['DIGICOSPEAK_UP18']=merge([{'--conditions':'auto:phase1_2018_cosmics_peak','-s':'DIGI:pdigi_valid,L1,DIGI2RAW,HLT:@relval2018','--customise_commands': '"process.mix.digitizers.strip.APVpeakmode=cms.bool(True)"','--scenario':'cosmics','--eventcontent':'FEVTDEBUG','--datatier':'GEN-SIM-DIGI-RAW', '--era' : 'Run2_2018'},step2Upg2015Defaults]) +steps['DIGICOS_Phase2']=merge([{'--conditions': phase2CosInfo['GT'], + '-s':'DIGI:pdigi_valid,L1,DIGI2RAW,HLT:@fake2', + '--scenario':'cosmics', + '--eventcontent':'FEVTDEBUG', + '--datatier':'GEN-SIM-DIGI-RAW', + '--era' : phase2CosInfo['Era'], + '--geometry': phase2CosInfo['Geom']},step2Upg2015Defaults]) + steps['DIGIPU1']=merge([PU,step2Defaults]) steps['DIGIPU2']=merge([PU2,step2Defaults]) steps['REDIGIPU']=merge([{'-s':'reGEN,reDIGI,L1,DIGI2RAW,HLT:@fake'},steps['DIGIPU1']]) @@ -2074,10 +2098,12 @@ def lhegensim2018ml(fragment,howMuch): hltKey2022='relval2022' steps['HLTDR3_2022']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2022,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3'},steps['HLTD'] ] ) -hltKey2023='relval2023' -steps['HLTDR3_2023']=merge( [ 
{'-s':'L1REPACK:Full,HLT:@%s'%hltKey2023,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3_2023'},steps['HLTD'] ] ) +hltKey2023='relval2023' # currently points to Fake2 + +hltKey2024='relval2024' # currently points to GRun (hacky solution to keep running GRun on real data) +steps['HLTDR3_2023']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2024,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3_2023'},steps['HLTD'] ] ) -steps['HLTDR3_2023B']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2023,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3'},steps['HLTD'] ] ) +steps['HLTDR3_2023B']=merge( [ {'-s':'L1REPACK:Full,HLT:@%s'%hltKey2024,},{'--conditions':'auto:run3_hlt_relval'},{'--era' : 'Run3'},steps['HLTD'] ] ) steps['HLTDR3_HI2023ARawprime']=merge([{'-s':'L1REPACK:Full,HLT:HIon'}, {'--conditions':'auto:run3_hlt_HIon'}, @@ -2541,6 +2567,13 @@ def lhegensim2018ml(fragment,howMuch): steps['ALCAHARVDSIPIXELALIHGCOMBINED']={'-s':'ALCAHARVEST:%s'%(autoPCL['PromptCalibProdSiPixelAliHGComb']), '--conditions':'auto:run2_data', '--scenario':'pp', '--data':'', '--filein':'file:PromptCalibProdSiPixelAliHGComb.root', '--customise':'Alignment/CommonAlignmentProducer/customizeLSNumberFilterForRelVals.lowerHitsPerStructure'} + steps['ALCAHARVDPPSCAL']={'-s':'ALCAHARVEST:%s'%(autoPCL['PromptCalibProdPPSTimingCalib']), '--conditions':'auto:run3_data_express', '--scenario':'pp', @@ -2605,6 +2638,8 @@ def lhegensim2018ml(fragment,howMuch): steps['RECODR3_reHLT_2023']=merge([{'--conditions':'auto:run3_data_prompt_relval', '--hltProcess':'reHLT'},steps['RECODR3_2023']]) steps['RECODR3_reHLT_2023B']=merge([{'--conditions':'auto:run3_data_prompt_relval', '--hltProcess':'reHLT'},steps['RECODR3']]) +steps['RECODR3_2023_HIN']=merge([{'--conditions':'auto:run3_data_prompt',
'-s':'RAW2DIGI,L1Reco,RECO,DQM:@commonFakeHLT+@standardDQMFakeHLT', '--repacked':'', '-n':1000},steps['RECODR3_2023']]) +steps['RECODR3_2023_UPC']=merge([{'--era':'Run3_2023_UPC', '--conditions':'132X_dataRun3_Prompt_HI_LowPtPhotonReg_v2'},steps['RECODR3_2023_HIN']]) steps['RECODR3Splash']=merge([{'-n': 2, '-s': 'RAW2DIGI,L1Reco,RECO,PAT,ALCA:SiStripCalZeroBias+SiStripCalMinBias+TkAlMinBias+EcalESAlign,DQM:@standardDQMFakeHLT+@miniAODDQM' @@ -2892,7 +2927,11 @@ def gen2023HiMix(fragment,howMuch): steps['RECOCOS_UP21_0T']=merge([{'--magField':'0T','--conditions':'auto:phase1_2022_cosmics_0T'},steps['RECOCOS_UP21']]) steps['RECOCOSPEAK_UP17']=merge([{'--conditions':'auto:phase1_2017_cosmics_peak','-s':'RAW2DIGI,L1Reco,RECO,ALCA:MuAlGlobalCosmics,DQM','--scenario':'cosmics','--era':'Run2_2017'},step3Up2015Hal]) steps['RECOCOSPEAK_UP18']=merge([{'--conditions':'auto:phase1_2018_cosmics_peak','-s':'RAW2DIGI,L1Reco,RECO,ALCA:MuAlGlobalCosmics,DQM','--scenario':'cosmics','--era':'Run2_2018'},step3Up2015Hal]) - +steps['RECOCOS_Phase2']=merge([{'--conditions': phase2CosInfo['GT'], + '-s':'RAW2DIGI,L1Reco,RECO,ALCA:MuAlGlobalCosmics', + '--scenario':'cosmics', + '--era': phase2CosInfo['Era'], + '--geometry': phase2CosInfo['Geom']},step3Up2015Hal]) # DQM step excluded for the moment steps['RECOMIN']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,RECOSIM,ALCA:SiStripCalZeroBias+SiStripCalMinBias,VALIDATION,DQM'},stCond,step3Defaults]) steps['RECOMINUP15']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,RECOSIM,ALCA:SiStripCalZeroBias+SiStripCalMinBias,VALIDATION,DQM'},step3Up2015Defaults]) @@ -2940,12 +2979,17 @@ def gen2023HiMix(fragment,howMuch): steps['RECONANORUN3_ZB_reHLT_2022']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@rerecoZeroBiasFakeHLT+@miniAODDQM+@nanoAODDQM'},steps['RECONANORUN3_reHLT_2022']]) steps['RECOCOSMRUN3_reHLT_2022']=merge([{'--scenario':'cosmics','-s':'RAW2DIGI,L1Reco,RECO,DQM','--datatier':'RECO,DQMIO','--eventcontent':'RECO,DQM'},steps['RECONANORUN3_reHLT_2022']]) 
+steps['AODNANORUN3_reHLT_2022']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@standardDQMFakeHLT+@miniAODDQM+@nanoAODDQM','--datatier':'AOD,MINIAOD,NANOAOD,DQMIO','--eventcontent':'AOD,MINIAOD,NANOEDMAOD,DQM'},steps['RECODR3_reHLT_2022']]) + steps['RECONANORUN3_reHLT_2023']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@standardDQM+@miniAODDQM+@nanoAODDQM','--datatier':'RECO,MINIAOD,NANOAOD,DQMIO','--eventcontent':'RECO,MINIAOD,NANOEDMAOD,DQM'},steps['RECODR3_reHLT_2023']]) steps['RECONANORUN3_reHLT_2023B']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@standardDQM+@miniAODDQM+@nanoAODDQM','--datatier':'RECO,MINIAOD,NANOAOD,DQMIO','--eventcontent':'RECO,MINIAOD,NANOEDMAOD,DQM'},steps['RECODR3_reHLT_2023B']]) steps['RECONANORUN3_ZB_reHLT_2023B']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@rerecoZeroBias+@miniAODDQM+@nanoAODDQM'},steps['RECONANORUN3_reHLT_2023B']]) steps['RECONANORUN3_ZB_reHLT_2023']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@rerecoZeroBias+@miniAODDQM+@nanoAODDQM'},steps['RECONANORUN3_reHLT_2023']]) steps['RECOCOSMRUN3_reHLT_2023']=merge([{'--scenario':'cosmics','-s':'RAW2DIGI,L1Reco,RECO,DQM','--datatier':'RECO,DQMIO','--eventcontent':'RECO,DQM'},steps['RECONANORUN3_reHLT_2023']]) +steps['AODNANORUN3_reHLT_2023']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@standardDQM+@miniAODDQM+@nanoAODDQM','--datatier':'AOD,MINIAOD,NANOAOD,DQMIO','--eventcontent':'AOD,MINIAOD,NANOEDMAOD,DQM'},steps['RECODR3_reHLT_2023']]) +steps['AODNANORUN3_reHLT_2023B']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,NANO,DQM:@standardDQM+@miniAODDQM+@nanoAODDQM','--datatier':'AOD,MINIAOD,NANOAOD,DQMIO','--eventcontent':'AOD,MINIAOD,NANOEDMAOD,DQM'},steps['RECODR3_reHLT_2023B']]) + steps['RECOHIRUN3_reHLT_2023']=merge([{'-s':'RAW2DIGI,L1Reco,RECO,PAT,DQM:@standardDQM','--datatier':'RECO,MINIAOD,DQMIO','--eventcontent':'RECO,MINIAOD,DQM','--era':'Run3_pp_on_PbPb_approxSiStripClusters_2023','--conditions':'auto:run3_data_HIon'},steps['RECODR3_reHLT_2023']]) # 
patatrack validation in data @@ -3230,6 +3274,11 @@ def gen2023HiMix(fragment,howMuch): steps['ALCACOSDPROMPTRUN3']=merge([{'--conditions':'auto:run3_data_prompt','--era':'Run3','-s':'ALCA:SiPixelCalCosmics+TkAlCosmics0T+SiStripCalCosmics+SiStripCalCosmicsNano+MuAlGlobalCosmics+HcalCalHOCosmics+DQM'},steps['ALCACOSD']]) steps['ALCACOSDEXPRUN3']=merge([{'--conditions':'auto:run3_data_express','--era':'Run3','-s':'ALCA:SiPixelCalCosmics+TkAlCosmics0T+SiStripCalCosmics+SiStripCalCosmicsNano+MuAlGlobalCosmics+HcalCalHOCosmics+DQM'},steps['ALCACOSD']]) +steps['ALCACOS_Phase2']=merge([{'--conditions': phase2CosInfo['GT'], + '--era': phase2CosInfo['Era'], + '-s':'ALCA:SiPixelCalCosmics+TkAlCosmics0T+DQM', + '--geometry': phase2CosInfo['Geom']},steps['ALCACOSD']]) + steps['ALCAPROMPT']={'-s':'ALCA:PromptCalibProd', '--filein':'file:TkAlMinBias.root', '--conditions':'auto:run1_data', @@ -3242,18 +3291,19 @@ def gen2023HiMix(fragment,howMuch): '--eventcontent':'ALCARECO', '--triggerResultsProcess': 'RECO'} -steps['ALCAEXPRUN2']={'-s':'ALCAOUTPUT:SiStripCalZeroBias+TkAlMinBias+LumiPixelsMinBias+AlCaPCCZeroBiasFromRECO+AlCaPCCRandomFromRECO+SiPixelCalZeroBias+SiPixelCalSingleMuon,ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiStripGains+PromptCalibProdSiStripGainsAAG+PromptCalibProdSiStripHitEff+PromptCalibProdSiPixelAli+PromptCalibProdSiPixelAliHG+PromptCalibProdSiPixel+PromptCalibProdSiPixelLA', +steps['ALCAEXPRUN2']={'-s':'ALCAOUTPUT:SiStripCalZeroBias+TkAlMinBias+LumiPixelsMinBias+AlCaPCCZeroBiasFromRECO+AlCaPCCRandomFromRECO+SiPixelCalZeroBias+SiPixelCalSingleMuon,ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiStripGains+PromptCalibProdSiStripGainsAAG+PromptCalibProdSiStripHitEff+PromptCalibProdSiPixelAli+PromptCalibProdSiPixelAliHG+PromptCalibProdSiPixelAliHGComb+PromptCalibProdSiPixel+PromptCalibProdSiPixelLA', '--customise': 'Alignment/CommonAlignmentProducer/customizeLSNumberFilterForRelVals.doNotFilterLS', '--conditions':'auto:run2_data', 
'--datatier':'ALCARECO', '--eventcontent':'ALCARECO', '--triggerResultsProcess': 'RECO'} -steps['ALCAEXPRUN3']={'-s':'ALCAOUTPUT:@allForPrompt+@allForExpress,ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiStripGains+PromptCalibProdSiStripGainsAAG+PromptCalibProdSiStripHitEff+PromptCalibProdSiPixelAli+PromptCalibProdSiPixel+PromptCalibProdSiPixelLA+PromptCalibProdBeamSpotHP+PromptCalibProdBeamSpotHPLowPU', - '--conditions':'auto:run3_data', - '--datatier':'ALCARECO', - '--eventcontent':'ALCARECO', - '--triggerResultsProcess': 'RECO'} +steps['ALCAEXPRUN3']={'-s':'ALCAOUTPUT:@allForPrompt+@allForExpress,ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiStripGains+PromptCalibProdSiStripGainsAAG+PromptCalibProdSiStripHitEff+PromptCalibProdSiPixelAli+PromptCalibProdSiPixelAliHG+PromptCalibProdSiPixelAliHGComb+PromptCalibProdSiPixel+PromptCalibProdSiPixelLA+PromptCalibProdBeamSpotHP+PromptCalibProdBeamSpotHPLowPU', + '--customise': 'Alignment/CommonAlignmentProducer/customizeLSNumberFilterForRelVals.doNotFilterLS', + '--conditions':'auto:run3_data', + '--datatier':'ALCARECO', + '--eventcontent':'ALCARECO', + '--triggerResultsProcess': 'RECO'} steps['ALCAEXPCOSMICSRUN3']={'-s':'ALCAOUTPUT:@allForExpressCosmics,ALCA:PromptCalibProdSiStrip+PromptCalibProdSiPixelLAMCS+PromptCalibProdSiStripLA', '-n':1000, '--scenario':'cosmics', @@ -3271,7 +3321,7 @@ def gen2023HiMix(fragment,howMuch): '--eventcontent':'ALCARECO', '--triggerResultsProcess': 'RECO'} -steps['ALCARECOEXPR3']=merge([{'-s':'ALCAOUTPUT:SiPixelCalZeroBias+SiStripCalZeroBias+SiStripCalMinBias+SiStripCalMinBiasAAG+TkAlMinBias,ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiPixelAli+PromptCalibProdSiPixelAliHG+PromptCalibProdSiStripGains+PromptCalibProdSiStripGainsAAG+PromptCalibProdSiPixel', 
+steps['ALCARECOEXPR3']=merge([{'-s':'ALCAOUTPUT:SiPixelCalZeroBias+SiStripCalZeroBias+SiStripCalMinBias+SiStripCalMinBiasAAG+TkAlMinBias,ALCA:PromptCalibProd+PromptCalibProdSiStrip+PromptCalibProdSiPixelAli+PromptCalibProdSiPixelAliHG+PromptCalibProdSiPixelAliHGComb+PromptCalibProdSiStripGains+PromptCalibProdSiStripGainsAAG+PromptCalibProdSiPixel', '--conditions':'auto:run3_data_express', '--scenario':'pp', '--era':'Run3', @@ -3414,12 +3464,12 @@ def gen2023HiMix(fragment,howMuch): '--data':'', '--filein':'file:PromptCalibProdEcalPedestals.root'} -steps['ALCAHARVDEXPRUN3']={'-s':'ALCAHARVEST:SiStripQuality+SiStripHitEff+SiStripGains+SiStripGainsAAG+SiPixelAli+SiPixelQuality+SiPixelLA+BeamSpotHPByRun+BeamSpotHPByLumi+BeamSpotHPLowPUByRun+BeamSpotHPLowPUByLumi', - '--conditions':'auto:run3_data', - '--scenario':'pp', - '--data':'', - '--filein':'file:PromptCalibProd.root'} - +steps['ALCAHARVDEXPRUN3']={'-s':'ALCAHARVEST:SiStripQuality+SiStripHitEff+SiStripGains+SiStripGainsAAG+SiPixelAli+SiPixelAliHG+SiPixelAliHGCombined+SiPixelQuality+SiPixelLA+BeamSpotHPByRun+BeamSpotHPByLumi+BeamSpotHPLowPUByRun+BeamSpotHPLowPUByLumi', + '--customise':'Alignment/CommonAlignmentProducer/customizeLSNumberFilterForRelVals.lowerHitsPerStructure', + '--conditions':'auto:run3_data', + '--scenario':'pp', + '--data':'', + '--filein':'file:PromptCalibProd.root'} steps['RECOHISt4']=steps['RECOHI2015'] @@ -3721,6 +3771,16 @@ def gen2023HiMix(fragment,howMuch): steps['HARVESTCOS_UP21_0T']=merge([{'--magField':'0T','--conditions':'auto:phase1_2022_cosmics_0T'},steps['HARVESTCOS_UP21']]) +steps['HARVESTCOS_Phase2']={'-s' : 'HARVESTING:@cosmics', + '--conditions': phase2CosInfo['GT'], + '--mc' : '', + '--filein' : 'file:step3_inDQM.root', + '--scenario' : 'cosmics', + '--filetype' : 'DQM', + '--era' : phase2CosInfo['Era'], + '--geometry' : phase2CosInfo['Geom'] + } + steps['HARVESTFS']={'-s':'HARVESTING:validationHarvestingFS', '--conditions':'auto:run1_mc', '--mc':'', @@ -4110,7 +4170,7 @@ def 
gen2023HiMix(fragment,howMuch): ### ################################################################################# -from Configuration.PyReleaseValidation.upgradeWorkflowComponents import * +from Configuration.PyReleaseValidation.upgradeWorkflowComponents import * # imported from above, only non-empty values should be provided here defaultDataSets['2017']='CMSSW_12_0_0_pre4-113X_mc2017_realistic_v5-v' diff --git a/Configuration/PyReleaseValidation/python/upgradeWorkflowComponents.py b/Configuration/PyReleaseValidation/python/upgradeWorkflowComponents.py index 442733ea10c8f..55ba7d6321b13 100644 --- a/Configuration/PyReleaseValidation/python/upgradeWorkflowComponents.py +++ b/Configuration/PyReleaseValidation/python/upgradeWorkflowComponents.py @@ -69,6 +69,16 @@ '2026D102PU', '2026D103', '2026D103PU', + '2026D104', + '2026D104PU', + '2026D105', + '2026D105PU', + '2026D106', + '2026D106PU', + '2026D107', + '2026D107PU', + '2026D108', + '2026D108PU', ] # pre-generation of WF numbers @@ -778,10 +788,13 @@ def condition(self, fragment, stepList, key, hasHarvest): # - 2018 conditions, TTbar # - 2018 conditions, Z->mumu # - 2022 conditions (labelled "2021"), TTbar +# - 2022 conditions (labelled "2021"), NuGun # - 2022 conditions (labelled "2021"), Z->mumu # - 2023 conditions, TTbar +# - 2023 conditions, NuGun # - 2023 conditions, Z->mumu # - 2026 conditions, TTbar +# - 2026 conditions, NuGu class PatatrackWorkflow(UpgradeWorkflow): def __init__(self, digi = {}, reco = {}, mini = {}, harvest = {}, **kwargs): # adapt the parameters for the UpgradeWorkflow init method @@ -839,10 +852,12 @@ def condition(self, fragment, stepList, key, hasHarvest): ('2018' in key and fragment == "TTbar_13"), ('2021' in key and fragment == "TTbar_14TeV" and 'FS' not in key), ('2023' in key and fragment == "TTbar_14TeV" and 'FS' not in key), + ('2021' in key and fragment == "NuGun"), + ('2023' in key and fragment == "NuGun"), ('2018' in key and fragment == "ZMM_13"), ('2021' in key and 
fragment == "ZMM_14" and 'FS' not in key), ('2023' in key and fragment == "ZMM_14" and 'FS' not in key), - ('2026' in key and fragment == "TTbar_14TeV"), + ('2026' in key and (fragment == "TTbar_14TeV" or fragment=="NuGun")), (('HI' in key) and 'Hydjet' in fragment and "PixelOnly" in self.suffix ) ] result = any(selected) and hasHarvest @@ -885,6 +900,7 @@ def setup_(self, step, stepName, stepDict, k, properties): # - HLT on CPU # - Pixel-only reconstruction on CPU, with DQM and validation # - harvesting + upgradeWFs['PatatrackPixelOnlyCPU'] = PatatrackWorkflow( digi = { # the HLT menu is already set up for using GPUs if available and if the "gpu" modifier is enabled @@ -1044,6 +1060,26 @@ def setup_(self, step, stepName, stepDict, k, properties): offset = 0.508, ) +# ECAL-only workflow running on CPU or GPU with Alpaka code +# - HLT with Alpaka +# - ECAL-only reconstruction with Alpaka, with DQM and validation +# - harvesting +upgradeWFs['PatatrackECALOnlyAlpaka'] = PatatrackWorkflow( + digi = { + # customize the ECAL Local Reco part of the HLT menu for Alpaka + '--procModifiers': 'alpaka', # alpaka modifier activates customiseHLTForAlpaka + }, + reco = { + '-s': 'RAW2DIGI:RawToDigi_ecalOnly,RECO:reconstruction_ecalOnly,VALIDATION:@ecalOnlyValidation,DQM:@ecalOnly', + '--procModifiers': 'alpaka' + }, + harvest = { + '-s': 'HARVESTING:@ecalOnlyValidation+@ecal' + }, + suffix = 'Patatrack_ECALOnlyAlpaka', + offset = 0.411, +) + # ECAL-only workflow running on CPU # - HLT on CPU # - ECAL-only reconstruction on CPU, with DQM and validation @@ -1504,6 +1540,53 @@ def setup_(self, step, stepName, stepDict, k, properties): offset = 0.597, ) + +# Alpaka workflows + +upgradeWFs['PatatrackPixelOnlyAlpaka'] = PatatrackWorkflow( + digi = { + '--procModifiers': 'alpaka', # alpaka modifier activates customiseHLTForAlpaka + }, + reco = { + '-s': 'RAW2DIGI:RawToDigi_pixelOnly,RECO:reconstruction_pixelTrackingOnly,VALIDATION:@pixelTrackingOnlyValidation,DQM:@pixelTrackingOnlyDQM', 
+ '--procModifiers': 'alpaka' + }, + harvest = { + '-s': 'HARVESTING:@trackingOnlyValidation+@pixelTrackingOnlyDQM' + }, + suffix = 'Patatrack_PixelOnlyAlpaka', + offset = 0.402, +) + +upgradeWFs['PatatrackPixelOnlyAlpakaValidation'] = PatatrackWorkflow( + digi = { + '--procModifiers': 'alpaka', # alpaka modifier activates customiseHLTForAlpaka + }, + reco = { + '-s': 'RAW2DIGI:RawToDigi_pixelOnly,RECO:reconstruction_pixelTrackingOnly,VALIDATION:@pixelTrackingOnlyValidation,DQM:@pixelTrackingOnlyDQM', + '--procModifiers': 'alpakaValidation' + }, + harvest = { + '-s': 'HARVESTING:@trackingOnlyValidation+@pixelTrackingOnlyDQM' + }, + suffix = 'Patatrack_PixelOnlyAlpaka_Validation', + offset = 0.403, +) + +upgradeWFs['PatatrackPixelOnlyAlpakaProfiling'] = PatatrackWorkflow( + digi = { + '--procModifiers': 'alpaka', # alpaka modifier activates customiseHLTForAlpaka + }, + reco = { + '-s': 'RAW2DIGI:RawToDigi_pixelOnly,RECO:reconstruction_pixelTrackingOnly', + '--procModifiers': 'alpaka', + '--customise' : 'RecoTracker/Configuration/customizePixelOnlyForProfiling.customizePixelOnlyForProfilingGPUOnly' + }, + harvest = None, + suffix = 'Patatrack_PixelOnlyAlpaka_Profiling', + offset = 0.404, +) + # end of Patatrack workflows class UpgradeWorkflow_ProdLike(UpgradeWorkflow): @@ -1880,7 +1963,7 @@ def condition(self, fragment, stepList, key, hasHarvest): # ECAL component class UpgradeWorkflow_ECalComponent(UpgradeWorkflow): - def __init__(self, suffix, offset, ecalMod, + def __init__(self, suffix, offset, ecalTPPh2, ecalMod, steps = [ 'GenSim', 'GenSimHLBeamSpot', @@ -1888,6 +1971,9 @@ def __init__(self, suffix, offset, ecalMod, 'GenSimHLBeamSpotHGCALCloseBy', 'Digi', 'DigiTrigger', + 'RecoGlobal', + 'HARVESTGlobal', + 'ALCAPhase2', ], PU = [ 'GenSim', @@ -1896,14 +1982,35 @@ def __init__(self, suffix, offset, ecalMod, 'GenSimHLBeamSpotHGCALCloseBy', 'Digi', 'DigiTrigger', + 'RecoGlobal', + 'HARVESTGlobal', + 'ALCAPhase2', ]): super(UpgradeWorkflow_ECalComponent, 
self).__init__(steps, PU, suffix, offset) + self.__ecalTPPh2 = ecalTPPh2 self.__ecalMod = ecalMod - + def setup_(self, step, stepName, stepDict, k, properties): - if 'Sim' in step or 'Digi' in step: + stepDict[stepName][k] = deepcopy(stepDict[step][k]) + if 'Sim' in step: + if self.__ecalMod is not None: + stepDict[stepName][k] = merge([{'--procModifiers':self.__ecalMod},stepDict[step][k]]) + if 'Digi' in step: if self.__ecalMod is not None: stepDict[stepName][k] = merge([{'--procModifiers':self.__ecalMod},stepDict[step][k]]) + if self.__ecalTPPh2 is not None: + mods = {'--era': stepDict[step][k]['--era']+',phase2_ecal_devel,phase2_ecalTP_devel'} + mods['-s'] = 'DIGI:pdigi_valid,DIGI2RAW,HLT:@fake2' + stepDict[stepName][k] = merge([mods, stepDict[step][k]]) + if 'RecoGlobal' in step: + stepDict[stepName][k] = merge([{'-s': 'RAW2DIGI,RECO,RECOSIM,PAT', + '--datatier':'GEN-SIM-RECO', + '--eventcontent':'FEVTDEBUGHLT', + }, stepDict[step][k]]) + if 'HARVESTGlobal' in step: + stepDict[stepName][k] = None + if 'ALCAPhase2' in step: + stepDict[stepName][k] = None def condition(self, fragment, stepList, key, hasHarvest): return ('2021' in key or '2023' in key or '2026' in key) @@ -1911,12 +2018,35 @@ def condition(self, fragment, stepList, key, hasHarvest): upgradeWFs['ECALComponent'] = UpgradeWorkflow_ECalComponent( suffix = '_ecalComponent', offset = 0.631, + ecalTPPh2 = None, ecalMod = 'ecal_component', ) upgradeWFs['ECALComponentFSW'] = UpgradeWorkflow_ECalComponent( suffix = '_ecalComponentFSW', offset = 0.632, + ecalTPPh2 = None, + ecalMod = 'ecal_component_finely_sampled_waveforms', +) + +upgradeWFs['ECALTPPh2'] = UpgradeWorkflow_ECalComponent( + suffix = '_ecalTPPh2', + offset = 0.633, + ecalTPPh2 = 'phase2_ecal_devel,phase2_ecalTP_devel', + ecalMod = None, +) + +upgradeWFs['ECALTPPh2Component'] = UpgradeWorkflow_ECalComponent( + suffix = '_ecalTPPh2Component', + offset = 0.634, + ecalTPPh2 = 'phase2_ecal_devel,phase2_ecalTP_devel', + ecalMod = 'ecal_component', 
+) + +upgradeWFs['ECALTPPh2ComponentFSW'] = UpgradeWorkflow_ECalComponent( + suffix = '_ecalTPPh2ComponentFSW', + offset = 0.635, + ecalTPPh2 = 'phase2_ecal_devel,phase2_ecalTP_devel', ecalMod = 'ecal_component_finely_sampled_waveforms', ) @@ -2585,7 +2715,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2017_design', 'HLTmenu': '@relval2017', 'Era' : 'Run2_2017', - 'BeamSpot': 'GaussSigmaZ4cm', + 'BeamSpot': 'DBdesign', 'ScenToRun' : ['GenSim','Digi','RecoFakeHLT','HARVESTFakeHLT'], }, '2018' : { @@ -2593,7 +2723,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2018_realistic', 'HLTmenu': '@relval2018', 'Era' : 'Run2_2018', - 'BeamSpot': 'Realistic25ns13TeVEarly2018Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoFakeHLT','HARVESTFakeHLT','ALCA','Nano'], }, '2018Design' : { @@ -2601,7 +2731,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2018_design', 'HLTmenu': '@relval2018', 'Era' : 'Run2_2018', - 'BeamSpot': 'GaussSigmaZ4cm', + 'BeamSpot': 'DBdesign', 'ScenToRun' : ['GenSim','Digi','RecoFakeHLT','HARVESTFakeHLT'], }, '2021' : { @@ -2609,7 +2739,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2022_realistic', 'HLTmenu': '@relval2022', 'Era' : 'Run3', - 'BeamSpot': 'Realistic25ns13p6TeVEOY2022Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNanoFakeHLT','HARVESTNanoFakeHLT','ALCA'], }, '2021Design' : { @@ -2617,7 +2747,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2022_design', 'HLTmenu': '@relval2022', 'Era' : 'Run3', - 'BeamSpot': 'GaussSigmaZ4cm', + 'BeamSpot': 'DBdesign', 'ScenToRun' : ['GenSim','Digi','RecoNanoFakeHLT','HARVESTNanoFakeHLT'], }, '2023' : { @@ -2625,15 +2755,15 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2023_realistic', 'HLTmenu': '@relval2023', 'Era' : 'Run3_2023', - 'BeamSpot': 
'Realistic25ns13p6TeVEarly2023Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNano','HARVESTNano','ALCA'], }, '2024' : { 'Geom' : 'DB:Extended', 'GT' : 'auto:phase1_2024_realistic', - 'HLTmenu': '@relval2023', + 'HLTmenu': '@relval2024', 'Era' : 'Run3', - 'BeamSpot': 'Realistic25ns13p6TeVEarly2022Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNano','HARVESTNano','ALCA'], }, '2021FS' : { @@ -2641,7 +2771,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2022_realistic', 'HLTmenu': '@relval2022', 'Era' : 'Run3_FastSim', - 'BeamSpot': 'Realistic25ns13p6TeVEarly2022Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['Gen','FastSimRun3','HARVESTFastRun3'], }, '2021postEE' : { @@ -2649,7 +2779,7 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2022_realistic_postEE', 'HLTmenu': '@relval2022', 'Era' : 'Run3', - 'BeamSpot': 'Realistic25ns13p6TeVEarly2022Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNanoFakeHLT','HARVESTNanoFakeHLT','ALCA'], }, '2023FS' : { @@ -2657,39 +2787,39 @@ def condition(self, fragment, stepList, key, hasHarvest): 'GT' : 'auto:phase1_2023_realistic', 'HLTmenu': '@relval2023', 'Era' : 'Run3_2023_FastSim', - 'BeamSpot': 'Realistic25ns13p6TeVEarly2023Collision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['Gen','FastSimRun3','HARVESTFastRun3'], }, '2022HI' : { 'Geom' : 'DB:Extended', - 'GT':'auto:phase1_2022_realistic_hi', + 'GT':'auto:phase1_2022_realistic_hi', 'HLTmenu': '@fake2', 'Era':'Run3_pp_on_PbPb', - 'BeamSpot': 'Realistic2022PbPbCollision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNano','HARVESTNano','ALCA'], }, '2022HIRP' : { 'Geom' : 'DB:Extended', - 'GT':'auto:phase1_2022_realistic_hi', + 'GT':'auto:phase1_2022_realistic_hi', 'HLTmenu': '@fake2', 'Era':'Run3_pp_on_PbPb_approxSiStripClusters', - 'BeamSpot': 'Realistic2022PbPbCollision', + 'BeamSpot': 
'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNano','HARVESTNano','ALCA'], }, '2023HI' : { 'Geom' : 'DB:Extended', - 'GT':'auto:phase1_2023_realistic_hi', + 'GT':'auto:phase1_2023_realistic_hi', 'HLTmenu': '@fake2', 'Era':'Run3_pp_on_PbPb', - 'BeamSpot': 'Realistic2022PbPbCollision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNano','HARVESTNano','ALCA'], }, '2023HIRP' : { 'Geom' : 'DB:Extended', - 'GT':'auto:phase1_2023_realistic_hi', + 'GT':'auto:phase1_2023_realistic_hi', 'HLTmenu': '@fake2', 'Era':'Run3_pp_on_PbPb_approxSiStripClusters', - 'BeamSpot': 'Realistic2022PbPbCollision', + 'BeamSpot': 'DBrealistic', 'ScenToRun' : ['GenSim','Digi','RecoNano','HARVESTNano','ALCA'], } } @@ -2802,7 +2932,7 @@ def condition(self, fragment, stepList, key, hasHarvest): }, '2026D102' : { 'Geom' : 'Extended2026D102', - 'HLTmenu': '@fake2', + 'HLTmenu': '@relval2026', 'GT' : 'auto:phase2_realistic_T33', 'Era' : 'Phase2C17I13M9', 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], @@ -2814,6 +2944,41 @@ def condition(self, fragment, stepList, key, hasHarvest): 'Era' : 'Phase2C17I13M9', 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], }, + '2026D104' : { + 'Geom' : 'Extended2026D104', + 'HLTmenu': '@relval2026', + 'GT' : 'auto:phase2_realistic_T33', + 'Era' : 'Phase2C22I13M9', + 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], + }, + '2026D105' : { + 'Geom' : 'Extended2026D105', + 'HLTmenu': '@relval2026', + 'GT' : 'auto:phase2_realistic_T33', + 'Era' : 'Phase2C17I13M9', + 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], + }, + '2026D106' : { + 'Geom' : 'Extended2026D106', + 'HLTmenu': '@relval2026', + 'GT' : 'auto:phase2_realistic_T33', + 'Era' : 'Phase2C22I13M9', + 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], + }, + '2026D107' : { 
+ 'Geom' : 'Extended2026D107', + 'HLTmenu': '@relval2026', + 'GT' : 'auto:phase2_realistic_T25', + 'Era' : 'Phase2C17I13M9', + 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], + }, + '2026D108' : { + 'Geom' : 'Extended2026D108', + 'HLTmenu': '@relval2026', + 'GT' : 'auto:phase2_realistic_T33', + 'Era' : 'Phase2C17I13M9', + 'ScenToRun' : ['GenSimHLBeamSpot','DigiTrigger','RecoGlobal', 'HARVESTGlobal', 'ALCAPhase2'], + }, } # standard PU sequences diff --git a/Configuration/PyReleaseValidation/scripts/README.md b/Configuration/PyReleaseValidation/scripts/README.md index 24bfb66db9670..a4850843a16e9 100644 --- a/Configuration/PyReleaseValidation/scripts/README.md +++ b/Configuration/PyReleaseValidation/scripts/README.md @@ -289,3 +289,102 @@ matrix> All commands come with dynamic TAB-completion. There's also a transient history of the commands issued within a single session. Transient means that, after a session is closed, the history is lost. + +### Limited Matrix for (also) PR Testing + +The "limited" predefined set of workflows is used in PR integration testing. Here the workflows run. 
+ +MC workflows for pp collisions: + +| **WF** | **Fragment/Input** | **Conditions** | **Era** | **Notes** | +|--- |--- |--- |--- |--- | +| | | | | | +| **Run1** | | | | | +| | | | | | +| 5.1 | TTbar_8TeV_TuneCUETP8M1 | run1_mc | | *FastSim* | +| 8 | RelValBeamHalo | run1_mc | | Cosmics | +| 9.0 | RelValHiggs200ChargedTaus | run1_mc | | | +| 25 | RelValTTbar | run1_mc | | | +| 101.0 | SingleElectronE120EHCAL | run1_mc | | + ECALHCAL.customise + fullMixCustomize_cff.setCrossingFrameOn | +| | | | | | +| **Run2** | | | | | +| | | | | | +| 7.3 | UndergroundCosmicSPLooseMu | run2_2018 | | | +| 1306.0 | RelValSingleMuPt1_UP15 | run2_mc | Run2_2016 | with miniAOD | +| 1330 | RelValZMM_13 | run2_mc | Run2_2016 | | +| 135.4 | ZEE_13TeV_TuneCUETP8M1 | run2_mc | Run2_2016 | *FastSim* | +| 25202.0 | RelValTTbar_13 | run2_mc | Run2_2016 | AVE_35_BX_25ns | +| 250202.181 | RelValTTbar_13 (PREMIX) | phase1_2018_realistic | Run2_2018 | | | +| | | | | | +| **Run3** | | | | | +| | | | | | +| 11634.0 | TTbar_14TeV | phase1_2022_realistic | Run3 | | +| 13234.0 | RelValTTbar_14TeV | phase1_2022_realistic | Run3_FastSim | *FastSim* | +| 12434.0 | RelValTTbar_14TeV | phase1_2023_realistic | Run3_2023 | | +| 12446.0 | RelValZEE_14 | phase1_2023_realistic | Run3_2023 | | +| 12634.0 | RelValTTbar_14TeV | phase1_2023_realistic | Run3_2023 | Run3_Flat55To75_PoissonOOTPU | +| 12434.7 | RelValTTbar_14TeV | phase1_2023_realistic | Run3_2023 | mkFit | +| 14034.0 | RelValTTbar_14TeV | phase1_2023_realistic | Run3_2023_FastSim | *FastSim* | +| 14234.0 | RelValTTbar_14TeV | phase1_2023_realistic | Run3_2023_FastSim | *FastSim* Run3_Flat55To75_PoissonOOTPU | +| 2500.4 | RelValTTbar_14TeV | phase1_2022_realistic | Run3 | NanoAOD from existing MINI | +| | | | | | +| **Phase2** | | | | **Geometry** | +| | | | | | +| 24834.0 | RelValTTbar_14TeV | phase2_realistic_T25 | Phase2C17I13M9 | Extended2026D98 | (Phase-2 baseline) +| 24834.911 | TTbar_14TeV_TuneCP5 | phase2_realistic_T25 | Phase2C17I13M9 | 
DD4hepExtended2026D98 | DD4Hep (HLLHC14TeV BeamSpot) +| 25034.999 | RelValTTbar_14TeV (PREMIX) | phase2_realistic_T25 | Phase2C17I13M9 | Extended2026D98 | AVE_50_BX_25ns_m3p3 +| 24896.0 | RelValCloseByPGun_CE_E_Front_120um | phase2_realistic_T25 | Phase2C17I13M9 | Extended2026D98 | +| 24900.0 | RelValCloseByPGun_CE_H_Coarse_Scint | phase2_realistic_T25 | Phase2C17I13M9 | Extended2026D98 | +| 23234.0 | TTbar_14TeV_TuneCP5 | phase2_realistic_T21 | Phase2C20I13M9 | Extended2026D94 | (exercise with HFNose) + +pp Data reRECO workflows: + +| Data | | | | | +|--- |--- |--- |--- |--- | +| **WF** | **Input** | **Conditions** | **Era** | **Notes** | +| | | | | | +| **Run1** | | | | | +| | | | | | +| 4.22 | Run2011A Cosmics | run1_data | | *Cosmics* | +| 4.53 | Run2012B Photon | run1_hlt_Fake | | + miniAODs | +| 1000 | Run2011A MinimumBias Prompt | run1_data | | + RecoTLR.customisePrompt | +| 1001 | Run2011A MinimumBias | run1_data | | Data+Express | +| | | | | | +| **Run2** | | | | | +| | | | | | +| 136.731 | Run2016B SinglePhoton | | | | +| 136.7611 | Run2016E JetHT (reMINIAOD) | run2_data | Run2_2016_HIPM | + run2_miniAOD_80XLegacy custom | +| 136.8311 | Run2017F JetHT (reMINIAOD) | run2_data | Run2_2017 | + run2_miniAOD_94XFall17 custom | +| 136.88811 | Run2018D JetHT (reMINIAOD) | run2_data | Run2_2018 | + run2_miniAOD_UL_preSummer20 (UL MINI) custom | +| 136.793 | Run2017C DoubleEG | run2_hlt_relval | Run2_2017 | HLT:@relval2017| +| 136.874 | Run2018C EGamma | run2_hlt_relval | Run2_2018 | HLT@relval2018 | +| | | | | | +| **Run3** | | | | | +| | | | | | +| 2021 | | | | | +| 139.001 | Run2021 MinimumBias | run3_hlt_relval | Run3 | HLT@relval2022 (Commissioning2021) | +| 2022 | | | | | +| 140.023 | Run2022B ZeroBias | run3_hlt_relval | Run3 | HLT:@relval2022 | +| 140.043 | Run2022C ZeroBias | run3_hlt_relval | Run3 | HLT:@relval2022 | +| 140.063 | Run2022D ZeroBias | run3_hlt_relval | Run3 | HLT:@relval2022 | +| 2023 | | | | | +| 141.044 | Run2023D JetMET0 | 
run3_hlt_relval | Run3_2023 | HLT@relval2024 | +| 141.042 | Run2023D ZeroBias | run3_hlt_relval | Run3_2023 | HLT@relval2024 | +| 141.046 | Run2023D EGamma0 | run3_hlt_relval | Run3_2023 | HLT@relval2024 | + + +And Heavy Ion workflows: + +| **HIon** | | | | | +|--- |--- |--- |--- |--- | +| **WF** | **Fragment/Input** | **Conditions** | **Era** | **Notes** | +| | | | | | +| **Data** | | | | | +| | | | | | +| 140.53 | HIRun2011 HIMinBiasUPC | run1_data | | +| 140.56 | HIRun2018A HIHardProbes | run2_data_promptlike_hi | Run2_2018_pp_on_AA | +| | | | | | +| **MC** | | | | | +| | | | | | +| 158.01 | RelValHydjetQ_B12_5020GeV_2018_ppReco (reMINIAOD) | phase1_2018_realistic_hi | Run2_2018_pp_on_AA | (HI MC with pp-like reco) | +| 312.0 | Pyquen_ZeemumuJets_pt10_2760GeV | phase1_2022_realistic_hi | Run3_pp_on_PbPb | PU = HiMixGEN | diff --git a/Configuration/PyReleaseValidation/scripts/runTheMatrix.py b/Configuration/PyReleaseValidation/scripts/runTheMatrix.py index 610460b31f02d..4138e276b5454 100755 --- a/Configuration/PyReleaseValidation/scripts/runTheMatrix.py +++ b/Configuration/PyReleaseValidation/scripts/runTheMatrix.py @@ -58,55 +58,83 @@ def runSelected(opt): #this can get out of here predefinedSet={ - 'limited' : [5.1, #FastSim ttbar - 7.3, #CosmicsSPLoose_UP17 - 8, #BH/Cosmic MC - 25, #MC ttbar - 4.22, #cosmic data - 4.53, #run1 data + miniAOD - 9.0, #Higgs200 charged taus - 1000, #data+prompt - 1001, #data+express - 101.0, #SingleElectron120E120EHCAL - 136.731, #2016B Photon data - 136.7611, #2016E JetHT reMINIAOD from 80X legacy - 136.8311, #2017F JetHT reMINIAOD from 94X reprocessing - 136.88811,#2018D JetHT reMINIAOD from UL processing - 136.793, #2017C DoubleEG - 136.874, #2018C EGamma - 138.4, #2021 MinimumBias prompt reco - 138.5, #2021 MinimumBias express - 139.001, #2021 MinimumBias offline with HLT step - 140.53, #2011 HI data - 140.56, #2018 HI data - 158.01, #reMiniAOD of 2018 HI MC with pp-like reco - 312.0, #2021/Run3 HI MC Pyquen_ZeemumuJets_pt10 
with pp-like reco - 1306.0, #SingleMu Pt1 UP15 - 2500.4, #test NanoAOD from existing MINI - 1330, #Run2 2015/2016 MC Zmm - 135.4, #Run 2 2015/2016 Zee ttbar fastsim - 10042.0, #2017 ZMM - 10024.0, #2017 ttbar - 10824.0, #2018 ttbar - 2018.1, #2018 ttbar fastsim - 11634.911, #2021 DD4hep ttbar reading geometry from XML - 11634.914, #2021 DDD ttbar reading geometry from the DB - 11634.0, #2021 ttbar (switching to DD4hep by default) - 13234.0, #2021 ttbar fastsim - 12434.0, #2023 ttbar - 12634.0, #2023 ttbar PU - 12434.7, #2023 ttbar mkFit - 14034.0, #2023 ttbar fastsim - 14234.0, #2023 ttbar PU fastsim - 24834.0, #2026D98 ttbar (Phase-2 baseline) - 24834.911, #2026D98 ttbar DD4hep XML - 25034.999, #2026D98 ttbar premixing stage1+stage2, PU50 - 24896.0, #CE_E_Front_120um D98 - 24900.0, #CE_H_Coarse_Scint D98 - 23234.0, #2026D94 ttbar (exercise with HFNose) - 25202.0, #2016 ttbar UP15 PU - 250202.181, #2018 ttbar stage1 + stage2 premix - 141.044 # 2023D JetMET PD + 'limited' : [ + # See README for further details + ###### MC (generated from scratch or from RelVals) + ### FullSim + # Run1 + 5.1, # TTbar_8TeV_TuneCUETP8M1 FastSim + 8, # RelValBeamHalo Cosmics + 9.0, # RelValHiggs200ChargedTaus + 25, # RelValTTbar + 101.0, # SingleElectronE120EHCAL + ECALHCAL.customise + fullMixCustomize_cff.setCrossingFrameOn + + # Run2 + 7.3, # UndergroundCosmicSPLooseMu + 1306.0, # RelValSingleMuPt1_UP15 + 1330, # RelValZMM_13 + 135.4, # ZEE_13TeV_TuneCUETP8M1 + 25202.0, # RelValTTbar_13 PU = AVE_35_BX_25ns + 250202.181, # RelValTTbar_13 PREMIX + + # Run3 + 11634.0, # TTbar_14TeV + 13234.0, # RelValTTbar_14TeV FastsSim + 12434.0, # RelValTTbar_14TeV + 12446.0, # RelValZEE_13 + 12634.0, # RelValTTbar_14TeV PU = Run3_Flat55To75_PoissonOOTPU + 12434.7, # RelValTTbar_14TeV mkFit + 14034.0, # RelValTTbar_14TeV Run3_2023_FastSim + 14234.0, # RelValTTbar_14TeV Run3_2023_FastSim PU = Run3_Flat55To75_PoissonOOTPU + 2500.4, # RelValTTbar_14TeV NanoAOD from existing MINI + + # Phase2 + 24834.0, # 
RelValTTbar_14TeV phase2_realistic_T25 Extended2026D98 (Phase-2 baseline) + 24834.911, # TTbar_14TeV_TuneCP5 phase2_realistic_T25 DD4hepExtended2026D98 DD4Hep (HLLHC14TeV BeamSpot) + 25034.999, # RelValTTbar_14TeV (PREMIX) phase2_realistic_T25 Extended2026D98 AVE_50_BX_25ns_m3p3 + 24896.0, # RelValCloseByPGun_CE_E_Front_120um phase2_realistic_T25 Extended2026D98 + 24900.0, # RelValCloseByPGun_CE_H_Coarse_Scint phase2_realistic_T25 Extended2026D98 + 23234.0, # TTbar_14TeV_TuneCP5 phase2_realistic_T21 Extended2026D94 (exercise with HFNose) + + + ###### pp Data + ## Run1 + 4.22, # Run2011A Cosmics + 4.53, # Run2012B Photon miniAODs + 1000, # Run2011A MinimumBias Prompt RecoTLR.customisePrompt + 1001, # Run2011A MinimumBias Data+Express + ## Run2 + 136.731, # Run2016B SinglePhoton + 136.7611, # Run2016E JetHT (reMINIAOD) Run2_2016_HIPM + run2_miniAOD_80XLegacy + 136.8311, # Run2017F JetHT (reMINIAOD) run2_miniAOD_94XFall17 + 136.88811, # Run2018D JetHT (reMINIAOD) run2_miniAOD_UL_preSummer20 (UL MINI) + 136.793, # Run2017C DoubleEG + 136.874, # Run2018C EGamma + + ## Run3 + # 2021 + 139.001, # Run2021 MinimumBias Commissioning2021 + + # 2022 + 140.023, # Run2022B ZeroBias + 140.043, # Run2022C ZeroBias + 140.063, # Run2022D ZeroBias + + # 2023 + 141.044, # Run2023D JetMET0 + 141.042, # Run2023D ZeroBias + 141.046, # Run2023D EGamma0 + + ###### Heavy Ions + ## Data + # Run1 + 140.53, # HIRun2011 HIMinBiasUPC + # Run2 + 140.56, # HIRun2018A HIHardProbes Run2_2018_pp_on_AA + ## MC + 158.01, # RelValHydjetQ_B12_5020GeV_2018_ppReco (reMINIAOD) (HI MC with pp-like reco) + 312.0, # Pyquen_ZeemumuJets_pt10_2760GeV PU : HiMixGEN + ], 'jetmc': [5.1, 13, 15, 25, 38, 39], #MC 'metmc' : [5.1, 15, 25, 37, 38, 39], #MC diff --git a/Configuration/Skimming/python/PDWG_EGMJME_SD_cff.py b/Configuration/Skimming/python/PDWG_EGMJME_SD_cff.py new file mode 100644 index 0000000000000..8711ee1ceab12 --- /dev/null +++ b/Configuration/Skimming/python/PDWG_EGMJME_SD_cff.py @@ -0,0 +1,17 @@ 
+import FWCore.ParameterSet.Config as cms + +#Trigger bit requirement +import HLTrigger.HLTfilters.hltHighLevel_cfi as hlt +EGMJME = hlt.hltHighLevel.clone() +EGMJME.TriggerResultsTag = cms.InputTag( "TriggerResults", "", "HLT" ) +EGMJME.HLTPaths = cms.vstring( + 'HLT_Photon110EB_TightID_TightIso*', + 'HLT_Photon30EB_TightID_TightIso*', + 'HLT_Photon90_R9Id90_HE10_IsoM*', + 'HLT_Photon75_R9Id90_HE10_IsoM*', + 'HLT_Photon50_R9Id90_HE10_IsoM*', + 'HLT_Photon200*') +EGMJME.andOr = cms.bool( True ) +# we want to intentionally throw and exception +# in case it does not match one of the HLT Paths +EGMJME.throw = cms.bool( False ) diff --git a/Configuration/Skimming/python/PDWG_EXODisappTrk_cff.py b/Configuration/Skimming/python/PDWG_EXODisappTrk_cff.py index 8fccace26e2f1..b18d1fa1ec877 100644 --- a/Configuration/Skimming/python/PDWG_EXODisappTrk_cff.py +++ b/Configuration/Skimming/python/PDWG_EXODisappTrk_cff.py @@ -21,8 +21,13 @@ "HLT_IsoMu*_v*", "HLT_MediumChargedIsoPFTau*HighPtRelaxedIso_Trk50_eta2p1_v*", "HLT_VBF_DoubleMediumDeepTauPFTauHPS20_eta2p1_v*", + # 2023 "HLT_DoubleMediumDeepTauPFTauHPS*_L2NN_eta2p1_*", - "HLT_LooseDeepTauPFTauHPS*_L2NN_eta2p1_v*" + "HLT_LooseDeepTauPFTauHPS*_L2NN_eta2p1_v*", + # 2022 + "HLT_VBF_DoubleMediumChargedIsoPFTauHPS20_Trk1_eta2p1_v*", + "HLT_DoubleMediumDeepTauIsoPFTauHPS*_L2NN_eta2p1_v*", + "HLT_DoubleMediumChargedIsoPFTauHPS*_Trk1_eta2p1_v*", ] ) diff --git a/Configuration/Skimming/python/PDWG_MUOJME_SD_cff.py b/Configuration/Skimming/python/PDWG_MUOJME_SD_cff.py new file mode 100644 index 0000000000000..a37610e362d76 --- /dev/null +++ b/Configuration/Skimming/python/PDWG_MUOJME_SD_cff.py @@ -0,0 +1,13 @@ +import FWCore.ParameterSet.Config as cms + +#Trigger bit requirement +import HLTrigger.HLTfilters.hltHighLevel_cfi as hlt +MUOJME = hlt.hltHighLevel.clone() +MUOJME.TriggerResultsTag = cms.InputTag( "TriggerResults", "", "HLT" ) +MUOJME.HLTPaths = cms.vstring( + 'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8*', + ) +MUOJME.andOr = 
cms.bool( True ) +# we want to intentionally throw and exception +# in case it does not match one of the HLT Paths +MUOJME.throw = cms.bool( False ) diff --git a/Configuration/Skimming/python/Skims_PDWG_cff.py b/Configuration/Skimming/python/Skims_PDWG_cff.py index 2ca43ee5b650c..3fba086cb4606 100644 --- a/Configuration/Skimming/python/Skims_PDWG_cff.py +++ b/Configuration/Skimming/python/Skims_PDWG_cff.py @@ -348,7 +348,7 @@ paths = (EXODisappTrkPath), content = EXODisappTrkSkimContent.outputCommands, selectEvents = cms.untracked.PSet(), - dataTier = cms.untracked.string('AOD') + dataTier = cms.untracked.string('USER') ) from Configuration.Skimming.PDWG_EXODisappMuon_cff import * @@ -396,6 +396,28 @@ dataTier = cms.untracked.string('RAW') # for the moment, it could be DIGI in the future ) +from Configuration.Skimming.PDWG_EGMJME_SD_cff import * +EGMJMEPath = cms.Path(EGMJME) +SKIMStreamEGMJME = cms.FilteredStream( + responsible = 'PDWG', + name = 'EGMJME', + paths = (EGMJMEPath), + content = skimRawContent.outputCommands, + selectEvents = cms.untracked.PSet(), + dataTier = cms.untracked.string('RAW-RECO') + ) + +from Configuration.Skimming.PDWG_MUOJME_SD_cff import * +MUOJMEPath = cms.Path(MUOJME) +SKIMStreamMUOJME = cms.FilteredStream( + responsible = 'PDWG', + name = 'MUOJME', + paths = (MUOJMEPath), + content = skimRawContent.outputCommands, + selectEvents = cms.untracked.PSet(), + dataTier = cms.untracked.string('RAW-RECO') + ) + #################### diff --git a/Configuration/Skimming/python/autoSkim.py b/Configuration/Skimming/python/autoSkim.py index 3930b064b7579..364be46435579 100644 --- a/Configuration/Skimming/python/autoSkim.py +++ b/Configuration/Skimming/python/autoSkim.py @@ -2,14 +2,14 @@ # Skim 2023 'BTagMu' : 'LogError+LogErrorMonitor', - 'DisplacedJet' : 'EXODisplacedJet+EXODelayedJet+EXODTCluster+EXOLLPJetHCAL+LogError+LogErrorMonitor', - 'JetMET0' : 'JetHTJetPlusHOFilter+EXOHighMET+EXODelayedJetMET+EXODisappTrk+LogError+LogErrorMonitor', - 
'JetMET1' : 'JetHTJetPlusHOFilter+EXOHighMET+EXODelayedJetMET+EXODisappTrk+LogError+LogErrorMonitor', - 'EGamma0':'ZElectron+WElectron+EXOMONOPOLE+EXODisappTrk+IsoPhotonEB+LogError+LogErrorMonitor', - 'EGamma1':'ZElectron+WElectron+EXOMONOPOLE+EXODisappTrk+IsoPhotonEB+LogError+LogErrorMonitor', + 'DisplacedJet' : 'EXODisplacedJet+EXODelayedJet+EXODTCluster+EXOCSCCluster+EXOLLPJetHCAL+LogError+LogErrorMonitor', + 'JetMET0' : 'JetHTJetPlusHOFilter+EXOHighMET+EXODelayedJetMET+EXODisappTrk+TeVJet+LogError+LogErrorMonitor', + 'JetMET1' : 'JetHTJetPlusHOFilter+EXOHighMET+EXODelayedJetMET+EXODisappTrk+TeVJet+LogError+LogErrorMonitor', + 'EGamma0':'EGMJME+ZElectron+WElectron+EXOMONOPOLE+EXODisappTrk+IsoPhotonEB+LogError+LogErrorMonitor', + 'EGamma1':'EGMJME+ZElectron+WElectron+EXOMONOPOLE+EXODisappTrk+IsoPhotonEB+LogError+LogErrorMonitor', 'Tau' : 'EXODisappTrk+LogError+LogErrorMonitor', - 'Muon0' : 'ZMu+EXODisappTrk+EXOCSCCluster+EXODisappMuon+LogError+LogErrorMonitor', - 'Muon1' : 'ZMu+EXODisappTrk+EXOCSCCluster+EXODisappMuon+LogError+LogErrorMonitor', + 'Muon0' : 'MUOJME+ZMu+EXODisappTrk+EXOCSCCluster+EXODisappMuon+LogError+LogErrorMonitor', + 'Muon1' : 'MUOJME+ZMu+EXODisappTrk+EXOCSCCluster+EXODisappMuon+LogError+LogErrorMonitor', 'MuonEG' : 'TopMuEG+LogError+LogErrorMonitor', 'NoBPTX' : 'EXONoBPTXSkim+LogError+LogErrorMonitor', 'HcalNZS' : 'LogError+LogErrorMonitor', @@ -28,17 +28,16 @@ # These should be uncommented when 2022 data reprocessing # Dedicated skim for 2022 - #'JetMET' : 'JetHTJetPlusHOFilter+EXOHighMET+EXODelayedJetMET+EXODisappTrk+LogError+LogErrorMonitor', - #'EGamma':'ZElectron+WElectron+EXOMONOPOLE+EXODisappTrk+LogError+LogErrorMonitor', - #'Muon' : 'ZMu+EXODisappTrk+EXODisappMuon+LogError+LogErrorMonitor', - #'DisplacedJet' : 'EXODisplacedJet+EXODelayedJet+EXODTCluster+EXOCSCCluster+EXOLLPJetHCAL+LogError+LogErrorMonitor', - #'JetHT' : 'JetHTJetPlusHOFilter+LogError+LogErrorMonitor', - #'MET' : 
'EXOHighMET+EXODelayedJetMET+EXODisappTrk+LogError+LogErrorMonitor', - #'SingleMuon' : 'ZMu+EXODisappTrk+EXODisappMuon+LogError+LogErrorMonitor', - #'DoubleMuon' : 'LogError+LogErrorMonitor', + 'JetMET' : 'JetHTJetPlusHOFilter+EXOHighMET+EXODelayedJetMET+EXODisappTrk+TeVJet+LogError+LogErrorMonitor', + 'EGamma':'EGMJME+ZElectron+WElectron+EXOMONOPOLE+EXODisappTrk+IsoPhotonEB+LogError+LogErrorMonitor', + 'Muon' : 'MUOJME+ZMu+EXODisappTrk+EXODisappMuon+LogError+LogErrorMonitor', + 'JetHT' : 'JetHTJetPlusHOFilter+TeVJet+LogError+LogErrorMonitor', + 'MET' : 'EXOHighMET+EXODelayedJetMET+EXODisappTrk+TeVJet+LogError+LogErrorMonitor', + 'SingleMuon' : 'ZMu+EXODisappTrk+EXODisappMuon+LogError+LogErrorMonitor', + 'DoubleMuon' : 'MUOJME+LogError+LogErrorMonitor', # Used in unit test scenario ppEra_Run2_2018 - 'SingleMuon': 'LogError+LogErrorMonitor', + #'SingleMuon': 'LogError+LogErrorMonitor', } # For 2023 PbPb skims diff --git a/Configuration/StandardSequences/python/AlCaHarvesting_cff.py b/Configuration/StandardSequences/python/AlCaHarvesting_cff.py index 8f57c5066f468..a183a262981ac 100644 --- a/Configuration/StandardSequences/python/AlCaHarvesting_cff.py +++ b/Configuration/StandardSequences/python/AlCaHarvesting_cff.py @@ -11,6 +11,7 @@ from Calibration.TkAlCaRecoProducers.AlcaSiPixelLorentzAngleHarvesterMCS_cff import * from Alignment.CommonAlignmentProducer.AlcaSiPixelAliHarvester_cff import * from Alignment.CommonAlignmentProducer.AlcaSiPixelAliHarvesterHG_cff import * +from Alignment.CommonAlignmentProducer.AlcaSiPixelAliHarvesterHGCombined_cff import * from Calibration.EcalCalibAlgos.AlcaEcalPedestalsHarvester_cff import * from Calibration.LumiAlCaRecoProducers.AlcaLumiPCCHarvester_cff import * from CalibTracker.SiPixelQuality.SiPixelStatusHarvester_cfi import * @@ -187,6 +188,15 @@ timetype = cms.untracked.string('runnumber') ) +# -------------------------------------------------------------------------------------- +# SiPixel Alignment HG combined 
+ALCAHARVESTSiPixelAliHGCombined_metadata = cms.PSet(record = cms.untracked.string('TrackerAlignmentHGCombinedRcd')) + +ALCAHARVESTSiPixelAliHGCombined_dbOutput = cms.PSet(record = cms.string('TrackerAlignmentHGCombinedRcd'), + tag = cms.string('SiPixelAliHGCombined_pcl'), + timetype = cms.untracked.string('runnumber') + ) + # -------------------------------------------------------------------------------------- # SiPixel Lorentz Angle ALCAHARVESTSiPixelLA_metadata = cms.PSet(record = cms.untracked.string('SiPixelLorentzAngleRcd')) @@ -296,6 +306,7 @@ SiStripLA = cms.Path(ALCAHARVESTSiStripLorentzAngle) SiPixelAli = cms.Path(ALCAHARVESTSiPixelAli) SiPixelAliHG = cms.Path(ALCAHARVESTSiPixelAliHG) +SiPixelAliHGCombined = cms.Path(ALCAHARVESTSiPixelAliHGCombined) SiPixelLA = cms.Path(ALCAHARVESTSiPixelLorentzAngle) SiPixelLAMCS = cms.Path(ALCAHARVESTSiPixelLorentzAngleMCS) EcalPedestals = cms.Path(ALCAHARVESTEcalPedestals) diff --git a/Configuration/StandardSequences/python/AlCaRecoStreams_cff.py b/Configuration/StandardSequences/python/AlCaRecoStreams_cff.py index d4a718a5d14c4..7d43cdfd35c02 100644 --- a/Configuration/StandardSequences/python/AlCaRecoStreams_cff.py +++ b/Configuration/StandardSequences/python/AlCaRecoStreams_cff.py @@ -33,6 +33,8 @@ from Alignment.CommonAlignmentProducer.ALCARECOTkAlMinBias_cff import * # AlCaReco for track based alignment using JetHT events from Alignment.CommonAlignmentProducer.ALCARECOTkAlJetHT_cff import * +# AlCaReco for track based alignment using V0s +from Alignment.CommonAlignmentProducer.ALCARECOTkAlV0s_cff import * ############################################################### # Tracker Calibration @@ -166,6 +168,7 @@ from Calibration.TkAlCaRecoProducers.ALCARECOSiStripPCLHistos_cff import * from Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAli_cff import * from Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAliHG_cff import * +from 
Alignment.CommonAlignmentProducer.ALCARECOPromptCalibProdSiPixelAliHGDiMuon_cff import * from CalibTracker.SiPixelQuality.ALCARECOPromptCalibProdSiPixel_cff import * @@ -196,6 +199,8 @@ pathALCARECOTkAlUpsilonMuMuPA = cms.Path(seqALCARECOTkAlUpsilonMuMuPA*ALCARECOTkAlUpsilonMuMuPADQM) pathALCARECOTkAlMinBias = cms.Path(seqALCARECOTkAlMinBias*ALCARECOTkAlMinBiasDQM) pathALCARECOTkAlJetHT = cms.Path(seqALCARECOTkAlJetHT*ALCARECOTkAlJetHTDQM) +pathALCARECOTkAlK0s = cms.Path(seqALCARECOTkAlK0s*ALCARECOTkAlKShortTracksDQM) +pathALCARECOTkAlLambdas = cms.Path(seqALCARECOTkAlLambdas*ALCARECOTkAlLambdaTracksDQM) pathALCARECOSiPixelCalSingleMuon = cms.Path(seqALCARECOSiPixelCalSingleMuon) pathALCARECOSiPixelCalSingleMuonLoose = cms.Path(seqALCARECOSiPixelCalSingleMuonLoose) pathALCARECOSiPixelCalSingleMuonTight = cms.Path(seqALCARECOSiPixelCalSingleMuonTight * ALCARECOSiPixelCalSingleMuonTightDQM) @@ -290,6 +295,8 @@ pathALCARECOPromptCalibProdSiPixelLorentzAngleMCS = cms.Path(seqALCARECOPromptCalibProdSiPixelLorentzAngleMCS) pathALCARECOPromptCalibProdSiPixelAli = cms.Path(seqALCARECOPromptCalibProdSiPixelAli) pathALCARECOPromptCalibProdSiPixelAliHG = cms.Path(seqALCARECOPromptCalibProdSiPixelAliHG) +pathALCARECOPromptCalibProdSiPixelAliHGMinBias = cms.Path(seqALCARECOPromptCalibProdSiPixelAliHG) +pathALCARECOPromptCalibProdSiPixelAliHGDiMu = cms.Path(seqALCARECOPromptCalibProdSiPixelAliHGDiMu) pathALCARECOPromptCalibProdSiPixel = cms.Path(seqALCARECOPromptCalibProdSiPixel) pathALCARECOPromptCalibProdEcalPedestals = cms.Path(seqALCARECOPromptCalibProdEcalPedestals) pathALCARECOPromptCalibProdLumiPCC = cms.Path(seqALCARECOPromptCalibProdLumiPCC) @@ -410,6 +417,15 @@ dataTier = cms.untracked.string('ALCARECO') ) +ALCARECOStreamTkAlV0s = cms.FilteredStream( + responsible = 'Marco Musich', + name = 'TkAlV0s', + paths = (pathALCARECOTkAlK0s,pathALCARECOTkAlLambdas), + content = OutALCARECOTkAlV0s.outputCommands, + selectEvents = OutALCARECOTkAlV0s.SelectEvents, + dataTier = 
cms.untracked.string('ALCARECO') + ) + ALCARECOStreamSiPixelCalSingleMuon = cms.FilteredStream( responsible = 'Tamas Almos Vami', name = 'SiPixelCalSingleMuon', @@ -1067,6 +1083,15 @@ dataTier = cms.untracked.string('ALCARECO') ) +ALCARECOStreamPromptCalibProdSiPixelAliHGComb = cms.FilteredStream( + responsible = 'Marco Musich', + name = 'PromptCalibProdSiPixelAliHGComb', + paths = (pathALCARECOPromptCalibProdSiPixelAliHGMinBias,pathALCARECOPromptCalibProdSiPixelAliHGDiMu), + content = OutALCARECOPromptCalibProdSiPixelAliHGComb.outputCommands, + selectEvents = OutALCARECOPromptCalibProdSiPixelAliHGComb.SelectEvents, + dataTier = cms.untracked.string('ALCARECO') + ) + ALCARECOStreamSiStripPCLHistos = cms.FilteredStream( responsible = 'Gianluca Cerminara', name = 'SiStripPCLHistos', diff --git a/Configuration/StandardSequences/python/DigiCosmics_cff.py b/Configuration/StandardSequences/python/DigiCosmics_cff.py index c170e7061a83b..ce268d419bfdb 100644 --- a/Configuration/StandardSequences/python/DigiCosmics_cff.py +++ b/Configuration/StandardSequences/python/DigiCosmics_cff.py @@ -62,3 +62,12 @@ pdigi = cms.Sequence(pdigiTask) pdigi_valid = cms.Sequence(pdigiTask) +#phase 2 common mods +def _modifyEnableHcalHardcode( theProcess ): + from CalibCalorimetry.HcalPlugins.Hcal_Conditions_forGlobalTag_cff import hcal_db_producer as _hcal_db_producer, es_hardcode as _es_hardcode, es_prefer_hcalHardcode as _es_prefer_hcalHardcode + theProcess.hcal_db_producer = _hcal_db_producer + theProcess.es_hardcode = _es_hardcode + theProcess.es_prefer_hcalHardcode = _es_prefer_hcalHardcode + +from Configuration.Eras.Modifier_hcalHardcodeConditions_cff import hcalHardcodeConditions +modifyEnableHcalHardcode_ = hcalHardcodeConditions.makeProcessModifier( _modifyEnableHcalHardcode ) diff --git a/Configuration/StandardSequences/python/Eras.py b/Configuration/StandardSequences/python/Eras.py index 74d2b6bdb424d..9c875e8fba998 100644 --- a/Configuration/StandardSequences/python/Eras.py +++ 
b/Configuration/StandardSequences/python/Eras.py @@ -45,6 +45,7 @@ def __init__(self): 'Run3_FastSim', 'Run3_2023_FastSim', 'Run3_2023_ZDC', + 'Run3_2023_UPC', 'Phase2', 'Phase2C9', 'Phase2C10', @@ -60,7 +61,8 @@ def __init__(self): 'Phase2C11I13T25M9', 'Phase2C11I13T26M9', 'Phase2C17I13M9', - 'Phase2C20I13M9' + 'Phase2C20I13M9', + 'Phase2C22I13M9' ] internalUseMods = ['run2_common', 'run2_25ns_specific', @@ -75,7 +77,7 @@ def __init__(self): 'phase2_common', 'phase2_tracker', 'phase2_muon', 'phase2_GEM', 'phase2_GE0', 'phase2_hgcal', 'phase2_timing', 'phase2_hfnose', 'phase2_hgcalV10', 'phase2_hgcalV11', 'phase2_hgcalV12', - 'phase2_timing_layer', 'phase2_etlV4', 'phase2_hcal', 'phase2_ecal','phase2_ecal_devel', + 'phase2_timing_layer', 'phase2_etlV4', 'phase2_hcal', 'phase2_ecal','phase2_ecal_devel', 'phase2_ecalTP_devel', 'phase2_trigger', 'phase2_squarePixels', 'phase2_3DPixels', 'trackingLowPU', 'trackingPhase1', @@ -87,6 +89,7 @@ def __init__(self): 'run2_nanoAOD_106Xv2', 'run3_nanoAOD_122', 'run3_nanoAOD_124', 'run3_ecal_devel', + 'run3_upc', 'hcalHardcodeConditions', 'hcalSkipPacker', 'run2_HLTconditions_2016','run2_HLTconditions_2017','run2_HLTconditions_2018', 'bParking'] diff --git a/Configuration/StandardSequences/python/GeometryConf.py b/Configuration/StandardSequences/python/GeometryConf.py index ebcf464772e48..275e5207a7f29 100644 --- a/Configuration/StandardSequences/python/GeometryConf.py +++ b/Configuration/StandardSequences/python/GeometryConf.py @@ -91,4 +91,14 @@ 'DD4hepExtended2026D102' : 'DD4hepExtended2026D102,DD4hepExtended2026D102Reco', 'Extended2026D103' : 'Extended2026D103,Extended2026D103Reco', 'DD4hepExtended2026D103' : 'DD4hepExtended2026D103,DD4hepExtended2026D103Reco', + 'Extended2026D104' : 'Extended2026D104,Extended2026D104Reco', + 'DD4hepExtended2026D104' : 'DD4hepExtended2026D104,DD4hepExtended2026D104Reco', + 'Extended2026D105' : 'Extended2026D105,Extended2026D105Reco', + 'DD4hepExtended2026D105' : 
'DD4hepExtended2026D105,DD4hepExtended2026D105Reco', + 'Extended2026D106' : 'Extended2026D106,Extended2026D106Reco', + 'DD4hepExtended2026D106' : 'DD4hepExtended2026D106,DD4hepExtended2026D106Reco', + 'Extended2026D107' : 'Extended2026D107,Extended2026D107Reco', + 'DD4hepExtended2026D107' : 'DD4hepExtended2026D107,DD4hepExtended2026D107Reco', + 'Extended2026D108' : 'Extended2026D108,Extended2026D108Reco', + 'DD4hepExtended2026D108' : 'DD4hepExtended2026D108,DD4hepExtended2026D108Reco', } diff --git a/Configuration/StandardSequences/python/RawToDigi_Repacked_cff.py b/Configuration/StandardSequences/python/RawToDigi_Repacked_cff.py index 3d64a8c1c4912..e40eaee29c6e1 100644 --- a/Configuration/StandardSequences/python/RawToDigi_Repacked_cff.py +++ b/Configuration/StandardSequences/python/RawToDigi_Repacked_cff.py @@ -2,6 +2,9 @@ from Configuration.StandardSequences.RawToDigi_cff import * +from Configuration.ProcessModifiers.gpu_cff import gpu +from Configuration.ProcessModifiers.alpaka_cff import alpaka + scalersRawToDigi.scalersInputTag = 'rawDataRepacker' csctfDigis.producer = 'rawDataRepacker' dttfDigis.DTTF_FED_Source = 'rawDataRepacker' @@ -10,7 +13,9 @@ gtEvmDigis.EvmGtInputTag = 'rawDataRepacker' siPixelDigis.cpu.InputLabel = 'rawDataRepacker' siStripDigis.ProductLabel = 'rawDataRepacker' -ecalDigis.cpu.InputLabel = 'rawDataRepacker' +ecalDigisCPU.InputLabel = 'rawDataRepacker' +gpu.toModify(ecalDigisGPU, InputLabel = 'rawDataRepacker') +alpaka.toModify(ecalDigisPortable, InputLabel = 'rawDataRepacker') ecalPreshowerDigis.sourceTag = 'rawDataRepacker' hcalDigis.InputLabel = 'rawDataRepacker' muonCSCDigis.InputObjects = 'rawDataRepacker' diff --git a/Configuration/StandardSequences/python/RawToDigi_cff.py b/Configuration/StandardSequences/python/RawToDigi_cff.py index c245488f29ef7..321e5daa02370 100644 --- a/Configuration/StandardSequences/python/RawToDigi_cff.py +++ b/Configuration/StandardSequences/python/RawToDigi_cff.py @@ -73,9 +73,14 @@ 
RawToDigiTask_hcalOnly = cms.Task(hcalDigis) RawToDigi_hcalOnly = cms.Sequence(RawToDigiTask_hcalOnly) +from Configuration.ProcessModifiers.gpu_cff import gpu +from Configuration.ProcessModifiers.alpaka_cff import alpaka + scalersRawToDigi.scalersInputTag = 'rawDataCollector' siPixelDigis.cpu.InputLabel = 'rawDataCollector' -ecalDigis.cpu.InputLabel = 'rawDataCollector' +ecalDigisCPU.InputLabel = 'rawDataCollector' +gpu.toModify(ecalDigisGPU, InputLabel = 'rawDataCollector') +alpaka.toModify(ecalDigisPortable, InputLabel = 'rawDataCollector') ecalPreshowerDigis.sourceTag = 'rawDataCollector' hcalDigis.InputLabel = 'rawDataCollector' muonCSCDigis.InputObjects = 'rawDataCollector' diff --git a/Configuration/StandardSequences/python/ReconstructionCosmics_cff.py b/Configuration/StandardSequences/python/ReconstructionCosmics_cff.py index b64aabf646bcc..0189dc4de83f8 100644 --- a/Configuration/StandardSequences/python/ReconstructionCosmics_cff.py +++ b/Configuration/StandardSequences/python/ReconstructionCosmics_cff.py @@ -21,6 +21,8 @@ # from RecoLocalCalo.Configuration.RecoLocalCalo_Cosmics_cff import * from RecoEcal.Configuration.RecoEcalCosmics_cff import * +from RecoHGCal.Configuration.recoHGCAL_cff import * + # # muons # @@ -44,6 +46,11 @@ # local reco trackerCosmicsTask = cms.Task(offlineBeamSpot,trackerlocalrecoTask,MeasurementTrackerEvent,tracksP5Task) + +# ugly hack: for the time being remove tracking (no Cosmics seeding in Phase-2) +from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker +phase2_tracker.toReplaceWith(trackerCosmicsTask,trackerCosmicsTask.copyAndExclude([tracksP5Task])) + trackerCosmics = cms.Sequence(trackerCosmicsTask) caloCosmicsTask = cms.Task(calolocalrecoTaskCosmics,ecalClustersCosmicsTask) caloCosmics = cms.Sequence(caloCosmicsTask) @@ -51,32 +58,49 @@ caloCosmics_HcalNZS = cms.Sequence(caloCosmics_HcalNZSTask) muonsLocalRecoCosmicsTask = cms.Task(muonlocalrecoTask,muonlocalrecoT0SegTask) muonsLocalRecoCosmics = 
cms.Sequence(muonsLocalRecoCosmicsTask) +localReconstructionCosmicsTask = cms.Task(bunchSpacingProducer,trackerCosmicsTask,caloCosmicsTask,muonsLocalRecoCosmicsTask,vertexrecoCosmicsTask) +#phase2_tracker.toReplaceWith(localReconstructionCosmicsTask,localReconstructionCosmicsTask.copyAndExclude([vertexrecoCosmicsTask])) -localReconstructionCosmicsTask = cms.Task(bunchSpacingProducer,trackerCosmicsTask,caloCosmicsTask,muonsLocalRecoCosmicsTask,vertexrecoCosmicsTask) localReconstructionCosmics = cms.Sequence(localReconstructionCosmicsTask) localReconstructionCosmics_HcalNZSTask = cms.Task(bunchSpacingProducer,trackerCosmicsTask,caloCosmics_HcalNZSTask,muonsLocalRecoCosmicsTask,vertexrecoCosmicsTask) localReconstructionCosmics_HcalNZS = cms.Sequence(localReconstructionCosmics_HcalNZSTask) - # global reco muonsCosmicsTask = cms.Task(muonRecoGRTask) jetsCosmicsTask = cms.Task(recoCaloTowersGRTask,recoJetsGRTask) egammaCosmicsTask = cms.Task(egammarecoGlobal_cosmicsTask,egammarecoCosmics_woElectronsTask) - from FWCore.Modules.logErrorHarvester_cfi import * +reconstructionCosmicsTask = cms.Task(localReconstructionCosmicsTask, + beamhaloTracksTask, + jetsCosmicsTask, + muonsCosmicsTask, + regionalCosmicTracksTask, + cosmicDCTracksSeqTask, + metrecoCosmicsTask, + egammaCosmicsTask, + logErrorHarvester) -reconstructionCosmicsTask = cms.Task(localReconstructionCosmicsTask, - beamhaloTracksTask, - jetsCosmicsTask, - muonsCosmicsTask, - regionalCosmicTracksTask, - cosmicDCTracksSeqTask, - metrecoCosmicsTask, - egammaCosmicsTask, - logErrorHarvester) -reconstructionCosmics = cms.Sequence(reconstructionCosmicsTask) +# ugly hack +# for the time being remove all tasks related to tracking +phase2_tracker.toReplaceWith(reconstructionCosmicsTask,reconstructionCosmicsTask.copyAndExclude([beamhaloTracksTask, + cosmicDCTracksSeqTask, + regionalCosmicTracksTask, + metrecoCosmicsTask, + muonsCosmicsTask])) + +from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal 
+_phase2HGALRecoTask = reconstructionCosmicsTask.copy() +_phase2HGALRecoTask.add(iterTICLTask) +phase2_hgcal.toReplaceWith(reconstructionCosmicsTask, _phase2HGALRecoTask) + +from Configuration.Eras.Modifier_phase2_hfnose_cff import phase2_hfnose +_phase2HFNoseRecoTask = reconstructionCosmicsTask.copy() +_phase2HFNoseRecoTask.add(iterHFNoseTICLTask) +phase2_hfnose.toReplaceWith(reconstructionCosmicsTask, _phase2HFNoseRecoTask) + +reconstructionCosmics = cms.Sequence(reconstructionCosmicsTask) #logErrorHarvester should only wait for items produced in the reconstructionCosmics sequence _modulesInReconstruction = list() reconstructionCosmics.visit(cms.ModuleNamesFromGlobalsVisitor(globals(),_modulesInReconstruction)) @@ -91,7 +115,10 @@ metrecoCosmicsTask, egammaCosmicsTask, logErrorHarvester) + +phase2_tracker.toReplaceWith(reconstructionCosmics_HcalNZSTask,reconstructionCosmics_HcalNZSTask.copyAndExclude([beamhaloTracksTask,cosmicDCTracksSeqTask,regionalCosmicTracksTask])) reconstructionCosmics_HcalNZS = cms.Sequence(reconstructionCosmics_HcalNZSTask) + reconstructionCosmics_woTkBHMTask = cms.Task(localReconstructionCosmicsTask, jetsCosmicsTask, muonsCosmicsTask, @@ -99,4 +126,6 @@ cosmicDCTracksSeqTask, metrecoCosmicsTask, egammaCosmicsTask) + +phase2_tracker.toReplaceWith(reconstructionCosmics_woTkBHMTask,reconstructionCosmics_woTkBHMTask.copyAndExclude([beamhaloTracksTask,cosmicDCTracksSeqTask,regionalCosmicTracksTask])) reconstructionCosmics_woTkBHM = cms.Sequence(reconstructionCosmics_woTkBHMTask) diff --git a/Configuration/StandardSequences/python/Reconstruction_cff.py b/Configuration/StandardSequences/python/Reconstruction_cff.py index d01b11b329a04..8bd230ea3ce09 100644 --- a/Configuration/StandardSequences/python/Reconstruction_cff.py +++ b/Configuration/StandardSequences/python/Reconstruction_cff.py @@ -182,6 +182,7 @@ # AA data with pp reco from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017 from 
Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc from RecoHI.HiTracking.HILowPtConformalPixelTracks_cfi import * from RecoHI.HiCentralityAlgos.HiCentrality_cfi import hiCentrality from RecoHI.HiCentralityAlgos.HiClusterCompatibility_cfi import hiClusterCompatibility @@ -189,7 +190,7 @@ _highlevelreco_HITask.add(hiConformalPixelTracksTaskPhase1) _highlevelreco_HITask.add(hiCentrality) _highlevelreco_HITask.add(hiClusterCompatibility) -(pp_on_XeXe_2017 | pp_on_AA ).toReplaceWith(highlevelrecoTask, _highlevelreco_HITask) +(pp_on_XeXe_2017 | pp_on_AA | run3_upc).toReplaceWith(highlevelrecoTask, _highlevelreco_HITask) pp_on_AA.toReplaceWith(highlevelrecoTask,highlevelrecoTask.copyAndExclude([PFTauTask])) # not commisoned and not relevant in FastSim (?): diff --git a/Configuration/StandardSequences/python/SimL1EmulatorRepack_GCTGT_cff.py b/Configuration/StandardSequences/python/SimL1EmulatorRepack_GCTGT_cff.py index dbcc43f08352c..3a1d0505b4d8c 100644 --- a/Configuration/StandardSequences/python/SimL1EmulatorRepack_GCTGT_cff.py +++ b/Configuration/StandardSequences/python/SimL1EmulatorRepack_GCTGT_cff.py @@ -26,8 +26,14 @@ ## run the L1 emulator ## +from Configuration.ProcessModifiers.gpu_cff import gpu +from Configuration.ProcessModifiers.alpaka_cff import alpaka + from L1Trigger.L1TCalorimeter.L1TCaloStage1_PPFromRaw_cff import * -ecalDigis.cpu.InputLabel = cms.InputTag( 'rawDataCollector', processName=cms.InputTag.skipCurrentProcess()) +from EventFilter.EcalRawToDigi.ecalDigis_cff import ecalDigisCPU, ecalDigisGPU, ecalDigisPortable +ecalDigisCPU.InputLabel = cms.InputTag('rawDataCollector', processName=cms.InputTag.skipCurrentProcess()) +gpu.toModify(ecalDigisGPU, InputLabel = cms.InputTag('rawDataCollector', processName=cms.InputTag.skipCurrentProcess())) +alpaka.toModify(ecalDigisPortable, InputLabel = cms.InputTag('rawDataCollector', processName=cms.InputTag.skipCurrentProcess())) 
hcalDigis.InputLabel = cms.InputTag( 'rawDataCollector', processName=cms.InputTag.skipCurrentProcess()) simHcalTriggerPrimitiveDigis.InputTagFEDRaw = cms.InputTag( 'rawDataCollector', processName=cms.InputTag.skipCurrentProcess()) diff --git a/Configuration/StandardSequences/python/VtxSmeared.py b/Configuration/StandardSequences/python/VtxSmeared.py index 86a670e2a013d..f3018eacc42ca 100644 --- a/Configuration/StandardSequences/python/VtxSmeared.py +++ b/Configuration/StandardSequences/python/VtxSmeared.py @@ -1,4 +1,6 @@ VtxSmeared = { + 'DBdesign': 'IOMC.EventVertexGenerators.VtxSmearedDesign_cfi', + 'DBrealistic': 'IOMC.EventVertexGenerators.VtxSmearedRealistic_cfi', 'NoSmear': 'Configuration.StandardSequences.VtxSmearedNoSmear_cff', 'BetafuncEarlyCollision': 'IOMC.EventVertexGenerators.VtxSmearedBetafuncEarlyCollision_cfi', 'BeamProfile': 'IOMC.EventVertexGenerators.VtxSmearedBeamProfile_cfi', diff --git a/DPGAnalysis/HcalNanoAOD/plugins/HcalUHTRTableProducer.cc b/DPGAnalysis/HcalNanoAOD/plugins/HcalUHTRTableProducer.cc index 423cd96763dd5..3f66ccb559f15 100644 --- a/DPGAnalysis/HcalNanoAOD/plugins/HcalUHTRTableProducer.cc +++ b/DPGAnalysis/HcalNanoAOD/plugins/HcalUHTRTableProducer.cc @@ -56,12 +56,9 @@ class HcalUHTRTableProducer : public edm::stream::EDProducer<> { */ private: - void beginRun(edm::Run const&, edm::EventSetup const&) override; void produce(edm::Event&, edm::EventSetup const&) override; }; -void HcalUHTRTableProducer::beginRun(edm::Run const& iRun, edm::EventSetup const& iSetup) {} - void HcalUHTRTableProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { std::vector crate; std::vector slot; diff --git a/DPGAnalysis/HcalNanoAOD/plugins/HcalUMNioTableProducer.cc b/DPGAnalysis/HcalNanoAOD/plugins/HcalUMNioTableProducer.cc index 2f2d499239260..566b7ba8cc749 100644 --- a/DPGAnalysis/HcalNanoAOD/plugins/HcalUMNioTableProducer.cc +++ b/DPGAnalysis/HcalNanoAOD/plugins/HcalUMNioTableProducer.cc @@ -40,12 +40,9 @@ class 
HcalUMNioTableProducer : public edm::stream::EDProducer<> { */ private: - void beginRun(edm::Run const&, edm::EventSetup const&) override; void produce(edm::Event&, edm::EventSetup const&) override; }; -void HcalUMNioTableProducer::beginRun(edm::Run const& iRun, edm::EventSetup const& iSetup) {} - void HcalUMNioTableProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { edm::Handle uMNioDigi; iEvent.getByToken(tokenUMNio_, uMNioDigi); diff --git a/DPGAnalysis/MuonTools/interface/MuDigiBaseProducer.h b/DPGAnalysis/MuonTools/interface/MuDigiBaseProducer.h index 27d161fa233c5..d29c71c61700c 100644 --- a/DPGAnalysis/MuonTools/interface/MuDigiBaseProducer.h +++ b/DPGAnalysis/MuonTools/interface/MuDigiBaseProducer.h @@ -24,7 +24,7 @@ class MuDigiBaseProducer : public SimpleFlatTableProducerBase, int>; using UIntDetVar = FuncVariable, unsigned int>; - using Int8DetVar = FuncVariable, int8_t>; + using Int16DetVar = FuncVariable, int16_t>; using UInt8DetVar = FuncVariable, uint8_t>; std::vector>> detIdVars_; @@ -44,8 +44,8 @@ class MuDigiBaseProducer : public SimpleFlatTableProducerBase(name, varCfg)); } else if (type == "uint") { detVarPtr = std::move(std::make_unique(name, varCfg)); - } else if (type == "int8") { - detVarPtr = std::move(std::make_unique(name, varCfg)); + } else if (type == "int16") { + detVarPtr = std::move(std::make_unique(name, varCfg)); } else if (type == "uint8") { detVarPtr = std::move(std::make_unique(name, varCfg)); } else { @@ -69,7 +69,7 @@ class MuDigiBaseProducer : public SimpleFlatTableProducerBase("doc")->setComment("few words description of the branch content"); variable.ifValue(edm::ParameterDescription("type", "int", true, comType), - edm::allowedValues("int", "uint", "int8", "uint8")); + edm::allowedValues("int", "uint", "int16", "uint8")); edm::ParameterSetDescription variables; diff --git a/DPGAnalysis/MuonTools/interface/MuLocalRecoBaseProducer.h b/DPGAnalysis/MuonTools/interface/MuLocalRecoBaseProducer.h index 
318ba3c60477e..ba27733fc8e1f 100644 --- a/DPGAnalysis/MuonTools/interface/MuLocalRecoBaseProducer.h +++ b/DPGAnalysis/MuonTools/interface/MuLocalRecoBaseProducer.h @@ -40,7 +40,7 @@ class MuRecObjBaseProducer using IntDetVar = FuncVariable, int>; using UIntDetVar = FuncVariable, unsigned int>; - using Int8DetVar = FuncVariable, int8_t>; + using Int16DetVar = FuncVariable, int16_t>; using UInt8DetVar = FuncVariable, uint8_t>; std::vector>> detIdVars_; @@ -67,8 +67,8 @@ class MuRecObjBaseProducer detVarPtr = std::move(std::make_unique(name, varCfg)); // CB can improve? } else if (type == "uint") { detVarPtr = std::move(std::make_unique(name, varCfg)); - } else if (type == "int8") { - detVarPtr = std::move(std::make_unique(name, varCfg)); + } else if (type == "int16") { + detVarPtr = std::move(std::make_unique(name, varCfg)); } else if (type == "uint8") { detVarPtr = std::move(std::make_unique(name, varCfg)); } else { @@ -124,7 +124,7 @@ class MuRecObjBaseProducer edm::Comment comType{"the c++ type of the branch in the flat table"}; detIdVar.ifValue(edm::ParameterDescription{"type", "int", true, comType}, - edm::allowedValues("int", "uint", "int8", "uint8")); + edm::allowedValues("int", "uint", "int16", "uint8")); edm::Comment comPrecision{"the precision with which to store the value in the flat table"}; globalGeomVar.addOptionalNode(edm::ParameterDescription{"precision", true, comPrecision}, false); diff --git a/DPGAnalysis/MuonTools/plugins/MuDTMuonExtTableProducer.cc b/DPGAnalysis/MuonTools/plugins/MuDTMuonExtTableProducer.cc index 8ebcacb24c231..1982215368c9d 100644 --- a/DPGAnalysis/MuonTools/plugins/MuDTMuonExtTableProducer.cc +++ b/DPGAnalysis/MuonTools/plugins/MuDTMuonExtTableProducer.cc @@ -166,9 +166,9 @@ void MuDTMuonExtTableProducer::fillTable(edm::Event& ev) { std::vector staMatches_begin; std::vector staMatches_end; - std::vector matches_wheel; - std::vector matches_sector; - std::vector matches_station; + std::vector matches_wheel; + std::vector 
matches_sector; + std::vector matches_station; std::vector matches_x; std::vector matches_y; @@ -402,4 +402,4 @@ bool MuDTMuonExtTableProducer::hasTrigger(std::vector& trigIndices, #include "FWCore/PluginManager/interface/ModuleDef.h" #include "FWCore/Framework/interface/MakerMacros.h" -DEFINE_FWK_MODULE(MuDTMuonExtTableProducer); \ No newline at end of file +DEFINE_FWK_MODULE(MuDTMuonExtTableProducer); diff --git a/DPGAnalysis/MuonTools/plugins/MuDTSegmentExtTableProducer.cc b/DPGAnalysis/MuonTools/plugins/MuDTSegmentExtTableProducer.cc index b37ed891f7e93..2658d4c3c9dfc 100644 --- a/DPGAnalysis/MuonTools/plugins/MuDTSegmentExtTableProducer.cc +++ b/DPGAnalysis/MuonTools/plugins/MuDTSegmentExtTableProducer.cc @@ -145,7 +145,7 @@ void MuDTSegmentExtTableProducer::fillTable(edm::Event& ev) { std::vector seg4D_hitsExpPos; std::vector seg4D_hitsExpPosCh; - std::vector seg4D_hitsExpWire; + std::vector seg4D_hitsExpWire; // rec-hits vectors, filled if m_fillHits == true unsigned int nHits{0}; @@ -153,11 +153,11 @@ void MuDTSegmentExtTableProducer::fillTable(edm::Event& ev) { std::vector seg2D_hits_pos; std::vector seg2D_hits_posCh; std::vector seg2D_hits_posErr; - std::vector seg2D_hits_side; - std::vector seg2D_hits_wire; - std::vector seg2D_hits_wirePos; - std::vector seg2D_hits_layer; - std::vector seg2D_hits_superLayer; + std::vector seg2D_hits_side; + std::vector seg2D_hits_wire; + std::vector seg2D_hits_wirePos; + std::vector seg2D_hits_layer; + std::vector seg2D_hits_superLayer; std::vector seg2D_hits_time; std::vector seg2D_hits_timeCali; diff --git a/DPGAnalysis/MuonTools/plugins/MuDTTPGPhiFlatTableProducer.cc b/DPGAnalysis/MuonTools/plugins/MuDTTPGPhiFlatTableProducer.cc index 781cddeea2ea0..e876edf07c15a 100644 --- a/DPGAnalysis/MuonTools/plugins/MuDTTPGPhiFlatTableProducer.cc +++ b/DPGAnalysis/MuonTools/plugins/MuDTTPGPhiFlatTableProducer.cc @@ -77,12 +77,12 @@ void MuDTTPGPhiFlatTableProducer::getFromES(const edm::Run& run, const edm::Even void 
MuDTTPGPhiFlatTableProducer::fillTable(edm::Event& ev) { unsigned int nTrigs{0}; - std::vector wheel; - std::vector sector; - std::vector station; + std::vector wheel; + std::vector sector; + std::vector station; - std::vector quality; - std::vector rpcBit; + std::vector quality; + std::vector rpcBit; std::vector phi; std::vector phiB; @@ -90,8 +90,8 @@ void MuDTTPGPhiFlatTableProducer::fillTable(edm::Event& ev) { std::vector posLoc_x; std::vector dirLoc_phi; - std::vector bx; - std::vector is2nd; + std::vector bx; + std::vector is2nd; auto trigColl = m_token.conditionalGet(ev); diff --git a/DPGAnalysis/MuonTools/plugins/MuDTTPGThetaFlatTableProducer.cc b/DPGAnalysis/MuonTools/plugins/MuDTTPGThetaFlatTableProducer.cc index 95c0e9ce6fe83..31af63499abf1 100644 --- a/DPGAnalysis/MuonTools/plugins/MuDTTPGThetaFlatTableProducer.cc +++ b/DPGAnalysis/MuonTools/plugins/MuDTTPGThetaFlatTableProducer.cc @@ -63,11 +63,11 @@ void MuDTTPGThetaFlatTableProducer::fillDescriptions(edm::ConfigurationDescripti void MuDTTPGThetaFlatTableProducer::fillTable(edm::Event& ev) { unsigned int nTrigs{0}; - std::vector wheel; - std::vector sector; - std::vector station; + std::vector wheel; + std::vector sector; + std::vector station; - std::vector bx; + std::vector bx; std::vector hitMap; auto trigColl = m_token.conditionalGet(ev); @@ -152,4 +152,4 @@ MuDTTPGThetaFlatTableProducer::TriggerTag MuDTTPGThetaFlatTableProducer::getTag( #include "FWCore/PluginManager/interface/ModuleDef.h" #include "FWCore/Framework/interface/MakerMacros.h" -DEFINE_FWK_MODULE(MuDTTPGThetaFlatTableProducer); \ No newline at end of file +DEFINE_FWK_MODULE(MuDTTPGThetaFlatTableProducer); diff --git a/DPGAnalysis/MuonTools/plugins/MuGEMMuonExtTableProducer.cc b/DPGAnalysis/MuonTools/plugins/MuGEMMuonExtTableProducer.cc index 6b49b1c34cc8b..f7e96197b8885 100644 --- a/DPGAnalysis/MuonTools/plugins/MuGEMMuonExtTableProducer.cc +++ b/DPGAnalysis/MuonTools/plugins/MuGEMMuonExtTableProducer.cc @@ -115,10 +115,10 @@ void 
MuGEMMuonExtTableProducer::fillTable(edm::Event& ev) { std::vector propagated_isincoming; std::vector propagated_isinsideout; - std::vector propagated_region; - std::vector propagated_layer; - std::vector propagated_chamber; - std::vector propagated_etaP; + std::vector propagated_region; + std::vector propagated_layer; + std::vector propagated_chamber; + std::vector propagated_etaP; std::vector propagatedLoc_x; std::vector propagatedLoc_y; @@ -148,10 +148,10 @@ void MuGEMMuonExtTableProducer::fillTable(edm::Event& ev) { std::vector propagated_EtaPartition_rMax; std::vector propagated_EtaPartition_rMin; - std::vector propagated_nME1hits; - std::vector propagated_nME2hits; - std::vector propagated_nME3hits; - std::vector propagated_nME4hits; + std::vector propagated_nME1hits; + std::vector propagated_nME2hits; + std::vector propagated_nME3hits; + std::vector propagated_nME4hits; auto muons = m_token.conditionalGet(ev); @@ -237,10 +237,10 @@ void MuGEMMuonExtTableProducer::fillTable(edm::Event& ev) { //if at least one CSC hit is found, perform propagation if (is_csc) { // CSC Hits - int8_t nME1_hits = 0; - int8_t nME2_hits = 0; - int8_t nME3_hits = 0; - int8_t nME4_hits = 0; + int16_t nME1_hits = 0; + int16_t nME2_hits = 0; + int16_t nME3_hits = 0; + int16_t nME4_hits = 0; int nHits{htp.numberOfAllHits(htp.TRACK_HITS)}; diff --git a/DPGAnalysis/MuonTools/python/muNtupleProducer_cff.py b/DPGAnalysis/MuonTools/python/muNtupleProducer_cff.py index 001671e872ca3..2fa4cfbbd6391 100644 --- a/DPGAnalysis/MuonTools/python/muNtupleProducer_cff.py +++ b/DPGAnalysis/MuonTools/python/muNtupleProducer_cff.py @@ -11,14 +11,14 @@ muDPGNanoProducer = cms.Sequence(lhcInfoTableProducer + lumiTableProducer - + muDigiProducers - + muLocalRecoProducers + + muDigiProducers + + muLocalRecoProducers + muRecoProducers + muL1TriggerProducers ) - + def muDPGNanoCustomize(process) : - + if hasattr(process, "dtrpcPointFlatTableProducer") and \ hasattr(process, "cscrpcPointFlatTableProducer") and 
\ hasattr(process, "RawToDigiTask"): @@ -27,7 +27,7 @@ def muDPGNanoCustomize(process) : process.rpcPointProducer.cscSegments = 'cscSegments' process.rpcPointProducer.ExtrapolatedRegion = 0.6 process.RawToDigiTask.add(process.rpcPointProducer) - + if hasattr(process, "muGEMMuonExtTableProducer") or hasattr(process, "muCSCTnPFlatTableProducer"): process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi") process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAny_cfi") diff --git a/DPGAnalysis/MuonTools/python/nano_mu_digi_cff.py b/DPGAnalysis/MuonTools/python/nano_mu_digi_cff.py index 1be4ef59d022e..5c8c16139805f 100644 --- a/DPGAnalysis/MuonTools/python/nano_mu_digi_cff.py +++ b/DPGAnalysis/MuonTools/python/nano_mu_digi_cff.py @@ -11,20 +11,20 @@ dtDigiFlatTableProducer.variables = cms.PSet( time = Var("time()", float, doc = "digi time"), - wire = Var("wire()", "int8", doc="wire - [1:X] range" + wire = Var("wire()", "int16", doc="wire - [1:X] range" "
(X varies for different chambers SLs and layers)") ) dtDigiFlatTableProducer.detIdVariables = cms.PSet( - wheel = DetIdVar("wheel()", "int8", doc = "wheel - [-2:2] range"), - sector = DetIdVar("sector()", "int8", doc = "sector - [1:14] range" + wheel = DetIdVar("wheel()", "int16", doc = "wheel - [-2:2] range"), + sector = DetIdVar("sector()", "int16", doc = "sector - [1:14] range" "
sector 13 used for the second MB4 of sector 4" "
sector 14 used for the second MB4 of sector 10"), - station = DetIdVar("station()", "int8", doc = "station - [1:4] range"), - superLayer = DetIdVar("superLayer()", "int8", doc = "superlayer - [1:3] range" + station = DetIdVar("station()", "int16", doc = "station - [1:4] range"), + superLayer = DetIdVar("superLayer()", "int16", doc = "superlayer - [1:3] range" "
SL 1 and 3 are phi SLs" "
SL 2 is theta SL"), - layer = DetIdVar("layer()", "int8", doc = "layer - [1:4] range") + layer = DetIdVar("layer()", "int16", doc = "layer - [1:4] range") ) @@ -40,18 +40,18 @@ ) rpcDigiFlatTableProducer.detIdVariables = cms.PSet( - region = DetIdVar("region()", "int8", doc = "0: barrel, +/-1: endcap"), - ring = DetIdVar("ring()", "int8", doc = "ring id:" + region = DetIdVar("region()", "int16", doc = "0: barrel, +/-1: endcap"), + ring = DetIdVar("ring()", "int16", doc = "ring id:" "
wheel number in barrel - [-2:+2] range" "
ring number in endcap - [1:3] range"), - station = DetIdVar("station()", "int8", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), - layer = DetIdVar("layer()", "int8", doc = "layer id:" + station = DetIdVar("station()", "int16", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), + layer = DetIdVar("layer()", "int16", doc = "layer id:" "
barrel stations 1 and 2, have two layers of chambers " "(layer 1 is the inner chamber and layer 2 is the outer chamber)"), - sector = DetIdVar("sector()", "int8", doc = "group of chambers at same phi"), - subsector = DetIdVar("subsector()", "int8", doc = "Some sectors are divided along the phi direction in subsectors " + sector = DetIdVar("sector()", "int16", doc = "group of chambers at same phi"), + subsector = DetIdVar("subsector()", "int16", doc = "Some sectors are divided along the phi direction in subsectors " "(from 1 to 4 in Barrel, from 1 to 6 in Endcap)"), - roll = DetIdVar("roll()", "int8", doc = "roll id (also known as eta partition):" + roll = DetIdVar("roll()", "int16", doc = "roll id (also known as eta partition):" "
each chamber is divided along the strip direction"), rawId = DetIdVar("rawId()", "uint", doc = "unique detector unit ID") ) @@ -63,19 +63,19 @@ gemDigiFlatTableProducer.doc = "GEM digi information" gemDigiFlatTableProducer.variables = cms.PSet( - strip = Var("strip()", "int8", doc = "index of the readout strip associated to the digi"), - bx = Var("bx()", "int8", doc="bunch crossing associated to the digi") + strip = Var("strip()", "int16", doc = "index of the readout strip associated to the digi"), + bx = Var("bx()", "int16", doc="bunch crossing associated to the digi") ) gemDigiFlatTableProducer.detIdVariables = cms.PSet( - station = DetIdVar("station()", "int8", doc = "GEM station
(always 1 for GE1/1)"), - region = DetIdVar("region()", "int8", doc = "GE11 region where the digi is detected" + station = DetIdVar("station()", "int16", doc = "GEM station
(always 1 for GE1/1)"), + region = DetIdVar("region()", "int16", doc = "GE11 region where the digi is detected" "
(int, positive endcap: +1, negative endcap: -1)"), - roll = DetIdVar("roll()", "int8", doc = "roll id (also known as eta partition)" + roll = DetIdVar("roll()", "int16", doc = "roll id (also known as eta partition)" "
(partitions numbered from 1 to 8)"), - chamber = DetIdVar("chamber()", "int8", doc = "GE11 superchamber where the hit is reconstructed" + chamber = DetIdVar("chamber()", "int16", doc = "GE11 superchamber where the hit is reconstructed" "
(chambers numbered from 0 to 35)"), - layer = DetIdVar("layer()", "int8", doc = "GE11 layer where the hit is reconstructed" + layer = DetIdVar("layer()", "int16", doc = "GE11 layer where the hit is reconstructed" "
(layer1: 1, layer2: 2)") ) @@ -98,11 +98,11 @@ ) gemohStatusFlatTableProducer.detIdVariables = cms.PSet( - station = DetIdVar("station()", "int8", doc = "GEM station
always 1 for GE1/1)"), - region = DetIdVar("region()", "int8", doc = "region with which the GEMOHStatus is associated" + station = DetIdVar("station()", "int16", doc = "GEM station
always 1 for GE1/1)"), + region = DetIdVar("region()", "int16", doc = "region with which the GEMOHStatus is associated" "
int, positive endcap: +1, negative endcap: -1"), - chamber = DetIdVar("chamber()", "int8", doc = "chamber with which the GEMOHStatus is associated"), - layer = DetIdVar("layer()", "int8", doc = "layer with which the GEMOHStatus is associated
either 1 or 2 for GE1/1 and GE2/1") + chamber = DetIdVar("chamber()", "int16", doc = "chamber with which the GEMOHStatus is associated"), + layer = DetIdVar("layer()", "int16", doc = "layer with which the GEMOHStatus is associated
either 1 or 2 for GE1/1 and GE2/1") ) @@ -113,17 +113,17 @@ cscWireDigiFlatTableProducer.doc = "CSC wire digi information" cscWireDigiFlatTableProducer.variables = cms.PSet( - timeBin = Var("getTimeBin()", "int8", doc = ""), - wireGroup = Var("getWireGroup()", "int8", doc=""), - wireGroupBX = Var("getWireGroupBX()", "int8", doc="") + timeBin = Var("getTimeBin()", "int16", doc = ""), + wireGroup = Var("getWireGroup()", "int16", doc=""), + wireGroupBX = Var("getWireGroupBX()", "int16", doc="") ) cscWireDigiFlatTableProducer.detIdVariables = cms.PSet( - endcap = DetIdVar("endcap()", "int8", doc = ""), - station = DetIdVar("station()", "int8", doc = ""), - ring = DetIdVar("ring()", "int8", doc = ""), - chamber = DetIdVar("chamber()", "int8", doc = ""), - layer = DetIdVar("layer()", "int8", doc = "") + endcap = DetIdVar("endcap()", "int16", doc = ""), + station = DetIdVar("station()", "int16", doc = ""), + ring = DetIdVar("ring()", "int16", doc = ""), + chamber = DetIdVar("chamber()", "int16", doc = ""), + layer = DetIdVar("layer()", "int16", doc = "") ) from DPGAnalysis.MuonTools.cscAlctDigiFlatTableProducer_cfi import cscAlctDigiFlatTableProducer @@ -133,16 +133,16 @@ cscAlctDigiFlatTableProducer.doc = "CSC ALCT digi information" cscAlctDigiFlatTableProducer.variables = cms.PSet( - keyWireGroup = Var("getKeyWG()", "int8", doc = ""), - bx = Var("getBX()", "int8", doc="") + keyWireGroup = Var("getKeyWG()", "int16", doc = ""), + bx = Var("getBX()", "int16", doc="") ) cscAlctDigiFlatTableProducer.detIdVariables = cms.PSet( - endcap = DetIdVar("endcap()", "int8", doc = ""), - station = DetIdVar("station()", "int8", doc = ""), - ring = DetIdVar("ring()", "int8", doc = ""), - chamber = DetIdVar("chamber()", "int8", doc = ""), - layer = DetIdVar("layer()", "int8", doc = "") + endcap = DetIdVar("endcap()", "int16", doc = ""), + station = DetIdVar("station()", "int16", doc = ""), + ring = DetIdVar("ring()", "int16", doc = ""), + chamber = DetIdVar("chamber()", "int16", doc = 
""), + layer = DetIdVar("layer()", "int16", doc = "") ) muDigiProducers = cms.Sequence(dtDigiFlatTableProducer diff --git a/DPGAnalysis/MuonTools/python/nano_mu_local_reco_cff.py b/DPGAnalysis/MuonTools/python/nano_mu_local_reco_cff.py index 40fd37ccfa739..5046674c7c9a7 100644 --- a/DPGAnalysis/MuonTools/python/nano_mu_local_reco_cff.py +++ b/DPGAnalysis/MuonTools/python/nano_mu_local_reco_cff.py @@ -20,21 +20,21 @@ seg4D_dirLoc_z = Var("localDirection().z()", float, doc = "direction z in local coordinates"), seg2D_phi_t0 = Var(f"? hasPhi() ? phiSegment().t0() : {defaults.FLOAT}", float, doc = "t0 from segments with phi view - ns"), - seg2D_phi_nHits = Var(f"? hasPhi() ? phiSegment().specificRecHits().size() : 0", "int8", doc = "# hits in phi view - [0:8] range"), + seg2D_phi_nHits = Var(f"? hasPhi() ? phiSegment().specificRecHits().size() : 0", "int16", doc = "# hits in phi view - [0:8] range"), seg2D_phi_vDrift = Var(f"? hasPhi() ? phiSegment().vDrift() : {defaults.FLOAT_POS}", float, doc = "v_drift from segments with phi view"), seg2D_phi_normChi2 = Var(f"? hasPhi() ? (phiSegment().chi2() / phiSegment().degreesOfFreedom()) : {defaults.FLOAT_POS}", float, doc = "chi2/n.d.o.f. from segments with phi view"), seg2D_z_t0 = Var(f"? hasZed() ? zSegment().t0() : {defaults.FLOAT}", float, doc = "t0 from segments with z view - ns"), - seg2D_z_nHits = Var(f"? hasZed() ? zSegment().specificRecHits().size() : 0", "int8", doc = "# hits in z view - [0:4] range"), + seg2D_z_nHits = Var(f"? hasZed() ? zSegment().specificRecHits().size() : 0", "int16", doc = "# hits in z view - [0:4] range"), seg2D_z_normChi2 = Var(f"? hasZed() ? (zSegment().chi2() / zSegment().degreesOfFreedom()) : {defaults.FLOAT_POS}", float, doc = "chi2/n.d.o.f. 
from segments with z view"), ) dtSegmentFlatTableProducer.detIdVariables = cms.PSet( - wheel = DetIdVar("wheel()", "int8", doc = "wheel - [-2:2] range"), - sector = DetIdVar("sector()", "int8", doc = "sector - [1:14] range" + wheel = DetIdVar("wheel()", "int16", doc = "wheel - [-2:2] range"), + sector = DetIdVar("sector()", "int16", doc = "sector - [1:14] range" "
sector 13 used for the second MB4 of sector 4" "
sector 14 used for the second MB4 of sector 10"), - station = DetIdVar("station()", "int8", doc = "station - [1:4] range") + station = DetIdVar("station()", "int16", doc = "station - [1:4] range") ) dtSegmentFlatTableProducer.globalPosVariables = cms.PSet( @@ -58,26 +58,26 @@ rpcRecHitFlatTableProducer.variables = cms.PSet( bx = Var("BunchX()", int, doc="bunch crossing number"), time = Var("time()", float, doc = "time information in ns"), - firstClusterStrip = Var("firstClusterStrip()", "int8", doc = "lowest-numbered strip in the cluster"), - clusterSize = Var("clusterSize()", "int8", doc = "number of strips in the cluster"), + firstClusterStrip = Var("firstClusterStrip()", "int16", doc = "lowest-numbered strip in the cluster"), + clusterSize = Var("clusterSize()", "int16", doc = "number of strips in the cluster"), coordX = Var("localPosition().x()", float, doc = "position x in local coordinates - cm"), coordY = Var("localPosition().y()", float, doc = "position y in local coordinates - cm"), coordZ = Var("localPosition().z()", float, doc = "position z in local coordinates - cm"), ) rpcRecHitFlatTableProducer.detIdVariables = cms.PSet( - region = DetIdVar("region()", "int8", doc = "0: barrel, +-1: endcap"), - ring = DetIdVar("ring()", "int8", doc = "ring id:" + region = DetIdVar("region()", "int16", doc = "0: barrel, +-1: endcap"), + ring = DetIdVar("ring()", "int16", doc = "ring id:" "
wheel number in barrel (from -2 to +2)" "
ring number in endcap (from 1 to 3)"), - station = DetIdVar("station()", "int8", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), - layer = DetIdVar("layer()", "int8", doc = "layer id:" + station = DetIdVar("station()", "int16", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), + layer = DetIdVar("layer()", "int16", doc = "layer id:" "
in station 1 and 2 for barrel, we have two layers of chambers:" "
layer 1 is the inner chamber and layer 2 is the outer chamber"), - sector = DetIdVar("sector()", "int8", doc = "group of chambers at same phi"), - subsector = DetIdVar("subsector()", "int8", doc = "Some sectors are divided along the phi direction in subsectors " + sector = DetIdVar("sector()", "int16", doc = "group of chambers at same phi"), + subsector = DetIdVar("subsector()", "int16", doc = "Some sectors are divided along the phi direction in subsectors " "(from 1 to 4 in Barrel, from 1 to 6 in Endcap)"), - roll = DetIdVar("roll()", "int8", doc = "roll id (also known as eta partition):" + roll = DetIdVar("roll()", "int16", doc = "roll id (also known as eta partition):" "
each chamber is divided along the strip direction"), rawId = DetIdVar("rawId()", "uint", doc = "unique detector unit ID") ) @@ -91,18 +91,18 @@ ) dtrpcPointFlatTableProducer.detIdVariables = cms.PSet( - region = DetIdVar("region()", "int8", doc = "0: barrel, +-1: endcap"), - ring = DetIdVar("ring()", "int8", doc = "ring id:" + region = DetIdVar("region()", "int16", doc = "0: barrel, +-1: endcap"), + ring = DetIdVar("ring()", "int16", doc = "ring id:" "
wheel number in barrel (from -2 to +2)" "
ring number in endcap (from 1 to 3)"), - station = DetIdVar("station()", "int8", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), - layer = DetIdVar("layer()", "int8", doc = "layer id:" + station = DetIdVar("station()", "int16", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), + layer = DetIdVar("layer()", "int16", doc = "layer id:" "
in station 1 and 2 for barrel, we have two layers of chambers:" "
layer 1 is the inner chamber and layer 2 is the outer chamber"), - sector = DetIdVar("sector()", "int8", doc = "group of chambers at same phi"), - subsector = DetIdVar("subsector()", "int8", doc = "Some sectors are divided along the phi direction in subsectors " + sector = DetIdVar("sector()", "int16", doc = "group of chambers at same phi"), + subsector = DetIdVar("subsector()", "int16", doc = "Some sectors are divided along the phi direction in subsectors " "(from 1 to 4 in Barrel, from 1 to 6 in Endcap)"), - roll = DetIdVar("roll()", "int8", doc = "roll id (also known as eta partition):" + roll = DetIdVar("roll()", "int16", doc = "roll id (also known as eta partition):" "
each chamber is divided along the strip direction"), rawId = DetIdVar("rawId()", "uint", doc = "unique detector unit ID") ) @@ -117,18 +117,18 @@ ) cscrpcPointFlatTableProducer.detIdVariables = cms.PSet( - region = DetIdVar("region()", "int8", doc = "0: barrel, +-1: endcap"), - ring = DetIdVar("ring()", "int8", doc = "ring id:" + region = DetIdVar("region()", "int16", doc = "0: barrel, +-1: endcap"), + ring = DetIdVar("ring()", "int16", doc = "ring id:" "
wheel number in barrel (from -2 to +2)" "
ring number in endcap (from 1 to 3)"), - station = DetIdVar("station()", "int8", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), - layer = DetIdVar("layer()", "int8", doc = "layer id:" + station = DetIdVar("station()", "int16", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), + layer = DetIdVar("layer()", "int16", doc = "layer id:" "
in station 1 and 2 for barrel, we have two layers of chambers:" "
layer 1 is the inner chamber and layer 2 is the outer chamber"), - sector = DetIdVar("sector()", "int8", doc = "group of chambers at same phi"), - subsector = DetIdVar("subsector()", "int8", doc = "Some sectors are divided along the phi direction in subsectors " + sector = DetIdVar("sector()", "int16", doc = "group of chambers at same phi"), + subsector = DetIdVar("subsector()", "int16", doc = "Some sectors are divided along the phi direction in subsectors " "(from 1 to 4 in Barrel, from 1 to 6 in Endcap)"), - roll = DetIdVar("roll()", "int8", doc = "roll id (also known as eta partition):" + roll = DetIdVar("roll()", "int16", doc = "roll id (also known as eta partition):" "
each chamber is divided along the strip direction"), rawId = DetIdVar("rawId()", "uint", doc = "unique detector unit ID") ) @@ -144,18 +144,18 @@ ) dtrpcPointFlatTableProducer.detIdVariables = cms.PSet( - region = DetIdVar("region()", "int8", doc = "0: barrel, +-1: endcap"), - ring = DetIdVar("ring()", "int8", doc = "ring id:" + region = DetIdVar("region()", "int16", doc = "0: barrel, +-1: endcap"), + ring = DetIdVar("ring()", "int16", doc = "ring id:" "
wheel number in barrel (from -2 to +2)" "
ring number in endcap (from 1 to 3)"), - station = DetIdVar("station()", "int8", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), - layer = DetIdVar("layer()", "int8", doc = "layer id:" + station = DetIdVar("station()", "int16", doc = "chambers at same R in barrel, chambers at same Z ion endcap"), + layer = DetIdVar("layer()", "int16", doc = "layer id:" "
in station 1 and 2 for barrel, we have two layers of chambers:" "
layer 1 is the inner chamber and layer 2 is the outer chamber"), - sector = DetIdVar("sector()", "int8", doc = "group of chambers at same phi"), - subsector = DetIdVar("subsector()", "int8", doc = "Some sectors are divided along the phi direction in subsectors " + sector = DetIdVar("sector()", "int16", doc = "group of chambers at same phi"), + subsector = DetIdVar("subsector()", "int16", doc = "Some sectors are divided along the phi direction in subsectors " "(from 1 to 4 in Barrel, from 1 to 6 in Endcap)"), - roll = DetIdVar("roll()", "int8", doc = "roll id (also known as eta partition):" + roll = DetIdVar("roll()", "int16", doc = "roll id (also known as eta partition):" "
each chamber is divided along the strip direction"), rawId = DetIdVar("rawId()", "uint", doc = "unique detector unit ID") ) @@ -169,21 +169,21 @@ gemRecHitFlatTableProducer.variables = cms.PSet( bx = Var("BunchX()", int, doc="bunch crossing number"), - clusterSize = Var("clusterSize()", "int8", doc = "number of strips in the cluster"), loc_x = Var("localPosition().x()", float, doc = "hit position x in local coordinates - cm"), - firstClusterStrip = Var("firstClusterStrip()", "int8", doc = "lowest-numbered strip in the cluster"), + clusterSize = Var("clusterSize()", "int16", doc = "number of strips in the cluster"), loc_x = Var("localPosition().x()", float, doc = "hit position x in local coordinates - cm"), + firstClusterStrip = Var("firstClusterStrip()", "int16", doc = "lowest-numbered strip in the cluster"), loc_phi = Var("localPosition().phi().value()", float, doc = "hit position phi in local coordinates - rad"), loc_y = Var("localPosition().y()", float, doc = "hit position y in local coordinates - cm"), loc_z = Var("localPosition().z()", float, doc = "hit position z in local coordinates - cm"), ) gemRecHitFlatTableProducer.detIdVariables = cms.PSet( - roll = DetIdVar("roll()", "int8", doc = "roll id, also known as eta partition:" + roll = DetIdVar("roll()", "int16", doc = "roll id, also known as eta partition:" "
(partitions numbered from 1 to 8)"), - region = DetIdVar("region()", "int8", doc = "GE11 region where the hit is reconstructed" + region = DetIdVar("region()", "int16", doc = "GE11 region where the hit is reconstructed" "
(int, positive endcap: +1, negative endcap: -1)"), - chamber = DetIdVar("chamber()", "int8", doc = "GE11 superchamber where the hit is reconstructed" + chamber = DetIdVar("chamber()", "int16", doc = "GE11 superchamber where the hit is reconstructed" "
(chambers numbered from 0 to 35)"), - layer = DetIdVar("layer()", "int8", doc = "GE11 layer where the hit is reconstructed" + layer = DetIdVar("layer()", "int16", doc = "GE11 layer where the hit is reconstructed" "
(layer1: 1, layer2: 2)") ) @@ -213,11 +213,11 @@ ) gemSegmentFlatTableProducer.detIdVariables = cms.PSet( - region = DetIdVar("region()", "int8", doc = "GE11 region where the hit is reconstructed" + region = DetIdVar("region()", "int16", doc = "GE11 region where the hit is reconstructed" "
(int, positive endcap: +1, negative endcap: -1)"), - ring = DetIdVar("ring()", "int8", doc = ""), - station = DetIdVar("station()", "int8", doc = "GEM station
(always 1 for GE1/1)"), - chamber = DetIdVar("chamber()", "int8", doc = "GE11 superchamber where the hit is reconstructed" + ring = DetIdVar("ring()", "int16", doc = ""), + station = DetIdVar("station()", "int16", doc = "GEM station
(always 1 for GE1/1)"), + chamber = DetIdVar("chamber()", "int16", doc = "GE11 superchamber where the hit is reconstructed" "
(chambers numbered from 0 to 35)") ) diff --git a/DPGAnalysis/MuonTools/python/nano_mu_reco_cff.py b/DPGAnalysis/MuonTools/python/nano_mu_reco_cff.py index 1d4716f5099bf..d32ab9dcf79fc 100644 --- a/DPGAnalysis/MuonTools/python/nano_mu_reco_cff.py +++ b/DPGAnalysis/MuonTools/python/nano_mu_reco_cff.py @@ -30,18 +30,18 @@ trk_dz = Var(f"?!innerTrack().isNull()? dB('PVDZ') : {defaults.FLOAT}", float, doc="dz (with sign) wrt first PV - cm", precision=10), trk_dxy = Var(f"?!innerTrack().isNull()? dB('PV2D') : {defaults.FLOAT}", float, doc="dxy (with sign) wrt first PV - cm", precision=10), - trk_algo = Var(f"?!innerTrack().isNull()? innerTrack().algo() : {defaults.INT_POS}", "int8", doc="iterative tracking algorithm used to build the inner track"), - trk_origAlgo = Var(f"?!innerTrack().isNull()? innerTrack().originalAlgo() : {defaults.INT_POS}", "int8", doc="original (pre muon iterations) iterative tracking algorithm used to build the inner track"), + trk_algo = Var(f"?!innerTrack().isNull()? innerTrack().algo() : {defaults.INT_POS}", "int16", doc="iterative tracking algorithm used to build the inner track"), + trk_origAlgo = Var(f"?!innerTrack().isNull()? innerTrack().originalAlgo() : {defaults.INT_POS}", "int16", doc="original (pre muon iterations) iterative tracking algorithm used to build the inner track"), - trk_numberOfValidPixelHits = Var(f"?!innerTrack().isNull()? innerTrack().hitPattern().numberOfValidPixelHits() : {defaults.INT_POS}", "int8", doc="number of valid pixel hits"), - trk_numberOfValidTrackerLayers = Var(f"?!innerTrack().isNull()? innerTrack().hitPattern().trackerLayersWithMeasurement() : {defaults.INT_POS}", "int8", doc="number of valid tracker layers"), - trk_validFraction = Var(f"?!innerTrack().isNull()? innerTrack().validFraction() : {defaults.FLOAT_POS}", "int8", doc="fraction of tracker layer with muon hits"), + trk_numberOfValidPixelHits = Var(f"?!innerTrack().isNull()? 
innerTrack().hitPattern().numberOfValidPixelHits() : {defaults.INT_POS}", "int16", doc="number of valid pixel hits"), + trk_numberOfValidTrackerLayers = Var(f"?!innerTrack().isNull()? innerTrack().hitPattern().trackerLayersWithMeasurement() : {defaults.INT_POS}", "int16", doc="number of valid tracker layers"), + trk_validFraction = Var(f"?!innerTrack().isNull()? innerTrack().validFraction() : {defaults.FLOAT_POS}", "int16", doc="fraction of tracker layer with muon hits"), trkMu_stationMask = Var("stationMask()", "uint8", doc="bit map of stations with tracks within given distance (in cm) of chamber edges"), - trkMu_numberOfMatchedStations = Var("numberOfMatchedStations()", "int8", doc="number of matched DT/CSC stations"), - rpcMu_numberOfMatchedRPCLayers = Var("numberOfMatchedRPCLayers()", "int8", doc="number of matched RPC layers"), + trkMu_numberOfMatchedStations = Var("numberOfMatchedStations()", "int16", doc="number of matched DT/CSC stations"), + rpcMu_numberOfMatchedRPCLayers = Var("numberOfMatchedRPCLayers()", "int16", doc="number of matched RPC layers"), - staMu_numberOfValidMuonHits = Var(f"?isStandAloneMuon()? outerTrack().hitPattern().numberOfValidMuonHits() : {defaults.INT_POS}", "int8", doc="Number of valid muon hits"), + staMu_numberOfValidMuonHits = Var(f"?isStandAloneMuon()? outerTrack().hitPattern().numberOfValidMuonHits() : {defaults.INT_POS}", "int16", doc="Number of valid muon hits"), staMu_normChi2 = Var(f"?isStandAloneMuon()? outerTrack().chi2()/outerTrack().ndof() : {defaults.FLOAT_POS}", float, doc="chi2/ndof (standalone track)", precision=10), glbMu_normChi2 = Var(f"?isGlobalMuon()? 
globalTrack().chi2()/globalTrack().ndof() : {defaults.FLOAT_POS}", float, doc="chi2/ndof (global track)", precision=10) diff --git a/DPGAnalysis/Skims/python/Skims_DPG_cff.py b/DPGAnalysis/Skims/python/Skims_DPG_cff.py index 222bca5ee7fe4..6dea86124f6dc 100644 --- a/DPGAnalysis/Skims/python/Skims_DPG_cff.py +++ b/DPGAnalysis/Skims/python/Skims_DPG_cff.py @@ -416,6 +416,20 @@ ##################### +from DPGAnalysis.Skims.TeVJetSkim_cff import * +teVJetPath = cms.Path( teVJetSequence ) + +SKIMStreamTeVJet = cms.FilteredStream( + responsible = 'L1 DPG/JME POG', + name = 'TeVJet', + paths = ( teVJetPath ), + content = skimContent.outputCommands, + selectEvents = cms.untracked.PSet(), + dataTier = cms.untracked.string('RAW-RECO') + ) + +##################### + from DPGAnalysis.Skims.HighMETSkim_cff import * condPath = cms.Path(CondMETSelSeq) #pfPath = cms.Path(pfMETSelSeq) diff --git a/DPGAnalysis/Skims/python/TeVJetSkim_cff.py b/DPGAnalysis/Skims/python/TeVJetSkim_cff.py new file mode 100644 index 0000000000000..8c67bf0b8e2d0 --- /dev/null +++ b/DPGAnalysis/Skims/python/TeVJetSkim_cff.py @@ -0,0 +1,31 @@ +import FWCore.ParameterSet.Config as cms + + +# run on MIONAOD +RUN_ON_MINIAOD = False + + +# cuts +JET_CUT=("pt > 1000 && abs(eta)<5.0") + +# single lepton selectors +if RUN_ON_MINIAOD: + teVJets = cms.EDFilter("CandViewRefSelector", + src = cms.InputTag("slimmedJets"), + cut = cms.string(JET_CUT) + ) +else: + teVJets = cms.EDFilter("CandViewRefSelector", + src = cms.InputTag("ak4PFJets"), + cut = cms.string(JET_CUT) + ) + +teVJetsCountFilter = cms.EDFilter("CandViewCountFilter", + src = cms.InputTag("teVJets"), + minNumber = cms.uint32(1) + ) + + + +#sequences +teVJetSequence = cms.Sequence(teVJets*teVJetsCountFilter ) diff --git a/DPGAnalysis/Skims/python/TopMuEGSkim_cff.py b/DPGAnalysis/Skims/python/TopMuEGSkim_cff.py index dc6dc91dc3673..9421827ec271d 100755 --- a/DPGAnalysis/Skims/python/TopMuEGSkim_cff.py +++ b/DPGAnalysis/Skims/python/TopMuEGSkim_cff.py @@ 
-21,6 +21,10 @@ hltBtagTopMuEGSelection = cms.EDFilter("HLTHighLevel", TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"), HLTPaths = cms.vstring( + # 2022 + 'HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_PFDiJet30_PFBtagDeepCSV_1p5_v*', + 'HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_CaloDiJet30_CaloBtagDeepCSV_1p5_v*', + # 2023 'HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_PFDiJet30_PFBtagDeepJet_1p5_v*', # DeepCSV paths not available anymore. See https://its.cern.ch/jira/browse/CMSHLT-2592 'HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_PFDiJet30_PNet2BTagMean0p50_v*', # Taken from HLTrigger/Configuration/python/HLTrigger_Datasets_GRun_cff.py ), diff --git a/DQM/CSCMonitorModule/plugins/CSCDQM_Configuration.h b/DQM/CSCMonitorModule/plugins/CSCDQM_Configuration.h index ff1da1c1f8b56..9df35b0f0f48b 100644 --- a/DQM/CSCMonitorModule/plugins/CSCDQM_Configuration.h +++ b/DQM/CSCMonitorModule/plugins/CSCDQM_Configuration.h @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -45,8 +46,6 @@ #include #include -#include - /** Headers for Global DQM Only */ #ifdef DQMGLOBAL @@ -245,6 +244,7 @@ namespace cscdqm { void init() { /** Assign default values to parameters */ BOOST_PP_SEQ_FOR_EACH_I(CONFIG_PARAMETER_DEFAULT_MACRO, _, CONFIG_PARAMETERS_SEQ) + globalTimer = eventTimer = fraTimer = effTimer = std::clock(); reset(); } @@ -421,16 +421,16 @@ namespace cscdqm { private: /** Global Timer */ - boost::timer globalTimer; + std::clock_t globalTimer; /** Event processing Timer */ - boost::timer eventTimer; + std::clock_t eventTimer; /** Fractional MO update Timer */ - boost::timer fraTimer; + std::clock_t fraTimer; /** Efficiency MO update Timer */ - boost::timer effTimer; + std::clock_t effTimer; /** Event processing time cummulative */ double eventTimeSum; @@ -468,7 +468,7 @@ namespace cscdqm { * @return */ void printStats() { - double allTime = globalTimer.elapsed(); + double allTime = double(std::clock() - globalTimer) / 
CLOCKS_PER_SEC; LogInfo logger; logger << std::endl; @@ -523,9 +523,9 @@ namespace cscdqm { */ void eventProcessTimer(const bool start) { if (start) { - eventTimer.restart(); + eventTimer = std::clock(); } else { - eventTimeSum += eventTimer.elapsed(); + eventTimeSum += double(std::clock() - eventTimer) / CLOCKS_PER_SEC; } } @@ -536,9 +536,9 @@ namespace cscdqm { */ void updateFraTimer(const bool start) { if (start) { - fraTimer.restart(); + fraTimer = std::clock(); } else { - fraTimeSum += fraTimer.elapsed(); + fraTimeSum += double(std::clock() - fraTimer) / CLOCKS_PER_SEC; fraCount++; } } @@ -550,9 +550,9 @@ namespace cscdqm { */ void updateEffTimer(const bool start) { if (start) { - effTimer.restart(); + effTimer = std::clock(); } else { - effTimeSum += effTimer.elapsed(); + effTimeSum += double(std::clock() - effTimer) / CLOCKS_PER_SEC; effCount++; } } diff --git a/DQM/CSCMonitorModule/plugins/CSCDQM_HistoDef.h b/DQM/CSCMonitorModule/plugins/CSCDQM_HistoDef.h index fffe9aa2bf5cf..4a273aaff0527 100644 --- a/DQM/CSCMonitorModule/plugins/CSCDQM_HistoDef.h +++ b/DQM/CSCMonitorModule/plugins/CSCDQM_HistoDef.h @@ -72,6 +72,11 @@ namespace cscdqm { */ HistoDef(const HistoId p_id) : id(p_id) {} + /** + * @brief Copy constructor + */ + HistoDef(const HistoDef&) = default; + /** * @brief Base virtual destructor */ diff --git a/DQM/EcalCommon/interface/MESet.h b/DQM/EcalCommon/interface/MESet.h index 2abd1ff9056e3..052f69bc7af27 100644 --- a/DQM/EcalCommon/interface/MESet.h +++ b/DQM/EcalCommon/interface/MESet.h @@ -275,11 +275,10 @@ namespace ecaldqm { const_iterator(EcalElectronicsMapping const *, MESet const &_meSet, unsigned _iME = 0, int _iBin = 1) : bin_(_meSet, _iME, _iBin) {} const_iterator(EcalElectronicsMapping const *, MESet const &, DetId const &); - const_iterator(const_iterator const &_orig) : bin_(_orig.bin_) {} - const_iterator &operator=(const_iterator const &_rhs) { - bin_ = _rhs.bin_; - return *this; - } + const_iterator(const_iterator const &_orig) = 
default; + const_iterator(const_iterator &&_orig) = default; + const_iterator &operator=(const_iterator const &_orig) = default; + const_iterator &operator=(const_iterator &&_orig) = default; bool operator==(const_iterator const &_rhs) const { return bin_ == _rhs.bin_; } bool operator!=(const_iterator const &_rhs) const { return !(bin_ == _rhs.bin_); } ConstBin const *operator->() const { return &bin_; } @@ -304,7 +303,8 @@ namespace ecaldqm { : const_iterator(electronicsMap, _meSet, _id), bin_(_meSet) { bin_.ConstBin::operator=(const_iterator::bin_); } - iterator(iterator const &_orig) : const_iterator(_orig), bin_(_orig.bin_) {} + iterator(iterator const &_orig) = default; + iterator &operator=(iterator const &) = default; iterator &operator=(const_iterator const &_rhs) { const_iterator::operator=(_rhs); bin_.ConstBin::operator=(const_iterator::bin_); diff --git a/DQM/HLTEvF/plugins/HLTObjectMonitor.cc b/DQM/HLTEvF/plugins/HLTObjectMonitor.cc index d4223fcad86cb..f05ff73a84891 100644 --- a/DQM/HLTEvF/plugins/HLTObjectMonitor.cc +++ b/DQM/HLTEvF/plugins/HLTObjectMonitor.cc @@ -84,17 +84,12 @@ class HLTObjectMonitor : public DQMEDAnalyzer { public: explicit HLTObjectMonitor(const edm::ParameterSet&); - ~HLTObjectMonitor() override; - - // static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void analyze(const edm::Event&, const edm::EventSetup&) override; void bookHistograms(DQMStore::IBooker& i, edm::Run const&, edm::EventSetup const&) override; void dqmBeginRun(edm::Run const&, edm::EventSetup const&) override; vector plotList; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; double dxyFinder(double, double, edm::Handle, edm::Handle); double get_wall_time(void); // ----------member data --------------------------- @@ -346,11 +341,6 @@ HLTObjectMonitor::HLTObjectMonitor(const 
edm::ParameterSet& iConfig) csvPfJetsToken_ = consumes>(edm::InputTag("hltPFJetForBtag", "", processName_)); } -HLTObjectMonitor::~HLTObjectMonitor() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // diff --git a/DQM/HLTEvF/plugins/HLTObjectMonitorProtonLead.cc b/DQM/HLTEvF/plugins/HLTObjectMonitorProtonLead.cc index 0c37de07debd0..bb4e16b92e471 100644 --- a/DQM/HLTEvF/plugins/HLTObjectMonitorProtonLead.cc +++ b/DQM/HLTEvF/plugins/HLTObjectMonitorProtonLead.cc @@ -83,7 +83,6 @@ class HLTObjectMonitorProtonLead : public DQMEDAnalyzer { public: explicit HLTObjectMonitorProtonLead(const edm::ParameterSet&); - ~HLTObjectMonitorProtonLead() override; // static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); @@ -92,8 +91,6 @@ class HLTObjectMonitorProtonLead : public DQMEDAnalyzer { void bookHistograms(DQMStore::IBooker& i, edm::Run const&, edm::EventSetup const&) override; void dqmBeginRun(edm::Run const&, edm::EventSetup const&) override; vector plotList; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; double get_wall_time(void); // ----------member data --------------------------- @@ -327,11 +324,6 @@ HLTObjectMonitorProtonLead::HLTObjectMonitorProtonLead(const edm::ParameterSet& aodTriggerToken_ = consumes(iConfig.getParameter("triggerEvent")); } -HLTObjectMonitorProtonLead::~HLTObjectMonitorProtonLead() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // diff --git a/DQM/HLTEvF/python/HLTObjectMonitor_cfi.py b/DQM/HLTEvF/python/HLTObjectMonitor_cfi.py index eb14832c2c9de..92828bc0d8893 100644 --- a/DQM/HLTEvF/python/HLTObjectMonitor_cfi.py +++ b/DQM/HLTEvF/python/HLTObjectMonitor_cfi.py @@ -19,8 +19,8 @@ caloMet_pathName = "HLT_MET60_IsoTrk35_Loose" caloMet_moduleName = "hltMET60" -pfMet_pathName = "HLT_PFMET110_PFMHT110_IDTight" # "HLT_PFMET120_PFMHT120_IDTight" -pfMet_moduleName = "hltPFMET110" # "hltPFMET120" +pfMet_pathName = "HLT_PFMET120_PFMHT120_IDTight" +pfMet_moduleName = "hltPFMET120" jetAk8_pathName = "HLT_AK8PFJet400_TrimMass30" # "HLT_AK8PFJet360_TrimMass30" jetAk8_moduleName = "hltAK8SinglePFJet400TrimModMass30" # "hltAK8SinglePFJet360TrimModMass30" diff --git a/DQM/HLTEvF/python/HLTObjectsMonitor_EXO_cfi.py b/DQM/HLTEvF/python/HLTObjectsMonitor_EXO_cfi.py index 1874a9a407eb9..1736ccc37b611 100644 --- a/DQM/HLTEvF/python/HLTObjectsMonitor_EXO_cfi.py +++ b/DQM/HLTEvF/python/HLTObjectsMonitor_EXO_cfi.py @@ -80,8 +80,8 @@ doPlotDiMass = cms.untracked.bool(False), ), cms.PSet( - pathNAME = cms.string("HLT_PFMETNoMu110_PFMHTNoMu110_IDTight"), - moduleNAME = cms.string("hltPFMHTNoMuTightID110"), + pathNAME = cms.string("HLT_PFMETNoMu120_PFMHTNoMu120_IDTight"), + moduleNAME = cms.string("hltPFMHTNoMuTightID120"), label = cms.string("PF HT (no mu)"), xTITLE = cms.string("PF HT (no mu)"), etaBINNING = cms.vdouble(), @@ -119,8 +119,8 @@ doPlotDiMass = cms.untracked.bool(False), ), cms.PSet( - pathNAME = cms.string("HLT_PFMETNoMu110_PFMHTNoMu110_IDTight"), - moduleNAME = cms.string("hltPFMETNoMu110"), + pathNAME = cms.string("HLT_PFMETNoMu120_PFMHTNoMu120_IDTight"), + moduleNAME = cms.string("hltPFMETNoMu120"), label = cms.string("MET (no mu)"), xTITLE = cms.string("MET (no mu)"), etaBINNING = cms.vdouble(), @@ -158,8 +158,8 @@ doPlotDiMass = cms.untracked.bool(False), ), cms.PSet( - pathNAME = cms.string("HLT_PFMET110_PFMHT110_IDTight"), - moduleNAME = cms.string("hltPFMET110"), + 
pathNAME = cms.string("HLT_PFMET120_PFMHT120_IDTight"), + moduleNAME = cms.string("hltPFMET120"), label = cms.string("PF MET"), xTITLE = cms.string("PF MET"), etaBINNING = cms.vdouble(), @@ -197,8 +197,8 @@ doPlotDiMass = cms.untracked.bool(False), ), cms.PSet( - pathNAME = cms.string("HLT_PFMET110_PFMHT110_IDTight"), - moduleNAME = cms.string("hltPFMHTTightID110"), + pathNAME = cms.string("HLT_PFMET120_PFMHT120_IDTight"), + moduleNAME = cms.string("hltPFMHTTightID120"), label = cms.string("PF MHT"), xTITLE = cms.string("PF MHT"), etaBINNING = cms.vdouble(), diff --git a/DQM/HLTEvF/python/HLTObjectsMonitor_JME_cfi.py b/DQM/HLTEvF/python/HLTObjectsMonitor_JME_cfi.py index b65a87f72332c..895b1216fcafa 100644 --- a/DQM/HLTEvF/python/HLTObjectsMonitor_JME_cfi.py +++ b/DQM/HLTEvF/python/HLTObjectsMonitor_JME_cfi.py @@ -20,8 +20,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), @@ -59,8 +59,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), @@ -98,8 +98,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = 
cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), @@ -137,8 +137,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), @@ -176,8 +176,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), @@ -215,8 +215,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), @@ -254,8 +254,8 @@ displayInPrimary_energy = cms.bool(False), displayInPrimary_csv = cms.bool(False), displayInPrimary_etaVSphi = cms.bool(True), - displayInPrimary_pt_HEP17 = cms.bool(True), - displayInPrimary_pt_HEM17 = cms.bool(True), + displayInPrimary_pt_HEP17 = cms.bool(False), + displayInPrimary_pt_HEM17 = cms.bool(False), displayInPrimary_MR = cms.bool(False), displayInPrimary_RSQ = cms.bool(False), displayInPrimary_dxy = cms.bool(False), diff 
--git a/DQM/HLTEvF/python/HLTSiStripMonitoring_cff.py b/DQM/HLTEvF/python/HLTSiStripMonitoring_cff.py index 3eb16302732de..4f01a52b1b625 100644 --- a/DQM/HLTEvF/python/HLTSiStripMonitoring_cff.py +++ b/DQM/HLTEvF/python/HLTSiStripMonitoring_cff.py @@ -215,26 +215,24 @@ RejectTracks = cms.bool( True ) ) - - from DQMOffline.Trigger.SiStrip_OfflineMonitoring_cff import * -hltTrackRefitterForSiStripMonitorTrack.TTRHBuilder = cms.string('hltESPTTRHBWithTrackAngle') -hltTrackRefitterForSiStripMonitorTrack.Propagator = cms.string('hltESPRungeKuttaTrackerPropagator') -hltTrackRefitterForSiStripMonitorTrack.Fitter = cms.string('hltESPFittingSmootherIT') -hltTrackRefitterForSiStripMonitorTrack.MeasurementTrackerEvent = cms.InputTag('hltMeasurementTrackerEvent') -hltTrackRefitterForSiStripMonitorTrack.NavigationSchool = cms.string('navigationSchoolESProducer') -hltTrackRefitterForSiStripMonitorTrack.src = cms.InputTag("hltTracksMerged") # hltIter2Merged +hltTrackRefitterForSiStripMonitorTrack.TTRHBuilder = 'hltESPTTRHBWithTrackAngle' +hltTrackRefitterForSiStripMonitorTrack.Propagator = 'hltESPRungeKuttaTrackerPropagator' +hltTrackRefitterForSiStripMonitorTrack.Fitter = 'hltESPFittingSmootherIT' +hltTrackRefitterForSiStripMonitorTrack.MeasurementTrackerEvent = 'hltMeasurementTrackerEvent' +hltTrackRefitterForSiStripMonitorTrack.NavigationSchool = 'navigationSchoolESProducer' +hltTrackRefitterForSiStripMonitorTrack.src = 'hltMergedTracks' # hltIter2Merged -HLTSiStripMonitorTrack.TopFolderName = cms.string('HLT/SiStrip') +HLTSiStripMonitorTrack.TopFolderName = 'HLT/SiStrip' HLTSiStripMonitorTrack.TrackProducer = 'hltTrackRefitterForSiStripMonitorTrack' HLTSiStripMonitorTrack.TrackLabel = '' -HLTSiStripMonitorTrack.Cluster_src = cms.InputTag('hltSiStripRawToClustersFacility') -HLTSiStripMonitorTrack.AlgoName = cms.string("HLT") -HLTSiStripMonitorTrack.Trend_On = cms.bool(True) -HLTSiStripMonitorTrack.Mod_On = cms.bool(False) -HLTSiStripMonitorTrack.OffHisto_On = cms.bool(True) 
-HLTSiStripMonitorTrack.HistoFlag_On = cms.bool(False) -HLTSiStripMonitorTrack.TkHistoMap_On = cms.bool(False) +HLTSiStripMonitorTrack.Cluster_src = 'hltSiStripRawToClustersFacility' +HLTSiStripMonitorTrack.AlgoName = 'HLT' +HLTSiStripMonitorTrack.Trend_On = True +HLTSiStripMonitorTrack.Mod_On = False +HLTSiStripMonitorTrack.OffHisto_On = True +HLTSiStripMonitorTrack.HistoFlag_On = False +HLTSiStripMonitorTrack.TkHistoMap_On = False HLTSiStripMonitorClusterAPVgainCalibration = HLTSiStripMonitorCluster.clone() from DQM.TrackingMonitorSource.pset4GenericTriggerEventFlag_cfi import * diff --git a/DQM/HLTEvF/python/HLTTrackingMonitoring_cff.py b/DQM/HLTEvF/python/HLTTrackingMonitoring_cff.py index 564c59a98e3c8..c9df482e8dd39 100644 --- a/DQM/HLTEvF/python/HLTTrackingMonitoring_cff.py +++ b/DQM/HLTEvF/python/HLTTrackingMonitoring_cff.py @@ -12,7 +12,7 @@ trackingMonitoringHLTsequence = cms.Sequence( pixelTracksMonitoringHLT # hltPixel tracks monitoring * iter2MergedTracksMonitoringHLT # hltIter2Merged tracks monitoring - * iterHLTTracksMonitoringHLT # hltTracksMerged tracks monitoring + * iterHLTTracksMonitoringHLT # hltMergedTracks tracks monitoring ) egmTrackingMonitorHLTsequence = cms.Sequence( diff --git a/DQM/HcalCommon/interface/ContainerXXX.h b/DQM/HcalCommon/interface/ContainerXXX.h index 1ce4b870adc39..9656650363f04 100644 --- a/DQM/HcalCommon/interface/ContainerXXX.h +++ b/DQM/HcalCommon/interface/ContainerXXX.h @@ -22,6 +22,7 @@ namespace hcaldqm { ContainerXXX() {} ContainerXXX(hashfunctions::HashType ht) : _hashmap(ht) {} ContainerXXX(ContainerXXX const &x); + ContainerXXX &operator=(const ContainerXXX &other) = default; virtual ~ContainerXXX() { _cmap.clear(); } // initialize, booking. booking is done from Electronicsmap. 
diff --git a/DQM/HcalTasks/plugins/UMNioTask.cc b/DQM/HcalTasks/plugins/UMNioTask.cc index 0d2273a356c14..6146cdd2a56d9 100644 --- a/DQM/HcalTasks/plugins/UMNioTask.cc +++ b/DQM/HcalTasks/plugins/UMNioTask.cc @@ -60,7 +60,7 @@ UMNioTask::UMNioTask(edm::ParameterSet const& ps) } int UMNioTask::getOrbitGapIndex(uint8_t eventType, uint32_t laserType) { - constants::OrbitGapType orbitGapType; + constants::OrbitGapType orbitGapType = tNull; if (eventType == constants::EVENTTYPE_PHYSICS) { orbitGapType = tPhysics; } else if (eventType == constants::EVENTTYPE_PEDESTAL) { diff --git a/DQM/Integration/python/clients/beam_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/beam_dqm_sourceclient-live_cfg.py index 48131b4e4bde6..a15d331b490d1 100644 --- a/DQM/Integration/python/clients/beam_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/beam_dqm_sourceclient-live_cfg.py @@ -17,12 +17,10 @@ from Configuration.Eras.Era_Run3_cff import Run3 process = cms.Process("BeamMonitorLegacy", Run3) -process.MessageLogger = cms.Service("MessageLogger", - debugModules = cms.untracked.vstring('*'), - cerr = cms.untracked.PSet( - threshold = cms.untracked.string('WARNING') - ), - destinations = cms.untracked.vstring('cerr') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.debugModules = cms.untracked.vstring('*') +process.MessageLogger.cerr = cms.untracked.PSet( + threshold = cms.untracked.string('WARNING') ) # switch @@ -309,7 +307,7 @@ process.castorDigis.InputLabel = rawDataInputTag process.csctfDigis.producer = rawDataInputTag process.dttfDigis.DTTF_FED_Source = rawDataInputTag -process.ecalDigis.cpu.InputLabel = rawDataInputTag +process.ecalDigisCPU.InputLabel = rawDataInputTag process.ecalPreshowerDigis.sourceTag = rawDataInputTag process.gctDigis.inputLabel = rawDataInputTag process.gtDigis.DaqGtInputTag = rawDataInputTag diff --git a/DQM/Integration/python/clients/beamfake_dqm_sourceclient-live_cfg.py 
b/DQM/Integration/python/clients/beamfake_dqm_sourceclient-live_cfg.py index 589cb0bd790f5..c1ce11e58c568 100644 --- a/DQM/Integration/python/clients/beamfake_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/beamfake_dqm_sourceclient-live_cfg.py @@ -121,7 +121,7 @@ """ process.castorDigis.InputLabel = rawDataInputTag process.csctfDigis.producer = rawDataInputTag process.dttfDigis.DTTF_FED_Source = rawDataInputTag -process.ecalDigis.cpu.InputLabel = rawDataInputTag +process.ecalDigisCPU.InputLabel = rawDataInputTag process.ecalPreshowerDigis.sourceTag = rawDataInputTag process.gctDigis.inputLabel = rawDataInputTag process.gtDigis.DaqGtInputTag = rawDataInputTag diff --git a/DQM/Integration/python/clients/beamhlt_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/beamhlt_dqm_sourceclient-live_cfg.py index 3f3b6fba6e2d2..767fd8e02bdfd 100644 --- a/DQM/Integration/python/clients/beamhlt_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/beamhlt_dqm_sourceclient-live_cfg.py @@ -46,14 +46,22 @@ if unitTest: process.load("DQM.Integration.config.unitteststreamerinputsource_cfi") from DQM.Integration.config.unitteststreamerinputsource_cfi import options - # new stream label - process.source.streamLabel = cms.untracked.string('streamDQMOnlineBeamspot') + # stream label + if process.runType.getRunType() == process.runType.hi_run: + process.source.streamLabel = 'streamHIDQMOnlineBeamspot' + else: + process.source.streamLabel = 'streamDQMOnlineBeamspot' + elif live: # for live online DQM in P5 process.load("DQM.Integration.config.inputsource_cfi") from DQM.Integration.config.inputsource_cfi import options - # new stream label - process.source.streamLabel = cms.untracked.string('streamDQMOnlineBeamspot') + # stream label + if process.runType.getRunType() == process.runType.hi_run: + process.source.streamLabel = 'streamHIDQMOnlineBeamspot' + else: + process.source.streamLabel = 'streamDQMOnlineBeamspot' + else: 
process.load("DQM.Integration.config.fileinputsource_cfi") from DQM.Integration.config.fileinputsource_cfi import options diff --git a/DQM/Integration/python/clients/beamhltfake_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/beamhltfake_dqm_sourceclient-live_cfg.py index 6dbdc97fa5e4a..266f72b6eefe4 100644 --- a/DQM/Integration/python/clients/beamhltfake_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/beamhltfake_dqm_sourceclient-live_cfg.py @@ -38,13 +38,6 @@ process.load("DQM.Integration.config.fileinputsource_cfi") from DQM.Integration.config.fileinputsource_cfi import options -# new stream label -#process.source.streamLabel = cms.untracked.string('streamDQMOnlineBeamspot') - -# for testing in lxplus -#process.load("DQM.Integration.config.fileinputsource_cfi") -#from DQM.Integration.config.fileinputsource_cfi import options - #-------------------------- # HLT Filter # 0=random, 1=physics, 2=calibration, 3=technical @@ -173,6 +166,4 @@ # Final path print("Final Source settings:", process.source) -process.p = cms.Path(process.dqmcommon - * process.monitor ) - +process.p = cms.Path( process.dqmcommon * process.monitor ) diff --git a/DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py index d91ba52ffc396..a20d7e6435458 100644 --- a/DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py @@ -121,7 +121,7 @@ process.castorDigis.InputLabel = "rawDataCollector" process.csctfDigis.producer = "rawDataCollector" process.dttfDigis.DTTF_FED_Source = "rawDataCollector" - process.ecalDigis.cpu.InputLabel = "rawDataCollector" + process.ecalDigisCPU.InputLabel = "rawDataCollector" process.ecalPreshowerDigis.sourceTag = "rawDataCollector" process.gctDigis.inputLabel = "rawDataCollector" process.gtDigis.DaqGtInputTag = "rawDataCollector" @@ -174,7 +174,7 @@ 
process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git a/DQM/Integration/python/clients/csc_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/csc_dqm_sourceclient-live_cfg.py index 6630a69535dfa..d35d5114bf361 100644 --- a/DQM/Integration/python/clients/csc_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/csc_dqm_sourceclient-live_cfg.py @@ -180,7 +180,7 @@ process.castorDigis.InputLabel = "rawDataCollector" process.csctfDigis.producer = "rawDataCollector" process.dttfDigis.DTTF_FED_Source = "rawDataCollector" -process.ecalDigis.cpu.InputLabel = "rawDataCollector" +process.ecalDigisCPU.InputLabel = "rawDataCollector" process.ecalPreshowerDigis.sourceTag = "rawDataCollector" process.gctDigis.inputLabel = "rawDataCollector" process.gtDigis.DaqGtInputTag = "rawDataCollector" @@ -205,7 +205,7 @@ process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git a/DQM/Integration/python/clients/ecal_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/ecal_dqm_sourceclient-live_cfg.py index ef96ea65c011a..73564f1de5d7c 100644 --- a/DQM/Integration/python/clients/ecal_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/ecal_dqm_sourceclient-live_cfg.py @@ -53,13 +53,15 @@ ### Individual module setups 
### # Use the ratio timing method for the online DQM -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.timealgo = "RatioMethod" -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.outOfTimeThresholdGain12pEB = 5. -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.outOfTimeThresholdGain12mEB = 5. -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.outOfTimeThresholdGain61pEB = 5. -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.outOfTimeThresholdGain61mEB = 5. -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.timeCalibTag = ':' -process.ecalMultiFitUncalibRecHit.cpu.algoPSet.timeOffsetTag = ':' +process.ecalMultiFitUncalibRecHitCPU.algoPSet.timealgo = "RatioMethod" +process.ecalMultiFitUncalibRecHitCPU.algoPSet.outOfTimeThresholdGain12pEB = 5. +process.ecalMultiFitUncalibRecHitCPU.algoPSet.outOfTimeThresholdGain12mEB = 5. +process.ecalMultiFitUncalibRecHitCPU.algoPSet.outOfTimeThresholdGain61pEB = 5. +process.ecalMultiFitUncalibRecHitCPU.algoPSet.outOfTimeThresholdGain61mEB = 5. +process.ecalMultiFitUncalibRecHitCPU.algoPSet.timeCalibTag = ':' +process.ecalMultiFitUncalibRecHitCPU.algoPSet.timeOffsetTag = ':' +process.ecalRecHit.cpu.timeCalibTag = ':' +process.ecalRecHit.cpu.timeOffsetTag = ':' process.ecalPhysicsFilter = cms.EDFilter("EcalMonitorPrescaler", cosmics = cms.untracked.uint32(1), @@ -67,31 +69,26 @@ EcalRawDataCollection = cms.InputTag("ecalDigis") ) -process.MessageLogger = cms.Service("MessageLogger", - cerr = cms.untracked.PSet( - default = cms.untracked.PSet( - limit = cms.untracked.int32(-1) - ), - EcalLaserDbService = cms.untracked.PSet( - limit = cms.untracked.int32(10) - ), - noTimeStamps = cms.untracked.bool(True), - threshold = cms.untracked.string('WARNING'), - noLineBreaks = cms.untracked.bool(True) +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.cerr = cms.untracked.PSet( + default = cms.untracked.PSet( + limit = cms.untracked.int32(-1) ), - cout = cms.untracked.PSet( - default = cms.untracked.PSet( - limit = 
cms.untracked.int32(0) - ), - EcalDQM = cms.untracked.PSet( - limit = cms.untracked.int32(-1) - ), - threshold = cms.untracked.string('INFO') + EcalLaserDbService = cms.untracked.PSet( + limit = cms.untracked.int32(10) ), - categories = cms.untracked.vstring('EcalDQM', - 'EcalLaserDbService'), - destinations = cms.untracked.vstring('cerr', - 'cout') + noTimeStamps = cms.untracked.bool(True), + threshold = cms.untracked.string('WARNING'), + noLineBreaks = cms.untracked.bool(True) +) +process.MessageLogger.cout = cms.untracked.PSet( + default = cms.untracked.PSet( + limit = cms.untracked.int32(0) + ), + EcalDQM = cms.untracked.PSet( + limit = cms.untracked.int32(-1) + ), + threshold = cms.untracked.string('INFO') ) process.maxEvents = cms.untracked.PSet( @@ -196,7 +193,7 @@ process.ecalMonitorTask.workerParameters.PresampleTask.params.doPulseMaxCheck = False elif runTypeName == 'hi_run': process.ecalMonitorTask.collectionTags.Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = 'rawDataRepacker' + process.ecalDigisCPU.InputLabel = 'rawDataRepacker' elif runTypeName == 'hpu_run': if not unitTest: process.source.SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('*')) diff --git a/DQM/Integration/python/clients/ecalcalib_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/ecalcalib_dqm_sourceclient-live_cfg.py index e1598f63dfb0c..1bc1b49f9f4a0 100644 --- a/DQM/Integration/python/clients/ecalcalib_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/ecalcalib_dqm_sourceclient-live_cfg.py @@ -159,11 +159,9 @@ process.source.streamLabel = "streamDQMCalibration" - process.ecalPedestalMonitorTask.verbosity = 0 process.ecalPedestalMonitorTask.commonParameters.onlineMode = True - process.ecalLaserLedMonitorTask.verbosity = 0 process.ecalLaserLedMonitorTask.collectionTags.EBLaserLedUncalibRecHit = "ecalLaserLedUncalibRecHit:EcalUncalibRecHitsEB" process.ecalLaserLedMonitorTask.collectionTags.EELaserLedUncalibRecHit = 
"ecalLaserLedUncalibRecHit:EcalUncalibRecHitsEE" @@ -183,6 +181,8 @@ process.ecalRecHit.EEuncalibRecHitCollection = "ecalGlobalUncalibRecHit:EcalUncalibRecHitsEE" process.ecalRecHit.EBuncalibRecHitCollection = "ecalGlobalUncalibRecHit:EcalUncalibRecHitsEB" +process.ecalRecHit.timeCalibTag = ':' +process.ecalRecHit.timeOffsetTag = ':' process.ecalPNDiodeMonitorTask.verbosity = 0 process.ecalPNDiodeMonitorTask.commonParameters.onlineMode = True diff --git a/DQM/Integration/python/clients/ecalgpu_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/ecalgpu_dqm_sourceclient-live_cfg.py index e62d67e468478..30436be1deb18 100644 --- a/DQM/Integration/python/clients/ecalgpu_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/ecalgpu_dqm_sourceclient-live_cfg.py @@ -4,9 +4,7 @@ from Configuration.Eras.Era_Run3_cff import Run3 process = cms.Process("process", Run3) -unitTest = False -if 'unitTest=True' in sys.argv: - unitTest=True +unitTest = 'unitTest=True' in sys.argv ### Load cfis ### @@ -58,7 +56,11 @@ process.preScaler.prescaleFactor = 1 if not options.inputFiles: - process.source.streamLabel = cms.untracked.string("streamDQMGPUvsCPU") + # stream label + if process.runType.getRunType() == process.runType.hi_run: + process.source.streamLabel = "streamHIDQMGPUvsCPU" + else: + process.source.streamLabel = "streamDQMGPUvsCPU" process.dqmEnv.subSystemFolder = 'Ecal' process.dqmSaver.tag = 'EcalGPU' diff --git a/DQM/Integration/python/clients/hcalcalib_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/hcalcalib_dqm_sourceclient-live_cfg.py index 11ca9f357b3bf..43dfb475db9d0 100644 --- a/DQM/Integration/python/clients/hcalcalib_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/hcalcalib_dqm_sourceclient-live_cfg.py @@ -240,11 +240,11 @@ print("Final Source settings:", process.source) process.options = cms.untracked.PSet( Rethrow = cms.untracked.vstring( -# "ProductNotFound", + "ProductNotFound", "TooManyProducts", 
"TooFewProducts" - ), - SkipEvent = cms.untracked.vstring( - 'ProductNotFound' ) +# SkipEvent = cms.untracked.vstring( +# 'ProductNotFound' +# ) ) diff --git a/DQM/Integration/python/clients/hcalgpu_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/hcalgpu_dqm_sourceclient-live_cfg.py index 7c1c674c61265..3b4d7630b2a92 100644 --- a/DQM/Integration/python/clients/hcalgpu_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/hcalgpu_dqm_sourceclient-live_cfg.py @@ -21,36 +21,42 @@ useOfflineGT = False useFileInput = False useMap = False - -unitTest = False -if 'unitTest=True' in sys.argv: - unitTest=True - useFileInput=False +unitTest = 'unitTest=True' in sys.argv #------------------------------------- # Central DQM Stuff imports #------------------------------------- from DQM.Integration.config.online_customizations_cfi import * + if useOfflineGT: - process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") - process.GlobalTag.globaltag = autoCond['run3_data_prompt'] + process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") + process.GlobalTag.globaltag = autoCond['run3_data_prompt'] else: - process.load('DQM.Integration.config.FrontierCondition_GT_cfi') + process.load('DQM.Integration.config.FrontierCondition_GT_cfi') + if unitTest: - process.load("DQM.Integration.config.unitteststreamerinputsource_cfi") - from DQM.Integration.config.unitteststreamerinputsource_cfi import options + process.load("DQM.Integration.config.unitteststreamerinputsource_cfi") + from DQM.Integration.config.unitteststreamerinputsource_cfi import options elif useFileInput: - process.load("DQM.Integration.config.fileinputsource_cfi") - from DQM.Integration.config.fileinputsource_cfi import options + process.load("DQM.Integration.config.fileinputsource_cfi") + from DQM.Integration.config.fileinputsource_cfi import options else: - process.load('DQM.Integration.config.inputsource_cfi') - from 
DQM.Integration.config.inputsource_cfi import options + process.load('DQM.Integration.config.inputsource_cfi') + from DQM.Integration.config.inputsource_cfi import options + process.load('DQM.Integration.config.environment_cfi') #------------------------------------- # Central DQM Customization #------------------------------------- -process.source.streamLabel = cms.untracked.string("streamDQMGPUvsCPU") + +if not useFileInput: + # stream label + if process.runType.getRunType() == process.runType.hi_run: + process.source.streamLabel = "streamHIDQMGPUvsCPU" + else: + process.source.streamLabel = "streamDQMGPUvsCPU" + process.dqmEnv.subSystemFolder = subsystem process.dqmSaver.tag = 'HcalGPU' process.dqmSaver.runNumber = options.runNumber diff --git a/DQM/Integration/python/clients/l1t_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/l1t_dqm_sourceclient-live_cfg.py index 84a996a8e0251..b356b80900fa9 100644 --- a/DQM/Integration/python/clients/l1t_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/l1t_dqm_sourceclient-live_cfg.py @@ -173,7 +173,7 @@ process.castorDigis.InputLabel = "rawDataCollector" process.csctfDigis.producer = "rawDataCollector" process.dttfDigis.DTTF_FED_Source = "rawDataCollector" -process.ecalDigis.cpu.InputLabel = "rawDataCollector" +process.ecalDigisCPU.InputLabel = "rawDataCollector" process.ecalPreshowerDigis.sourceTag = "rawDataCollector" process.gctDigis.inputLabel = "rawDataCollector" process.gtDigis.DaqGtInputTag = "rawDataCollector" @@ -192,7 +192,7 @@ process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git 
a/DQM/Integration/python/clients/l1temulator_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/l1temulator_dqm_sourceclient-live_cfg.py index c42e7eabcb60c..6435e7e224413 100644 --- a/DQM/Integration/python/clients/l1temulator_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/l1temulator_dqm_sourceclient-live_cfg.py @@ -186,7 +186,7 @@ process.castorDigis.InputLabel = "rawDataCollector" process.csctfDigis.producer = "rawDataCollector" process.dttfDigis.DTTF_FED_Source = "rawDataCollector" -process.ecalDigis.cpu.InputLabel = "rawDataCollector" +process.ecalDigisCPU.InputLabel = "rawDataCollector" process.ecalPreshowerDigis.sourceTag = "rawDataCollector" process.gctDigis.inputLabel = "rawDataCollector" process.gtDigis.DaqGtInputTag = "rawDataCollector" @@ -208,7 +208,7 @@ process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git a/DQM/Integration/python/clients/l1tstage1_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/l1tstage1_dqm_sourceclient-live_cfg.py index a71cea1aef341..47272fe19a1a0 100644 --- a/DQM/Integration/python/clients/l1tstage1_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/l1tstage1_dqm_sourceclient-live_cfg.py @@ -183,7 +183,7 @@ process.castorDigis.InputLabel = "rawDataCollector" process.csctfDigis.producer = "rawDataCollector" process.dttfDigis.DTTF_FED_Source = "rawDataCollector" -process.ecalDigis.cpu.InputLabel = "rawDataCollector" +process.ecalDigisCPU.InputLabel = "rawDataCollector" process.ecalPreshowerDigis.sourceTag = "rawDataCollector" process.gctDigis.inputLabel = "rawDataCollector" process.gtDigis.DaqGtInputTag = 
"rawDataCollector" @@ -202,7 +202,7 @@ process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git a/DQM/Integration/python/clients/l1tstage1emulator_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/l1tstage1emulator_dqm_sourceclient-live_cfg.py index 07821ec686fdd..5b8559bc502d5 100644 --- a/DQM/Integration/python/clients/l1tstage1emulator_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/l1tstage1emulator_dqm_sourceclient-live_cfg.py @@ -195,7 +195,7 @@ process.castorDigis.InputLabel = "rawDataCollector" process.csctfDigis.producer = "rawDataCollector" process.dttfDigis.DTTF_FED_Source = "rawDataCollector" -process.ecalDigis.cpu.InputLabel = "rawDataCollector" +process.ecalDigisCPU.InputLabel = "rawDataCollector" process.ecalPreshowerDigis.sourceTag = "rawDataCollector" process.gctDigis.inputLabel = "rawDataCollector" process.gtDigis.DaqGtInputTag = "rawDataCollector" @@ -217,7 +217,7 @@ process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git a/DQM/Integration/python/clients/l1tstage2_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/l1tstage2_dqm_sourceclient-live_cfg.py index 1e08647bed02c..0350ce6412c73 100644 --- a/DQM/Integration/python/clients/l1tstage2_dqm_sourceclient-live_cfg.py +++ 
b/DQM/Integration/python/clients/l1tstage2_dqm_sourceclient-live_cfg.py @@ -132,7 +132,7 @@ process.castorDigis.InputLabel = rawDataRepackerLabel process.ctppsDiamondRawToDigi.rawDataTag = rawDataRepackerLabel process.ctppsPixelDigis.inputLabel = rawDataRepackerLabel - process.ecalDigis.cpu.InputLabel = rawDataRepackerLabel + process.ecalDigisCPU.InputLabel = rawDataRepackerLabel process.ecalPreshowerDigis.sourceTag = rawDataRepackerLabel process.hcalDigis.InputLabel = rawDataRepackerLabel process.muonCSCDigis.InputObjects = rawDataRepackerLabel diff --git a/DQM/Integration/python/clients/l1tstage2emulator_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/l1tstage2emulator_dqm_sourceclient-live_cfg.py index 50f00b5cea742..41e11e6a4bd97 100644 --- a/DQM/Integration/python/clients/l1tstage2emulator_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/l1tstage2emulator_dqm_sourceclient-live_cfg.py @@ -131,7 +131,7 @@ process.castorDigis.InputLabel = rawDataRepackerLabel process.ctppsDiamondRawToDigi.rawDataTag = rawDataRepackerLabel process.ctppsPixelDigis.inputLabel = rawDataRepackerLabel - process.ecalDigis.cpu.InputLabel = rawDataRepackerLabel + process.ecalDigisCPU.InputLabel = rawDataRepackerLabel process.ecalPreshowerDigis.sourceTag = rawDataRepackerLabel process.hcalDigis.InputLabel = rawDataRepackerLabel process.muonCSCDigis.InputObjects = rawDataRepackerLabel diff --git a/DQM/Integration/python/clients/onlinebeammonitor_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/onlinebeammonitor_dqm_sourceclient-live_cfg.py index 98cc2d229ad10..bd176dd037099 100644 --- a/DQM/Integration/python/clients/onlinebeammonitor_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/onlinebeammonitor_dqm_sourceclient-live_cfg.py @@ -23,10 +23,8 @@ # destinations = cms.untracked.vstring('cerr'), #) +unitTest = 'unitTest=True' in sys.argv -unitTest=False -if 'unitTest=True' in sys.argv: - unitTest=True #----------------------------- 
if unitTest: import FWCore.ParameterSet.VarParsing as VarParsing @@ -97,16 +95,11 @@ process.source.firstRun = cms.untracked.uint32(options.runNumber) process.source.firstLuminosityBlock = cms.untracked.uint32(1) process.source.numberEventsInLuminosityBlock = cms.untracked.uint32(2) - process.maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(100) -) + process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100)) else: process.load("DQM.Integration.config.inputsource_cfi") from DQM.Integration.config.inputsource_cfi import options - # for live online DQM in P5 - # new stream label - #process.source.streamLabel = cms.untracked.string('streamDQMOnlineBeamspot') #ESProducer process.load("CondCore.CondDB.CondDB_cfi") @@ -168,7 +161,6 @@ from DQM.Integration.config.online_customizations_cfi import * process = customise(process) +process.p = cms.Path( process.dqmcommon * process.monitor ) -process.p = cms.Path( process.dqmcommon - * process.monitor ) print("Final Source settings:", process.source) diff --git a/DQM/Integration/python/clients/pixel_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/pixel_dqm_sourceclient-live_cfg.py index 7df9fa22ac802..c069029538198 100644 --- a/DQM/Integration/python/clients/pixel_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/pixel_dqm_sourceclient-live_cfg.py @@ -22,14 +22,12 @@ TAG ="PixelPhase1" -process.MessageLogger = cms.Service("MessageLogger", - debugModules = cms.untracked.vstring('siPixelDigis', - 'siStripClusters', - 'SiPixelRawDataErrorSource', - 'SiPixelDigiSource'), - cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')), - destinations = cms.untracked.vstring('cout') -) +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.debugModules = cms.untracked.vstring('siPixelDigis', + 'siStripClusters', + 'SiPixelRawDataErrorSource', + 'SiPixelDigiSource') +process.MessageLogger.cout = cms.untracked.PSet(threshold = 
cms.untracked.string('ERROR')) #---------------------------- # Event Source diff --git a/DQM/Integration/python/clients/pixelgpu_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/pixelgpu_dqm_sourceclient-live_cfg.py index 9b78532960769..6f22a866399d2 100644 --- a/DQM/Integration/python/clients/pixelgpu_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/pixelgpu_dqm_sourceclient-live_cfg.py @@ -21,36 +21,42 @@ useOfflineGT = False useFileInput = False useMap = False - -unitTest = False -if 'unitTest=True' in sys.argv: - unitTest=True - useFileInput=False +unitTest = 'unitTest=True' in sys.argv #------------------------------------- # Central DQM Stuff imports #------------------------------------- from DQM.Integration.config.online_customizations_cfi import * + if useOfflineGT: - process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") - process.GlobalTag.globaltag = autoCond['run3_data_prompt'] + process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") + process.GlobalTag.globaltag = autoCond['run3_data_prompt'] else: - process.load('DQM.Integration.config.FrontierCondition_GT_cfi') + process.load('DQM.Integration.config.FrontierCondition_GT_cfi') + if unitTest: - process.load("DQM.Integration.config.unitteststreamerinputsource_cfi") - from DQM.Integration.config.unitteststreamerinputsource_cfi import options + process.load("DQM.Integration.config.unitteststreamerinputsource_cfi") + from DQM.Integration.config.unitteststreamerinputsource_cfi import options elif useFileInput: - process.load("DQM.Integration.config.fileinputsource_cfi") - from DQM.Integration.config.fileinputsource_cfi import options + process.load("DQM.Integration.config.fileinputsource_cfi") + from DQM.Integration.config.fileinputsource_cfi import options else: - process.load('DQM.Integration.config.inputsource_cfi') - from DQM.Integration.config.inputsource_cfi import options + 
process.load('DQM.Integration.config.inputsource_cfi') + from DQM.Integration.config.inputsource_cfi import options + process.load('DQM.Integration.config.environment_cfi') #------------------------------------- # Central DQM Customization #------------------------------------- -process.source.streamLabel = cms.untracked.string("streamDQMGPUvsCPU") + +if not useFileInput: + # stream label + if process.runType.getRunType() == process.runType.hi_run: + process.source.streamLabel = "streamHIDQMGPUvsCPU" + else: + process.source.streamLabel = "streamDQMGPUvsCPU" + process.dqmEnv.subSystemFolder = subsystem process.dqmSaver.tag = 'PixelGPU' process.dqmSaver.runNumber = options.runNumber diff --git a/DQM/Integration/python/clients/ppsrandom_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/ppsrandom_dqm_sourceclient-live_cfg.py index 8240ed8331816..7679b85bd1989 100644 --- a/DQM/Integration/python/clients/ppsrandom_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/ppsrandom_dqm_sourceclient-live_cfg.py @@ -5,10 +5,7 @@ process = cms.Process('CTPPSDQM', Run3) test = False -unitTest = False - -if 'unitTest=True' in sys.argv: - unitTest=True +unitTest = 'unitTest=True' in sys.argv # event source if unitTest: @@ -29,7 +26,7 @@ 'drop *', 'keep FEDRawDataCollection_*_*_*' ) - + process.source.streamLabel = "streamDQMPPSRandom" # DQM environment diff --git a/DQM/Integration/python/clients/ramdisk_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/ramdisk_dqm_sourceclient-live_cfg.py index ef24c64d778bf..e442b86085a1c 100644 --- a/DQM/Integration/python/clients/ramdisk_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/ramdisk_dqm_sourceclient-live_cfg.py @@ -30,6 +30,10 @@ ) ) +# stream label +if process.runType.getRunType() == process.runType.hi_run: + process.analyzer.streamLabels[0] = "streamHIDQM" + process.p = cms.Path(process.analyzer) process.dqmsave_step = cms.Path(process.dqmEnv * process.dqmSaver) diff --git 
a/DQM/Integration/python/clients/scal_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/scal_dqm_sourceclient-live_cfg.py index 8388d384151ec..e677a2c05ede6 100644 --- a/DQM/Integration/python/clients/scal_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/scal_dqm_sourceclient-live_cfg.py @@ -37,10 +37,8 @@ process.load("DQMServices.Components.DQMScalInfo_cfi") # message logger -process.MessageLogger = cms.Service("MessageLogger", - destinations = cms.untracked.vstring('cout'), - cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING')) - ) +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING')) # Global tag # Condition for P5 cluster @@ -91,7 +89,7 @@ process.castorDigis.InputLabel = "rawDataRepacker" process.csctfDigis.producer = "rawDataRepacker" process.dttfDigis.DTTF_FED_Source = "rawDataRepacker" - process.ecalDigis.cpu.InputLabel = "rawDataRepacker" + process.ecalDigisCPU.InputLabel = "rawDataRepacker" process.ecalPreshowerDigis.sourceTag = "rawDataRepacker" process.gctDigis.inputLabel = "rawDataRepacker" process.gtDigis.DaqGtInputTag = "rawDataRepacker" diff --git a/DQM/Integration/python/clients/sistrip_approx_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/sistrip_approx_dqm_sourceclient-live_cfg.py index 1708fc82aeae6..19f43ef65315e 100644 --- a/DQM/Integration/python/clients/sistrip_approx_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/sistrip_approx_dqm_sourceclient-live_cfg.py @@ -9,15 +9,13 @@ from Configuration.Eras.Era_Run3_cff import Run3 process = cms.Process("SiStripApproxMonitor", Run3) -process.MessageLogger = cms.Service("MessageLogger", - debugModules = cms.untracked.vstring('siStripDigis', - 'siStripClusters', - 'siStripZeroSuppression', - 'SiStripClusterizer', - 'siStripApproximateClusterComparator'), - cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')), - 
destinations = cms.untracked.vstring('cout') - ) +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.debugModules = cms.untracked.vstring('siStripDigis', + 'siStripClusters', + 'siStripZeroSuppression', + 'SiStripClusterizer', + 'siStripApproximateClusterComparator') +process.MessageLogger.cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')) live=True unitTest=False @@ -189,7 +187,7 @@ process.castorDigis.InputLabel = rawDataRepackerLabel process.csctfDigis.producer = rawDataRepackerLabel process.dttfDigis.DTTF_FED_Source = rawDataRepackerLabel - process.ecalDigis.cpu.InputLabel = rawDataRepackerLabel + process.ecalDigisCPU.InputLabel = rawDataRepackerLabel process.ecalPreshowerDigis.sourceTag = rawDataRepackerLabel process.gctDigis.inputLabel = rawDataRepackerLabel process.hcalDigis.InputLabel = rawDataRepackerLabel diff --git a/DQM/Integration/python/clients/sistrip_dqm_sourceclient-live_cfg.py b/DQM/Integration/python/clients/sistrip_dqm_sourceclient-live_cfg.py index e06ddfada3199..a784c2d35e345 100644 --- a/DQM/Integration/python/clients/sistrip_dqm_sourceclient-live_cfg.py +++ b/DQM/Integration/python/clients/sistrip_dqm_sourceclient-live_cfg.py @@ -9,14 +9,12 @@ from Configuration.Eras.Era_Run3_cff import Run3 process = cms.Process("SiStripMonitor", Run3) -process.MessageLogger = cms.Service("MessageLogger", - debugModules = cms.untracked.vstring('siStripDigis', - 'siStripClusters', - 'siStripZeroSuppression', - 'SiStripClusterizer'), - cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')), - destinations = cms.untracked.vstring('cout') -) +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.debugModules = cms.untracked.vstring('siStripDigis', + 'siStripClusters', + 'siStripZeroSuppression', + 'SiStripClusterizer') +process.MessageLogger.cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')) live=True unitTest=False @@ -517,7 +515,7 @@ 
process.castorDigis.InputLabel = rawDataCollectorLabel process.csctfDigis.producer = rawDataCollectorLabel process.dttfDigis.DTTF_FED_Source = rawDataCollectorLabel -process.ecalDigis.cpu.InputLabel = rawDataCollectorLabel +process.ecalDigisCPU.InputLabel = rawDataCollectorLabel process.ecalPreshowerDigis.sourceTag = rawDataCollectorLabel process.gctDigis.inputLabel = rawDataCollectorLabel process.gtDigis.DaqGtInputTag = rawDataCollectorLabel @@ -541,7 +539,7 @@ process.castorDigis.InputLabel = rawDataRepackerLabel process.csctfDigis.producer = rawDataRepackerLabel process.dttfDigis.DTTF_FED_Source = rawDataRepackerLabel - process.ecalDigis.cpu.InputLabel = rawDataRepackerLabel + process.ecalDigisCPU.InputLabel = rawDataRepackerLabel process.ecalPreshowerDigis.sourceTag = rawDataRepackerLabel process.gctDigis.inputLabel = rawDataRepackerLabel process.hcalDigis.InputLabel = rawDataRepackerLabel diff --git a/DQM/Integration/python/clients/visualization-live-secondInstance_cfg.py b/DQM/Integration/python/clients/visualization-live-secondInstance_cfg.py index ec2b36cea9a08..3a648a7cf7501 100644 --- a/DQM/Integration/python/clients/visualization-live-secondInstance_cfg.py +++ b/DQM/Integration/python/clients/visualization-live-secondInstance_cfg.py @@ -7,9 +7,7 @@ Example configuration for online reconstruction meant for visualization clients. 
""" -unitTest = False -if 'unitTest=True' in sys.argv: - unitTest=True +unitTest = 'unitTest=True' in sys.argv if unitTest: from DQM.Integration.config.unittestinputsource_cfi import options, runType, source @@ -73,9 +71,15 @@ process.source.skipFirstLumis = True process.source.minEventsPerLumi = 0 process.source.nextLumiTimeoutMillis = 10000 - process.source.streamLabel = 'streamDQMEventDisplay' - if options.BeamSplashRun : - set_BeamSplashRun_settings( process.source ) + + if options.BeamSplashRun: + set_BeamSplashRun_settings( process.source ) + + # stream label + if runType.getRunType() == runType.hi_run: + process.source.streamLabel = "streamHIDQMEventDisplay" + else: + process.source.streamLabel = "streamDQMEventDisplay" m = re.search(r"\((\w+)\)", str(source.runNumber)) runno = str(m.group(1)) diff --git a/DQM/Integration/python/clients/visualization-live_cfg.py b/DQM/Integration/python/clients/visualization-live_cfg.py index d62f03b8e9400..774be0eccc717 100644 --- a/DQM/Integration/python/clients/visualization-live_cfg.py +++ b/DQM/Integration/python/clients/visualization-live_cfg.py @@ -71,7 +71,6 @@ process.source.skipFirstLumis = True process.source.minEventsPerLumi = 0 process.source.nextLumiTimeoutMillis = 10000 - process.source.streamLabel = 'streamDQM' if options.BeamSplashRun : set_BeamSplashRun_settings( process.source ) diff --git a/DQM/Integration/python/config/inputsource_cfi.py b/DQM/Integration/python/config/inputsource_cfi.py index f343fa7f9a5a7..240c5445fe968 100644 --- a/DQM/Integration/python/config/inputsource_cfi.py +++ b/DQM/Integration/python/config/inputsource_cfi.py @@ -89,7 +89,7 @@ runType = RunType() if not options.runkey.strip(): - options.runkey = 'pp_run' + options.runkey = 'pp_run' runType.setRunType(options.runkey.strip()) @@ -97,16 +97,22 @@ # Input source nextLumiTimeoutMillis = 240000 endOfRunKills = True - + if options.scanOnce: endOfRunKills = False nextLumiTimeoutMillis = 0 - + + # stream label + if runType.getRunType() 
== runType.hi_run: + streamLabel = 'streamHIDQM' + else: + streamLabel = 'streamDQM' + source = cms.Source("DQMStreamerReader", runNumber = cms.untracked.uint32(options.runNumber), runInputDir = cms.untracked.string(options.runInputDir), SelectEvents = cms.untracked.vstring('*'), - streamLabel = cms.untracked.string('streamDQM'), + streamLabel = cms.untracked.string(streamLabel), scanOnce = cms.untracked.bool(options.scanOnce), datafnPosition = cms.untracked.uint32(options.datafnPosition), minEventsPerLumi = cms.untracked.int32(1), @@ -117,6 +123,7 @@ endOfRunKills = cms.untracked.bool(endOfRunKills), inputFileTransitionsEachEvent = cms.untracked.bool(False) ) + else: print("The list of input files is provided. Disabling discovery and running on everything.") files = ["file://" + x for x in options.inputFiles] diff --git a/DQM/Integration/python/config/unitteststreamerinputsource_cfi.py b/DQM/Integration/python/config/unitteststreamerinputsource_cfi.py index 2ba13ca602804..c4ff7d8a27ccd 100644 --- a/DQM/Integration/python/config/unitteststreamerinputsource_cfi.py +++ b/DQM/Integration/python/config/unitteststreamerinputsource_cfi.py @@ -12,12 +12,12 @@ check that the input directory exists and there are files in it ''' def checkInputFolder(streamer_folder): - if not (os.path.exists(streamer_folder) and os.path.isdir(os.path.join(streamer_folder))): - raise IOError("Input folder '%s' does not exist in CMSSW_SEARCH_PATH" % streamer_folder) + if not (os.path.exists(streamer_folder) and os.path.isdir(os.path.join(streamer_folder))): + raise IOError("Input folder '%s' does not exist in CMSSW_SEARCH_PATH" % streamer_folder) - items = os.listdir(dqm_streamer_folder) - if not items: - raise IOError("Input folder '%s' does not contain any file" % streamer_folder) + items = os.listdir(dqm_streamer_folder) + if not items: + raise IOError("Input folder '%s' does not contain any file" % streamer_folder) # Dedine and register options options = VarParsing.VarParsing("analysis") 
@@ -49,7 +49,7 @@ def checkInputFolder(streamer_folder): "Data filename position in the positional arguments array 'data' in json file.") options.register('streamLabel', - 'streamDQM', # default DQM stream value + '', VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Name of the stream") @@ -94,12 +94,32 @@ def checkInputFolder(streamer_folder): print("Reading streamer files from:\n ",dqm_streamer_folder) checkInputFolder(dqm_streamer_folder) +maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(-1) +) + +runType = RunType() +if not options.runkey.strip(): + options.runkey = "pp_run" + +runType.setRunType(options.runkey.strip()) + +# stream label +# If streamLabel is empty, so not specified as a command-line argument, +# use the default value, i.e. "streamHIDQM" for runType==hi_run and "streamDQM" otherwise +streamLabel = options.streamLabel +if streamLabel == '': + if runType.getRunType() == runType.hi_run: + streamLabel = 'streamHIDQM' + else: + streamLabel = 'streamDQM' + # Set the process source source = cms.Source("DQMStreamerReader", runNumber = cms.untracked.uint32(options.runNumber), runInputDir = cms.untracked.string(dqm_integration_data), SelectEvents = cms.untracked.vstring('*'), - streamLabel = cms.untracked.string(options.streamLabel), + streamLabel = cms.untracked.string(streamLabel), scanOnce = cms.untracked.bool(options.scanOnce), datafnPosition = cms.untracked.uint32(options.datafnPosition), minEventsPerLumi = cms.untracked.int32(1000), @@ -110,13 +130,3 @@ def checkInputFolder(streamer_folder): endOfRunKills = cms.untracked.bool(False), inputFileTransitionsEachEvent = cms.untracked.bool(False) ) - -maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(-1) -) - -runType = RunType() -if not options.runkey.strip(): - options.runkey = "pp_run" - -runType.setRunType(options.runkey.strip()) diff --git a/DQM/L1TMonitorClient/src/L1TOccupancyClient.cc b/DQM/L1TMonitorClient/src/L1TOccupancyClient.cc 
index 52a7a1b6a436c..9abb66db22879 100644 --- a/DQM/L1TMonitorClient/src/L1TOccupancyClient.cc +++ b/DQM/L1TMonitorClient/src/L1TOccupancyClient.cc @@ -247,14 +247,6 @@ void L1TOccupancyClient::dqmEndJob(DQMStore::IBooker& ibooker, DQMStore::IGetter delete hservice_; } -//____________________________________________________________________________ -// Function: beginLuminosityBlock -// Description: This is will be run at the begining of each luminosity block -// Inputs: -// * const LuminosityBlock& lumiSeg = Luminosity Block information -// * const EventSetup& context = Event Setup information -//____________________________________________________________________________ - //____________________________________________________________________________ // Function: endLuminosityBlock // Description: This is will be run at the end of each luminosity block diff --git a/DQM/SiPixelHeterogeneous/plugins/BuildFile.xml b/DQM/SiPixelHeterogeneous/plugins/BuildFile.xml index 66adf1666762e..79925fdcb6cf8 100644 --- a/DQM/SiPixelHeterogeneous/plugins/BuildFile.xml +++ b/DQM/SiPixelHeterogeneous/plugins/BuildFile.xml @@ -5,8 +5,11 @@ + + + + - diff --git a/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareRecHitsSoAAlpaka.cc b/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareRecHitsSoAAlpaka.cc new file mode 100644 index 0000000000000..474194ad72616 --- /dev/null +++ b/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareRecHitsSoAAlpaka.cc @@ -0,0 +1,244 @@ +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DataFormats/Math/interface/approx_atan2.h" +#include "DataFormats/SiPixelDetId/interface/PixelSubdetector.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include 
"FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" +#include "Geometry/CommonTopologies/interface/PixelTopology.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" + +template +class SiPixelCompareRecHitsSoAAlpaka : public DQMEDAnalyzer { +public: + using HitsOnHost = TrackingRecHitHost; + + explicit SiPixelCompareRecHitsSoAAlpaka(const edm::ParameterSet&); + ~SiPixelCompareRecHitsSoAAlpaka() override = default; + void dqmBeginRun(const edm::Run&, const edm::EventSetup&) override; + void bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) override; + void analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + const edm::ESGetToken geomToken_; + const edm::ESGetToken topoToken_; + const edm::EDGetTokenT tokenSoAHitsHost_; //these two are both on Host but originally they have been + const edm::EDGetTokenT tokenSoAHitsDevice_; //produced on Host or on Device + const std::string topFolderName_; + const float mind2cut_; + static constexpr uint32_t invalidHit_ = std::numeric_limits::max(); + static constexpr float micron_ = 10000.; + const TrackerGeometry* tkGeom_ = nullptr; + const TrackerTopology* tTopo_ = nullptr; + MonitorElement* hnHits_; + MonitorElement* hBchargeL_[4]; // max 4 barrel hits + MonitorElement* hBsizexL_[4]; + MonitorElement* hBsizeyL_[4]; + MonitorElement* hBposxL_[4]; + MonitorElement* hBposyL_[4]; + MonitorElement* hFchargeD_[2][12]; // max 12 endcap disks 
+ MonitorElement* hFsizexD_[2][12]; + MonitorElement* hFsizeyD_[2][12]; + MonitorElement* hFposxD_[2][12]; + MonitorElement* hFposyD_[2][12]; + //differences + MonitorElement* hBchargeDiff_; + MonitorElement* hFchargeDiff_; + MonitorElement* hBsizeXDiff_; + MonitorElement* hFsizeXDiff_; + MonitorElement* hBsizeYDiff_; + MonitorElement* hFsizeYDiff_; + MonitorElement* hBposXDiff_; + MonitorElement* hFposXDiff_; + MonitorElement* hBposYDiff_; + MonitorElement* hFposYDiff_; +}; + +// +// constructors +// +template +SiPixelCompareRecHitsSoAAlpaka::SiPixelCompareRecHitsSoAAlpaka(const edm::ParameterSet& iConfig) + : geomToken_(esConsumes()), + topoToken_(esConsumes()), + tokenSoAHitsHost_(consumes(iConfig.getParameter("pixelHitsSrcHost"))), + tokenSoAHitsDevice_(consumes(iConfig.getParameter("pixelHitsSrcDevice"))), + topFolderName_(iConfig.getParameter("topFolderName")), + mind2cut_(iConfig.getParameter("minD2cut")) {} + +// +// Begin Run +// +template +void SiPixelCompareRecHitsSoAAlpaka::dqmBeginRun(const edm::Run& iRun, const edm::EventSetup& iSetup) { + tkGeom_ = &iSetup.getData(geomToken_); + tTopo_ = &iSetup.getData(topoToken_); +} + +// +// -- Analyze +// +template +void SiPixelCompareRecHitsSoAAlpaka::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + const auto& rhsoaHandleHost = iEvent.getHandle(tokenSoAHitsHost_); + const auto& rhsoaHandleDevice = iEvent.getHandle(tokenSoAHitsDevice_); + if (not rhsoaHandleHost or not rhsoaHandleDevice) { + edm::LogWarning out("SiPixelCompareRecHitsSoAAlpaka"); + if (not rhsoaHandleHost) { + out << "reference (Host) rechits not found; "; + } + if (not rhsoaHandleDevice) { + out << "target (Device) rechits not found; "; + } + out << "the comparison will not run."; + return; + } + + auto const& rhsoaHost = *rhsoaHandleHost; + auto const& rhsoaDevice = *rhsoaHandleDevice; + + auto const& soa2dHost = rhsoaHost.const_view(); + auto const& soa2dDevice = rhsoaDevice.const_view(); + + uint32_t nHitsHost = 
soa2dHost.metadata().size(); + uint32_t nHitsDevice = soa2dDevice.metadata().size(); + + hnHits_->Fill(nHitsHost, nHitsDevice); + auto detIds = tkGeom_->detUnitIds(); + for (uint32_t i = 0; i < nHitsHost; i++) { + float minD = mind2cut_; + uint32_t matchedHit = invalidHit_; + uint16_t indHost = soa2dHost[i].detectorIndex(); + float xLocalHost = soa2dHost[i].xLocal(); + float yLocalHost = soa2dHost[i].yLocal(); + for (uint32_t j = 0; j < nHitsDevice; j++) { + if (soa2dDevice.detectorIndex(j) == indHost) { + float dx = xLocalHost - soa2dDevice[j].xLocal(); + float dy = yLocalHost - soa2dDevice[j].yLocal(); + float distance = dx * dx + dy * dy; + if (distance < minD) { + minD = distance; + matchedHit = j; + } + } + } + DetId id = detIds[indHost]; + uint32_t chargeHost = soa2dHost[i].chargeAndStatus().charge; + int16_t sizeXHost = std::ceil(float(std::abs(soa2dHost[i].clusterSizeX()) / 8.)); + int16_t sizeYHost = std::ceil(float(std::abs(soa2dHost[i].clusterSizeY()) / 8.)); + uint32_t chargeDevice = 0; + int16_t sizeXDevice = -99; + int16_t sizeYDevice = -99; + float xLocalDevice = -999.; + float yLocalDevice = -999.; + if (matchedHit != invalidHit_) { + chargeDevice = soa2dDevice[matchedHit].chargeAndStatus().charge; + sizeXDevice = std::ceil(float(std::abs(soa2dDevice[matchedHit].clusterSizeX()) / 8.)); + sizeYDevice = std::ceil(float(std::abs(soa2dDevice[matchedHit].clusterSizeY()) / 8.)); + xLocalDevice = soa2dDevice[matchedHit].xLocal(); + yLocalDevice = soa2dDevice[matchedHit].yLocal(); + } + switch (id.subdetId()) { + case PixelSubdetector::PixelBarrel: + hBchargeL_[tTopo_->pxbLayer(id) - 1]->Fill(chargeHost, chargeDevice); + hBsizexL_[tTopo_->pxbLayer(id) - 1]->Fill(sizeXHost, sizeXDevice); + hBsizeyL_[tTopo_->pxbLayer(id) - 1]->Fill(sizeYHost, sizeYDevice); + hBposxL_[tTopo_->pxbLayer(id) - 1]->Fill(xLocalHost, xLocalDevice); + hBposyL_[tTopo_->pxbLayer(id) - 1]->Fill(yLocalHost, yLocalDevice); + hBchargeDiff_->Fill(chargeHost - chargeDevice); + 
hBsizeXDiff_->Fill(sizeXHost - sizeXDevice); + hBsizeYDiff_->Fill(sizeYHost - sizeYDevice); + hBposXDiff_->Fill(micron_ * (xLocalHost - xLocalDevice)); + hBposYDiff_->Fill(micron_ * (yLocalHost - yLocalDevice)); + break; + case PixelSubdetector::PixelEndcap: + hFchargeD_[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(chargeHost, chargeDevice); + hFsizexD_[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(sizeXHost, sizeXDevice); + hFsizeyD_[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(sizeYHost, sizeYDevice); + hFposxD_[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(xLocalHost, xLocalDevice); + hFposyD_[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(yLocalHost, yLocalDevice); + hFchargeDiff_->Fill(chargeHost - chargeDevice); + hFsizeXDiff_->Fill(sizeXHost - sizeXDevice); + hFsizeYDiff_->Fill(sizeYHost - sizeYDevice); + hFposXDiff_->Fill(micron_ * (xLocalHost - xLocalDevice)); + hFposYDiff_->Fill(micron_ * (yLocalHost - yLocalDevice)); + break; + } + } +} + +// +// -- Book Histograms +// +template +void SiPixelCompareRecHitsSoAAlpaka::bookHistograms(DQMStore::IBooker& iBook, + edm::Run const& iRun, + edm::EventSetup const& iSetup) { + iBook.cd(); + iBook.setCurrentFolder(topFolderName_); + + // clang-format off + //Global + hnHits_ = iBook.book2I("nHits", "HostvsDevice RecHits per event;#Host RecHits;#Device RecHits", 200, 0, 5000,200, 0, 5000); + //Barrel Layer + for(unsigned int il=0;ilnumberOfLayers(PixelSubdetector::PixelBarrel);il++){ + hBchargeL_[il] = iBook.book2I(Form("recHitsBLay%dCharge",il+1), Form("HostvsDevice RecHits Charge Barrel Layer%d;Host Charge;Device Charge",il+1), 250, 0, 100000, 250, 0, 100000); + hBsizexL_[il] = iBook.book2I(Form("recHitsBLay%dSizex",il+1), Form("HostvsDevice RecHits SizeX Barrel Layer%d;Host SizeX;Device SizeX",il+1), 30, 0, 30, 30, 0, 30); + hBsizeyL_[il] = iBook.book2I(Form("recHitsBLay%dSizey",il+1), Form("HostvsDevice RecHits SizeY Barrel Layer%d;Host SizeY;Device SizeY",il+1), 
30, 0, 30, 30, 0, 30); + hBposxL_[il] = iBook.book2D(Form("recHitsBLay%dPosx",il+1), Form("HostvsDevice RecHits x-pos in Barrel Layer%d;Host pos x;Device pos x",il+1), 200, -5, 5, 200,-5,5); + hBposyL_[il] = iBook.book2D(Form("recHitsBLay%dPosy",il+1), Form("HostvsDevice RecHits y-pos in Barrel Layer%d;Host pos y;Device pos y",il+1), 200, -5, 5, 200,-5,5); + } + //Endcaps + //Endcaps Disk + for(int is=0;is<2;is++){ + int sign=is==0? -1:1; + for(unsigned int id=0;idnumberOfLayers(PixelSubdetector::PixelEndcap);id++){ + hFchargeD_[is][id] = iBook.book2I(Form("recHitsFDisk%+dCharge",id*sign+sign), Form("HostvsDevice RecHits Charge Endcaps Disk%+d;Host Charge;Device Charge",id*sign+sign), 250, 0, 100000, 250, 0, 100000); + hFsizexD_[is][id] = iBook.book2I(Form("recHitsFDisk%+dSizex",id*sign+sign), Form("HostvsDevice RecHits SizeX Endcaps Disk%+d;Host SizeX;Device SizeX",id*sign+sign), 30, 0, 30, 30, 0, 30); + hFsizeyD_[is][id] = iBook.book2I(Form("recHitsFDisk%+dSizey",id*sign+sign), Form("HostvsDevice RecHits SizeY Endcaps Disk%+d;Host SizeY;Device SizeY",id*sign+sign), 30, 0, 30, 30, 0, 30); + hFposxD_[is][id] = iBook.book2D(Form("recHitsFDisk%+dPosx",id*sign+sign), Form("HostvsDevice RecHits x-pos Endcaps Disk%+d;Host pos x;Device pos x",id*sign+sign), 200, -5, 5, 200, -5, 5); + hFposyD_[is][id] = iBook.book2D(Form("recHitsFDisk%+dPosy",id*sign+sign), Form("HostvsDevice RecHits y-pos Endcaps Disk%+d;Host pos y;Device pos y",id*sign+sign), 200, -5, 5, 200, -5, 5); + } + } + //1D differences + hBchargeDiff_ = iBook.book1D("rechitChargeDiffBpix","Charge differnce of rechits in BPix; rechit charge difference (Host - Device)", 101, -50.5, 50.5); + hFchargeDiff_ = iBook.book1D("rechitChargeDiffFpix","Charge differnce of rechits in FPix; rechit charge difference (Host - Device)", 101, -50.5, 50.5); + hBsizeXDiff_ = iBook.book1D("rechitsizeXDiffBpix","SizeX difference of rechits in BPix; rechit sizex difference (Host - Device)", 21, -10.5, 10.5); + hFsizeXDiff_ = 
iBook.book1D("rechitsizeXDiffFpix","SizeX difference of rechits in FPix; rechit sizex difference (Host - Device)", 21, -10.5, 10.5); + hBsizeYDiff_ = iBook.book1D("rechitsizeYDiffBpix","SizeY difference of rechits in BPix; rechit sizey difference (Host - Device)", 21, -10.5, 10.5); + hFsizeYDiff_ = iBook.book1D("rechitsizeYDiffFpix","SizeY difference of rechits in FPix; rechit sizey difference (Host - Device)", 21, -10.5, 10.5); + hBposXDiff_ = iBook.book1D("rechitsposXDiffBpix","x-position difference of rechits in BPix; rechit x-pos difference (Host - Device)", 1000, -10, 10); + hFposXDiff_ = iBook.book1D("rechitsposXDiffFpix","x-position difference of rechits in FPix; rechit x-pos difference (Host - Device)", 1000, -10, 10); + hBposYDiff_ = iBook.book1D("rechitsposYDiffBpix","y-position difference of rechits in BPix; rechit y-pos difference (Host - Device)", 1000, -10, 10); + hFposYDiff_ = iBook.book1D("rechitsposYDiffFpix","y-position difference of rechits in FPix; rechit y-pos difference (Host - Device)", 1000, -10, 10); +} + +template +void SiPixelCompareRecHitsSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // monitorpixelRecHitsSoAAlpaka + edm::ParameterSetDescription desc; + desc.add("pixelHitsSrcHost", edm::InputTag("siPixelRecHitsPreSplittingAlpakaSerial")); + desc.add("pixelHitsSrcDevice", edm::InputTag("siPixelRecHitsPreSplittingAlpaka")); + desc.add("topFolderName", "SiPixelHeterogeneous/PixelRecHitsCompareDeviceVSHost"); + desc.add("minD2cut", 0.0001); + descriptions.addWithDefaultLabel(desc); +} + +using SiPixelPhase1CompareRecHitsSoAAlpaka = SiPixelCompareRecHitsSoAAlpaka; +using SiPixelPhase2CompareRecHitsSoAAlpaka = SiPixelCompareRecHitsSoAAlpaka; +using SiPixelHIonPhase1CompareRecHitsSoAAlpaka = SiPixelCompareRecHitsSoAAlpaka; + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(SiPixelPhase1CompareRecHitsSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelPhase2CompareRecHitsSoAAlpaka); 
+DEFINE_FWK_MODULE(SiPixelHIonPhase1CompareRecHitsSoAAlpaka); diff --git a/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareTrackSoAAlpaka.cc b/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareTrackSoAAlpaka.cc new file mode 100644 index 0000000000000..025bdfd988ea6 --- /dev/null +++ b/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareTrackSoAAlpaka.cc @@ -0,0 +1,308 @@ +// for string manipulations +#include +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/Math/interface/deltaR.h" +#include "DataFormats/Math/interface/deltaPhi.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/InputTag.h" +// DQM Histograming +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +// DataFormats +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" + +namespace { + // same logic used for the MTV: + // cf https://github.com/cms-sw/cmssw/blob/master/Validation/RecoTrack/src/MTVHistoProducerAlgoForTracker.cc + typedef dqm::reco::DQMStore DQMStore; + + void setBinLog(TAxis* axis) { + int bins = axis->GetNbins(); + float from = axis->GetXmin(); + float to = axis->GetXmax(); + float width = (to - from) / bins; + std::vector new_bins(bins + 1, 0); + for (int i = 0; i <= bins; i++) { + new_bins[i] = TMath::Power(10, from + i * width); + } + axis->Set(bins, new_bins.data()); + } + + void setBinLogX(TH1* h) { + TAxis* axis = h->GetXaxis(); + setBinLog(axis); + } + void setBinLogY(TH1* h) { + TAxis* axis = h->GetYaxis(); + setBinLog(axis); + } + + template + dqm::reco::MonitorElement* make2DIfLog(DQMStore::IBooker& ibook, bool logx, bool 
logy, Args&&... args) { + auto h = std::make_unique(std::forward(args)...); + if (logx) + setBinLogX(h.get()); + if (logy) + setBinLogY(h.get()); + const auto& name = h->GetName(); + return ibook.book2I(name, h.release()); + } +} // namespace + +template +class SiPixelCompareTrackSoAAlpaka : public DQMEDAnalyzer { +public: + using PixelTrackSoA = TracksHost; + + explicit SiPixelCompareTrackSoAAlpaka(const edm::ParameterSet&); + ~SiPixelCompareTrackSoAAlpaka() override = default; + void bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) override; + void analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + const edm::EDGetTokenT tokenSoATrackHost_; + const edm::EDGetTokenT tokenSoATrackDevice_; + const std::string topFolderName_; + const bool useQualityCut_; + const pixelTrack::Quality minQuality_; + const float dr2cut_; + MonitorElement* hnTracks_; + MonitorElement* hnLooseAndAboveTracks_; + MonitorElement* hnLooseAndAboveTracks_matched_; + MonitorElement* hnHits_; + MonitorElement* hnHitsVsPhi_; + MonitorElement* hnHitsVsEta_; + MonitorElement* hnLayers_; + MonitorElement* hnLayersVsPhi_; + MonitorElement* hnLayersVsEta_; + MonitorElement* hCharge_; + MonitorElement* hchi2_; + MonitorElement* hChi2VsPhi_; + MonitorElement* hChi2VsEta_; + MonitorElement* hpt_; + MonitorElement* hptLogLog_; + MonitorElement* heta_; + MonitorElement* hphi_; + MonitorElement* hz_; + MonitorElement* htip_; + MonitorElement* hquality_; + //1D differences + MonitorElement* hptdiffMatched_; + MonitorElement* hCurvdiffMatched_; + MonitorElement* hetadiffMatched_; + MonitorElement* hphidiffMatched_; + MonitorElement* hzdiffMatched_; + MonitorElement* htipdiffMatched_; + + //for matching eff vs region: derive the ratio at harvesting + MonitorElement* hpt_eta_tkAllHost_; + MonitorElement* hpt_eta_tkAllHostMatched_; + MonitorElement* 
hphi_z_tkAllHost_; + MonitorElement* hphi_z_tkAllHostMatched_; +}; + +// +// constructors +// + +template +SiPixelCompareTrackSoAAlpaka::SiPixelCompareTrackSoAAlpaka(const edm::ParameterSet& iConfig) + : tokenSoATrackHost_(consumes(iConfig.getParameter("pixelTrackSrcHost"))), + tokenSoATrackDevice_(consumes(iConfig.getParameter("pixelTrackSrcDevice"))), + topFolderName_(iConfig.getParameter("topFolderName")), + useQualityCut_(iConfig.getParameter("useQualityCut")), + minQuality_(pixelTrack::qualityByName(iConfig.getParameter("minQuality"))), + dr2cut_(iConfig.getParameter("deltaR2cut")) {} + +// +// -- Analyze +// +template +void SiPixelCompareTrackSoAAlpaka::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + using helper = TracksUtilities; + const auto& tsoaHandleHost = iEvent.getHandle(tokenSoATrackHost_); + const auto& tsoaHandleDevice = iEvent.getHandle(tokenSoATrackDevice_); + if (not tsoaHandleHost or not tsoaHandleDevice) { + edm::LogWarning out("SiPixelCompareTrackSoAAlpaka"); + if (not tsoaHandleHost) { + out << "reference (cpu) tracks not found; "; + } + if (not tsoaHandleDevice) { + out << "target (gpu) tracks not found; "; + } + out << "the comparison will not run."; + return; + } + + auto const& tsoaHost = *tsoaHandleHost; + auto const& tsoaDevice = *tsoaHandleDevice; + auto maxTracksHost = tsoaHost.view().metadata().size(); //this should be same for both? + auto maxTracksDevice = tsoaDevice.view().metadata().size(); //this should be same for both? + auto const* qualityHost = tsoaHost.view().quality(); + auto const* qualityDevice = tsoaDevice.view().quality(); + int32_t nTracksHost = 0; + int32_t nTracksDevice = 0; + int32_t nLooseAndAboveTracksHost = 0; + int32_t nLooseAndAboveTracksHost_matchedDevice = 0; + int32_t nLooseAndAboveTracksDevice = 0; + + //Loop over Device tracks and store the indices of the loose tracks. Whats happens if useQualityCut_ is false? 
+ std::vector looseTrkidxDevice; + for (int32_t jt = 0; jt < maxTracksDevice; ++jt) { + if (helper::nHits(tsoaDevice.view(), jt) == 0) + break; // this is a guard + if (!(tsoaDevice.view()[jt].pt() > 0.)) + continue; + nTracksDevice++; + if (useQualityCut_ && qualityDevice[jt] < minQuality_) + continue; + nLooseAndAboveTracksDevice++; + looseTrkidxDevice.emplace_back(jt); + } + + //Now loop over Host tracks//nested loop for loose gPU tracks + for (int32_t it = 0; it < maxTracksHost; ++it) { + int nHitsHost = helper::nHits(tsoaHost.view(), it); + + if (nHitsHost == 0) + break; // this is a guard + + float ptHost = tsoaHost.view()[it].pt(); + float etaHost = tsoaHost.view()[it].eta(); + float phiHost = reco::phi(tsoaHost.view(), it); + float zipHost = reco::zip(tsoaHost.view(), it); + float tipHost = reco::tip(tsoaHost.view(), it); + + if (!(ptHost > 0.)) + continue; + nTracksHost++; + if (useQualityCut_ && qualityHost[it] < minQuality_) + continue; + nLooseAndAboveTracksHost++; + //Now loop over loose Device trk and find the closest in DeltaR//do we need pt cut? 
+ const int32_t notFound = -1; + int32_t closestTkidx = notFound; + float mindr2 = dr2cut_; + + for (auto gid : looseTrkidxDevice) { + float etaDevice = tsoaDevice.view()[gid].eta(); + float phiDevice = reco::phi(tsoaDevice.view(), gid); + float dr2 = reco::deltaR2(etaHost, phiHost, etaDevice, phiDevice); + if (dr2 > dr2cut_) + continue; // this is arbitrary + if (mindr2 > dr2) { + mindr2 = dr2; + closestTkidx = gid; + } + } + + hpt_eta_tkAllHost_->Fill(etaHost, ptHost); //all Host tk + hphi_z_tkAllHost_->Fill(phiHost, zipHost); + if (closestTkidx == notFound) + continue; + nLooseAndAboveTracksHost_matchedDevice++; + + hchi2_->Fill(tsoaHost.view()[it].chi2(), tsoaDevice.view()[closestTkidx].chi2()); + hCharge_->Fill(reco::charge(tsoaHost.view(), it), reco::charge(tsoaDevice.view(), closestTkidx)); + hnHits_->Fill(helper::nHits(tsoaHost.view(), it), helper::nHits(tsoaDevice.view(), closestTkidx)); + hnLayers_->Fill(tsoaHost.view()[it].nLayers(), tsoaDevice.view()[closestTkidx].nLayers()); + hpt_->Fill(tsoaHost.view()[it].pt(), tsoaDevice.view()[closestTkidx].pt()); + hptLogLog_->Fill(tsoaHost.view()[it].pt(), tsoaDevice.view()[closestTkidx].pt()); + heta_->Fill(etaHost, tsoaDevice.view()[closestTkidx].eta()); + hphi_->Fill(phiHost, reco::phi(tsoaDevice.view(), closestTkidx)); + hz_->Fill(zipHost, reco::zip(tsoaDevice.view(), closestTkidx)); + htip_->Fill(tipHost, reco::tip(tsoaDevice.view(), closestTkidx)); + hptdiffMatched_->Fill(ptHost - tsoaDevice.view()[closestTkidx].pt()); + hCurvdiffMatched_->Fill((reco::charge(tsoaHost.view(), it) / tsoaHost.view()[it].pt()) - + (reco::charge(tsoaDevice.view(), closestTkidx) / tsoaDevice.view()[closestTkidx].pt())); + hetadiffMatched_->Fill(etaHost - tsoaDevice.view()[closestTkidx].eta()); + hphidiffMatched_->Fill(reco::deltaPhi(phiHost, reco::phi(tsoaDevice.view(), closestTkidx))); + hzdiffMatched_->Fill(zipHost - reco::zip(tsoaDevice.view(), closestTkidx)); + htipdiffMatched_->Fill(tipHost - reco::tip(tsoaDevice.view(), 
closestTkidx)); + hpt_eta_tkAllHostMatched_->Fill(etaHost, tsoaHost.view()[it].pt()); //matched to gpu + hphi_z_tkAllHostMatched_->Fill(etaHost, zipHost); + } + hnTracks_->Fill(nTracksHost, nTracksDevice); + hnLooseAndAboveTracks_->Fill(nLooseAndAboveTracksHost, nLooseAndAboveTracksDevice); + hnLooseAndAboveTracks_matched_->Fill(nLooseAndAboveTracksHost, nLooseAndAboveTracksHost_matchedDevice); +} + +// +// -- Book Histograms +// +template +void SiPixelCompareTrackSoAAlpaka::bookHistograms(DQMStore::IBooker& iBook, + edm::Run const& iRun, + edm::EventSetup const& iSetup) { + iBook.cd(); + iBook.setCurrentFolder(topFolderName_); + + // clang-format off + std::string toRep = "Number of tracks"; + // FIXME: all the 2D correlation plots are quite heavy in terms of memory consumption, so a as soon as DQM supports THnSparse + // these should be moved to a less resource consuming format + hnTracks_ = iBook.book2I("nTracks", fmt::format("{} per event; Host; Device",toRep), 501, -0.5, 500.5, 501, -0.5, 500.5); + hnLooseAndAboveTracks_ = iBook.book2I("nLooseAndAboveTracks", fmt::format("{} (quality #geq loose) per event; Host; Device",toRep), 501, -0.5, 500.5, 501, -0.5, 500.5); + hnLooseAndAboveTracks_matched_ = iBook.book2I("nLooseAndAboveTracks_matched", fmt::format("{} (quality #geq loose) per event; Host; Device",toRep), 501, -0.5, 500.5, 501, -0.5, 500.5); + + toRep = "Number of all RecHits per track (quality #geq loose)"; + hnHits_ = iBook.book2I("nRecHits", fmt::format("{};Host;Device",toRep), 15, -0.5, 14.5, 15, -0.5, 14.5); + + toRep = "Number of all layers per track (quality #geq loose)"; + hnLayers_ = iBook.book2I("nLayers", fmt::format("{};Host;Device",toRep), 15, -0.5, 14.5, 15, -0.5, 14.5); + + toRep = "Track (quality #geq loose) #chi^{2}/ndof"; + hchi2_ = iBook.book2I("nChi2ndof", fmt::format("{};Host;Device",toRep), 40, 0., 20., 40, 0., 20.); + + toRep = "Track (quality #geq loose) charge"; + hCharge_ = 
iBook.book2I("charge",fmt::format("{};Host;Device",toRep),3, -1.5, 1.5, 3, -1.5, 1.5); + + hpt_ = iBook.book2I("pt", "Track (quality #geq loose) p_{T} [GeV];Host;Device", 200, 0., 200., 200, 0., 200.); + hptLogLog_ = make2DIfLog(iBook, true, true, "ptLogLog", "Track (quality #geq loose) p_{T} [GeV];Host;Device", 200, log10(0.5), log10(200.), 200, log10(0.5), log10(200.)); + heta_ = iBook.book2I("eta", "Track (quality #geq loose) #eta;Host;Device", 30, -3., 3., 30, -3., 3.); + hphi_ = iBook.book2I("phi", "Track (quality #geq loose) #phi;Host;Device", 30, -M_PI, M_PI, 30, -M_PI, M_PI); + hz_ = iBook.book2I("z", "Track (quality #geq loose) z [cm];Host;Device", 30, -30., 30., 30, -30., 30.); + htip_ = iBook.book2I("tip", "Track (quality #geq loose) TIP [cm];Host;Device", 100, -0.5, 0.5, 100, -0.5, 0.5); + //1D difference plots + hptdiffMatched_ = iBook.book1D("ptdiffmatched", " p_{T} diff [GeV] between matched tracks; #Delta p_{T} [GeV]", 60, -30., 30.); + hCurvdiffMatched_ = iBook.book1D("curvdiffmatched", "q/p_{T} diff [GeV] between matched tracks; #Delta q/p_{T} [GeV]", 60, -30., 30.); + hetadiffMatched_ = iBook.book1D("etadiffmatched", " #eta diff between matched tracks; #Delta #eta", 160, -0.04 ,0.04); + hphidiffMatched_ = iBook.book1D("phidiffmatched", " #phi diff between matched tracks; #Delta #phi", 160, -0.04 ,0.04); + hzdiffMatched_ = iBook.book1D("zdiffmatched", " z diff between matched tracks; #Delta z [cm]", 300, -1.5, 1.5); + htipdiffMatched_ = iBook.book1D("tipdiffmatched", " TIP diff between matched tracks; #Delta TIP [cm]", 300, -1.5, 1.5); + //2D plots for eff + hpt_eta_tkAllHost_ = iBook.book2I("ptetatrkAllHost", "Track (quality #geq loose) on Host; #eta; p_{T} [GeV];", 30, -M_PI, M_PI, 200, 0., 200.); + hpt_eta_tkAllHostMatched_ = iBook.book2I("ptetatrkAllHostmatched", "Track (quality #geq loose) on Host matched to Device track; #eta; p_{T} [GeV];", 30, -M_PI, M_PI, 200, 0., 200.); + + hphi_z_tkAllHost_ = iBook.book2I("phiztrkAllHost", "Track 
(quality #geq loose) on Host; #phi; z [cm];", 30, -M_PI, M_PI, 30, -30., 30.); + hphi_z_tkAllHostMatched_ = iBook.book2I("phiztrkAllHostmatched", "Track (quality #geq loose) on Host; #phi; z [cm];", 30, -M_PI, M_PI, 30, -30., 30.); + +} + +template +void SiPixelCompareTrackSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // monitorpixelTrackSoA + edm::ParameterSetDescription desc; + desc.add("pixelTrackSrcHost", edm::InputTag("pixelTracksAlpakaSerial")); + desc.add("pixelTrackSrcDevice", edm::InputTag("pixelTracksAlpaka")); + desc.add("topFolderName", "SiPixelHeterogeneous/PixelTrackCompareDeviceVSHost"); + desc.add("useQualityCut", true); + desc.add("minQuality", "loose"); + desc.add("deltaR2cut", 0.04); + descriptions.addWithDefaultLabel(desc); +} + +using SiPixelPhase1CompareTrackSoAAlpaka = SiPixelCompareTrackSoAAlpaka; +using SiPixelPhase2CompareTrackSoAAlpaka = SiPixelCompareTrackSoAAlpaka; +using SiPixelHIonPhase1CompareTrackSoAAlpaka = SiPixelCompareTrackSoAAlpaka; + +DEFINE_FWK_MODULE(SiPixelPhase1CompareTrackSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelPhase2CompareTrackSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelHIonPhase1CompareTrackSoAAlpaka); diff --git a/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareVertexSoAAlpaka.cc b/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareVertexSoAAlpaka.cc new file mode 100644 index 0000000000000..2eea6a980d9c5 --- /dev/null +++ b/DQM/SiPixelHeterogeneous/plugins/SiPixelCompareVertexSoAAlpaka.cc @@ -0,0 +1,186 @@ +// -*- C++ -*- +// Package: SiPixelCompareVertexSoAAlpaka +// Class: SiPixelCompareVertexSoAAlpaka +// +/**\class SiPixelCompareVertexSoAAlpaka SiPixelCompareVertexSoAAlpaka.cc +*/ +// +// Author: Suvankar Roy Chowdhury +// +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" 
+#include "FWCore/Utilities/interface/InputTag.h" +#include "DataFormats/Common/interface/Handle.h" +// DQM Histograming +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/BeamSpot/interface/BeamSpot.h" + +class SiPixelCompareVertexSoAAlpaka : public DQMEDAnalyzer { +public: + using IndToEdm = std::vector; + explicit SiPixelCompareVertexSoAAlpaka(const edm::ParameterSet&); + ~SiPixelCompareVertexSoAAlpaka() override = default; + void bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) override; + void analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + const edm::EDGetTokenT tokenSoAVertexHost_; + const edm::EDGetTokenT tokenSoAVertexDevice_; + const edm::EDGetTokenT tokenBeamSpot_; + const std::string topFolderName_; + const float dzCut_; + MonitorElement* hnVertex_; + MonitorElement* hx_; + MonitorElement* hy_; + MonitorElement* hz_; + MonitorElement* hchi2_; + MonitorElement* hchi2oNdof_; + MonitorElement* hptv2_; + MonitorElement* hntrks_; + MonitorElement* hxdiff_; + MonitorElement* hydiff_; + MonitorElement* hzdiff_; +}; + +// +// constructors +// + +// Note tokenSoAVertexDevice_ contains data copied from device to host, hence is a HostCollection +SiPixelCompareVertexSoAAlpaka::SiPixelCompareVertexSoAAlpaka(const edm::ParameterSet& iConfig) + : tokenSoAVertexHost_(consumes(iConfig.getParameter("pixelVertexSrcHost"))), + tokenSoAVertexDevice_(consumes(iConfig.getParameter("pixelVertexSrcDevice"))), + tokenBeamSpot_(consumes(iConfig.getParameter("beamSpotSrc"))), + topFolderName_(iConfig.getParameter("topFolderName")), + dzCut_(iConfig.getParameter("dzCut")) {} + +// +// -- Analyze +// +void 
SiPixelCompareVertexSoAAlpaka::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + const auto& vsoaHandleHost = iEvent.getHandle(tokenSoAVertexHost_); + const auto& vsoaHandleDevice = iEvent.getHandle(tokenSoAVertexDevice_); + if (not vsoaHandleHost or not vsoaHandleDevice) { + edm::LogWarning out("SiPixelCompareVertexSoAAlpaka"); + if (not vsoaHandleHost) { + out << "reference (cpu) tracks not found; "; + } + if (not vsoaHandleDevice) { + out << "target (gpu) tracks not found; "; + } + out << "the comparison will not run."; + return; + } + + auto const& vsoaHost = *vsoaHandleHost; + int nVerticesHost = vsoaHost.view().nvFinal(); + auto const& vsoaDevice = *vsoaHandleDevice; + int nVerticesDevice = vsoaDevice.view().nvFinal(); + + auto bsHandle = iEvent.getHandle(tokenBeamSpot_); + float x0 = 0., y0 = 0., z0 = 0., dxdz = 0., dydz = 0.; + if (!bsHandle.isValid()) { + edm::LogWarning("SiPixelCompareVertexSoAAlpaka") << "No beamspot found. returning vertexes with (0,0,Z) "; + } else { + const reco::BeamSpot& bs = *bsHandle; + x0 = bs.x0(); + y0 = bs.y0(); + z0 = bs.z0(); + dxdz = bs.dxdz(); + dydz = bs.dydz(); + } + + for (int ivc = 0; ivc < nVerticesHost; ivc++) { + auto sic = vsoaHost.view()[ivc].sortInd(); + auto zc = vsoaHost.view()[sic].zv(); + auto xc = x0 + dxdz * zc; + auto yc = y0 + dydz * zc; + zc += z0; + + auto ndofHost = vsoaHost.view()[sic].ndof(); + auto chi2Host = vsoaHost.view()[sic].chi2(); + + const int32_t notFound = -1; + int32_t closestVtxidx = notFound; + float mindz = dzCut_; + + for (int ivg = 0; ivg < nVerticesDevice; ivg++) { + auto sig = vsoaDevice.view()[ivg].sortInd(); + auto zgc = vsoaDevice.view()[sig].zv() + z0; + auto zDist = std::abs(zc - zgc); + //insert some matching condition + if (zDist > dzCut_) + continue; + if (mindz > zDist) { + mindz = zDist; + closestVtxidx = sig; + } + } + if (closestVtxidx == notFound) + continue; + + auto zg = vsoaDevice.view()[closestVtxidx].zv(); + auto xg = x0 + dxdz * zg; + auto yg = 
y0 + dydz * zg; + zg += z0; + auto ndofDevice = vsoaDevice.view()[closestVtxidx].ndof(); + auto chi2Device = vsoaDevice.view()[closestVtxidx].chi2(); + + hx_->Fill(xc - x0, xg - x0); + hy_->Fill(yc - y0, yg - y0); + hz_->Fill(zc, zg); + hxdiff_->Fill(xc - xg); + hydiff_->Fill(yc - yg); + hzdiff_->Fill(zc - zg); + hchi2_->Fill(chi2Host, chi2Device); + hchi2oNdof_->Fill(chi2Host / ndofHost, chi2Device / ndofDevice); + hptv2_->Fill(vsoaHost.view()[sic].ptv2(), vsoaDevice.view()[closestVtxidx].ptv2()); + hntrks_->Fill(ndofHost + 1, ndofDevice + 1); + } + hnVertex_->Fill(nVerticesHost, nVerticesDevice); +} + +// +// -- Book Histograms +// +void SiPixelCompareVertexSoAAlpaka::bookHistograms(DQMStore::IBooker& ibooker, + edm::Run const& iRun, + edm::EventSetup const& iSetup) { + ibooker.cd(); + ibooker.setCurrentFolder(topFolderName_); + + // FIXME: all the 2D correlation plots are quite heavy in terms of memory consumption, so a as soon as DQM supports either TH2I or THnSparse + // these should be moved to a less resource consuming format + hnVertex_ = ibooker.book2I("nVertex", "# of Vertices;Host;Device", 101, -0.5, 100.5, 101, -0.5, 100.5); + hx_ = ibooker.book2I("vx", "Vertez x - Beamspot x;Host;Device", 50, -0.1, 0.1, 50, -0.1, 0.1); + hy_ = ibooker.book2I("vy", "Vertez y - Beamspot y;Host;Device", 50, -0.1, 0.1, 50, -0.1, 0.1); + hz_ = ibooker.book2I("vz", "Vertez z;Host;Device", 30, -30., 30., 30, -30., 30.); + hchi2_ = ibooker.book2I("chi2", "Vertex chi-squared;Host;Device", 40, 0., 20., 40, 0., 20.); + hchi2oNdof_ = ibooker.book2I("chi2oNdof", "Vertex chi-squared/Ndof;Host;Device", 40, 0., 20., 40, 0., 20.); + hptv2_ = ibooker.book2I("ptsq", "Vertex #sum (p_{T})^{2};Host;Device", 200, 0., 200., 200, 0., 200.); + hntrks_ = ibooker.book2I("ntrk", "#tracks associated;Host;Device", 100, -0.5, 99.5, 100, -0.5, 99.5); + hntrks_ = ibooker.book2I("ntrk", "#tracks associated;Host;Device", 100, -0.5, 99.5, 100, -0.5, 99.5); + hxdiff_ = ibooker.book1D("vxdiff", ";Vertex x 
difference (Host - Device);#entries", 100, -0.001, 0.001); + hydiff_ = ibooker.book1D("vydiff", ";Vertex y difference (Host - Device);#entries", 100, -0.001, 0.001); + hzdiff_ = ibooker.book1D("vzdiff", ";Vertex z difference (Host - Device);#entries", 100, -2.5, 2.5); +} + +void SiPixelCompareVertexSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // monitorpixelVertexSoA + edm::ParameterSetDescription desc; + desc.add("pixelVertexSrcHost", edm::InputTag("pixelVerticesAlpakaSerial")); + desc.add("pixelVertexSrcDevice", edm::InputTag("pixelVerticesAlpaka")); + desc.add("beamSpotSrc", edm::InputTag("offlineBeamSpot")); + desc.add("topFolderName", "SiPixelHeterogeneous/PixelVertexCompareSoADeviceVSHost"); + desc.add("dzCut", 1.); + descriptions.addWithDefaultLabel(desc); +} + +DEFINE_FWK_MODULE(SiPixelCompareVertexSoAAlpaka); diff --git a/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorRecHitsSoAAlpaka.cc b/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorRecHitsSoAAlpaka.cc new file mode 100644 index 0000000000000..f4c8968fafb16 --- /dev/null +++ b/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorRecHitsSoAAlpaka.cc @@ -0,0 +1,198 @@ +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DataFormats/Math/interface/approx_atan2.h" +#include "DataFormats/SiPixelDetId/interface/PixelSubdetector.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include 
"Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" +#include "Geometry/CommonTopologies/interface/PixelTopology.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" + +template +class SiPixelMonitorRecHitsSoAAlpaka : public DQMEDAnalyzer { +public: + using HitsOnHost = TrackingRecHitHost; + + explicit SiPixelMonitorRecHitsSoAAlpaka(const edm::ParameterSet&); + ~SiPixelMonitorRecHitsSoAAlpaka() override = default; + void dqmBeginRun(const edm::Run&, const edm::EventSetup&) override; + void bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) override; + void analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + const edm::ESGetToken geomToken_; + const edm::ESGetToken topoToken_; + const edm::EDGetTokenT tokenSoAHits_; + const std::string topFolderName_; + const TrackerGeometry* tkGeom_ = nullptr; + const TrackerTopology* tTopo_ = nullptr; + MonitorElement* hnHits; + MonitorElement* hBFposZP; + MonitorElement* hBFposZR; + MonitorElement* hBposXY; + MonitorElement* hBposZP; + MonitorElement* hBcharge; + MonitorElement* hBsizex; + MonitorElement* hBsizey; + MonitorElement* hBposZPL[4]; // max 4 barrel hits + MonitorElement* hBchargeL[4]; + MonitorElement* hBsizexL[4]; + MonitorElement* hBsizeyL[4]; + MonitorElement* hFposXY; + MonitorElement* hFposZP; + MonitorElement* hFcharge; + MonitorElement* hFsizex; + MonitorElement* hFsizey; + MonitorElement* hFposXYD[2][12]; // max 12 endcap disks + MonitorElement* hFchargeD[2][12]; + MonitorElement* hFsizexD[2][12]; + MonitorElement* hFsizeyD[2][12]; +}; + +// +// constructors +// +template +SiPixelMonitorRecHitsSoAAlpaka::SiPixelMonitorRecHitsSoAAlpaka(const edm::ParameterSet& iConfig) + : geomToken_(esConsumes()), + topoToken_(esConsumes()), + 
tokenSoAHits_(consumes(iConfig.getParameter("pixelHitsSrc"))), + topFolderName_(iConfig.getParameter("TopFolderName")) {} + +// +// Begin Run +// +template +void SiPixelMonitorRecHitsSoAAlpaka::dqmBeginRun(const edm::Run& iRun, const edm::EventSetup& iSetup) { + tkGeom_ = &iSetup.getData(geomToken_); + tTopo_ = &iSetup.getData(topoToken_); +} + +// +// -- Analyze +// +template +void SiPixelMonitorRecHitsSoAAlpaka::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + const auto& rhsoaHandle = iEvent.getHandle(tokenSoAHits_); + if (!rhsoaHandle.isValid()) { + edm::LogWarning("SiPixelMonitorRecHitsSoAAlpaka") << "No RecHits SoA found \n returning!"; + return; + } + auto const& rhsoa = *rhsoaHandle; + auto const& soa2d = rhsoa.const_view(); + + uint32_t nHits_ = soa2d.metadata().size(); + hnHits->Fill(nHits_); + auto detIds = tkGeom_->detUnitIds(); + for (uint32_t i = 0; i < nHits_; i++) { + DetId id = detIds[soa2d[i].detectorIndex()]; + float xG = soa2d[i].xGlobal(); + float yG = soa2d[i].yGlobal(); + float zG = soa2d[i].zGlobal(); + float rG = soa2d[i].rGlobal(); + float fphi = short2phi(soa2d[i].iphi()); + uint32_t charge = soa2d[i].chargeAndStatus().charge; + int16_t sizeX = std::ceil(float(std::abs(soa2d[i].clusterSizeX()) / 8.)); + int16_t sizeY = std::ceil(float(std::abs(soa2d[i].clusterSizeY()) / 8.)); + hBFposZP->Fill(zG, fphi); + int16_t ysign = yG >= 0 ? 
1 : -1; + hBFposZR->Fill(zG, rG * ysign); + switch (id.subdetId()) { + case PixelSubdetector::PixelBarrel: + hBposXY->Fill(xG, yG); + hBposZP->Fill(zG, fphi); + hBcharge->Fill(charge); + hBsizex->Fill(sizeX); + hBsizey->Fill(sizeY); + hBposZPL[tTopo_->pxbLayer(id) - 1]->Fill(zG, fphi); + hBchargeL[tTopo_->pxbLayer(id) - 1]->Fill(charge); + hBsizexL[tTopo_->pxbLayer(id) - 1]->Fill(sizeX); + hBsizeyL[tTopo_->pxbLayer(id) - 1]->Fill(sizeY); + break; + case PixelSubdetector::PixelEndcap: + hFposXY->Fill(xG, yG); + hFposZP->Fill(zG, fphi); + hFcharge->Fill(charge); + hFsizex->Fill(sizeX); + hFsizey->Fill(sizeY); + hFposXYD[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(xG, yG); + hFchargeD[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(charge); + hFsizexD[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(sizeX); + hFsizeyD[tTopo_->pxfSide(id) - 1][tTopo_->pxfDisk(id) - 1]->Fill(sizeY); + break; + } + } +} + +// +// -- Book Histograms +// +template +void SiPixelMonitorRecHitsSoAAlpaka::bookHistograms(DQMStore::IBooker& iBook, + edm::Run const& iRun, + edm::EventSetup const& iSetup) { + iBook.cd(); + iBook.setCurrentFolder(topFolderName_); + + // clang-format off + //Global + hnHits = iBook.book1D("nHits", "RecHits per event;RecHits;#events", 200, 0, 5000); + hBFposZP = iBook.book2D("recHitsGlobalPosZP", "RecHits position Global;Z;#phi", 1000, -60, 60, 200,-3.2,3.2); + hBFposZR = iBook.book2D("recHitsGlobalPosZR", "RecHits position Global;Z;R", 1000, -60, 60, 200,-20,20); + //Barrel + hBposXY = iBook.book2D("recHitsBarrelPosXY", "RecHits position Barrel;X;Y", 200, -20, 20, 200,-20,20); + hBposZP = iBook.book2D("recHitsBarrelPosZP", "RecHits position Barrel;Z;#phi", 300, -30, 30, 200,-3.2,3.2); + hBcharge = iBook.book1D("recHitsBarrelCharge", "RecHits Charge Barrel;Charge;#events", 250, 0, 100000); + hBsizex = iBook.book1D("recHitsBarrelSizex", "RecHits SizeX Barrel;SizeX;#events", 50, 0, 50); + hBsizey = iBook.book1D("recHitsBarrelSizey", "RecHits 
SizeY Barrel;SizeY;#events", 50, 0, 50); + //Barrel Layer + for(unsigned int il=0;ilnumberOfLayers(PixelSubdetector::PixelBarrel);il++){ + hBposZPL[il] = iBook.book2D(Form("recHitsBLay%dPosZP",il+1), Form("RecHits position Barrel Layer%d;Z;#phi",il+1), 300, -30, 30, 200,-3.2,3.2); + hBchargeL[il] = iBook.book1D(Form("recHitsBLay%dCharge",il+1), Form("RecHits Charge Barrel Layer%d;Charge;#events",il+1), 250, 0, 100000); + hBsizexL[il] = iBook.book1D(Form("recHitsBLay%dSizex",il+1), Form("RecHits SizeX Barrel Layer%d;SizeX;#events",il+1), 50, 0, 50); + hBsizeyL[il] = iBook.book1D(Form("recHitsBLay%dSizey",il+1), Form("RecHits SizeY Barrel Layer%d;SizeY;#events",il+1), 50, 0, 50); + } + //Endcaps + hFposXY = iBook.book2D("recHitsEndcapsPosXY", "RecHits position Endcaps;X;Y", 200, -20, 20, 200,-20, 20); + hFposZP = iBook.book2D("recHitsEndcapsPosZP", "RecHits position Endcaps;Z;#phi", 600, -60, 60, 200,-3.2,3.2); + hFcharge = iBook.book1D("recHitsEndcapsCharge", "RecHits Charge Endcaps;Charge;#events", 250, 0, 100000); + hFsizex = iBook.book1D("recHitsEndcapsSizex", "RecHits SizeX Endcaps;SizeX;#events", 50, 0, 50); + hFsizey = iBook.book1D("recHitsEndcapsSizey", "RecHits SizeY Endcaps;SizeY;#events", 50, 0, 50); + //Endcaps Disk + for(int is=0;is<2;is++){ + int sign=is==0? 
-1:1; + for(unsigned int id=0;idnumberOfLayers(PixelSubdetector::PixelEndcap);id++){ + hFposXYD[is][id] = iBook.book2D(Form("recHitsFDisk%+dPosXY",id*sign+sign), Form("RecHits position Endcaps Disk%+d;X;Y",id*sign+sign), 200, -20, 20, 200,-20,20); + hFchargeD[is][id] = iBook.book1D(Form("recHitsFDisk%+dCharge",id*sign+sign), Form("RecHits Charge Endcaps Disk%+d;Charge;#events",id*sign+sign), 250, 0, 100000); + hFsizexD[is][id] = iBook.book1D(Form("recHitsFDisk%+dSizex",id*sign+sign), Form("RecHits SizeX Endcaps Disk%+d;SizeX;#events",id*sign+sign), 50, 0, 50); + hFsizeyD[is][id] = iBook.book1D(Form("recHitsFDisk%+dSizey",id*sign+sign), Form("RecHits SizeY Endcaps Disk%+d;SizeY;#events",id*sign+sign), 50, 0, 50); + } + } +} + +template +void SiPixelMonitorRecHitsSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // monitorpixelRecHitsSoA + edm::ParameterSetDescription desc; + desc.add("pixelHitsSrc", edm::InputTag("siPixelRecHitsPreSplittingAlpaka")); + desc.add("TopFolderName", "SiPixelHeterogeneous/PixelRecHitsAlpaka"); + descriptions.addWithDefaultLabel(desc); +} + +using SiPixelPhase1MonitorRecHitsSoAAlpaka = SiPixelMonitorRecHitsSoAAlpaka; +using SiPixelPhase2MonitorRecHitsSoAAlpaka = SiPixelMonitorRecHitsSoAAlpaka; +using SiPixelHIonPhase1MonitorRecHitsSoAAlpaka = SiPixelMonitorRecHitsSoAAlpaka; + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(SiPixelPhase1MonitorRecHitsSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelPhase2MonitorRecHitsSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelHIonPhase1MonitorRecHitsSoAAlpaka); diff --git a/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorTrackSoAAlpaka.cc b/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorTrackSoAAlpaka.cc new file mode 100644 index 0000000000000..fd98957ee8492 --- /dev/null +++ b/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorTrackSoAAlpaka.cc @@ -0,0 +1,197 @@ +// -*- C++ -*- +// Package: SiPixelMonitorTrackSoAAlpaka +// Class: SiPixelMonitorTrackSoAAlpaka +// +/**\class 
SiPixelMonitorTrackSoAAlpaka SiPixelMonitorTrackSoAAlpaka.cc +*/ +// +// Author: Suvankar Roy Chowdhury +// + +// for string manipulations +#include +#include "DataFormats/Common/interface/Handle.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "FWCore/Utilities/interface/InputTag.h" +// DQM Histograming +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" + +template +class SiPixelMonitorTrackSoAAlpaka : public DQMEDAnalyzer { +public: + using PixelTrackHeterogeneous = TracksHost; + explicit SiPixelMonitorTrackSoAAlpaka(const edm::ParameterSet&); + ~SiPixelMonitorTrackSoAAlpaka() override = default; + void bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) override; + void analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + edm::EDGetTokenT tokenSoATrack_; + std::string topFolderName_; + bool useQualityCut_; + pixelTrack::Quality minQuality_; + MonitorElement* hnTracks; + MonitorElement* hnLooseAndAboveTracks; + MonitorElement* hnHits; + MonitorElement* hnHitsVsPhi; + MonitorElement* hnHitsVsEta; + MonitorElement* hnLayers; + MonitorElement* hnLayersVsPhi; + MonitorElement* hnLayersVsEta; + MonitorElement* hchi2; + MonitorElement* hChi2VsPhi; + MonitorElement* hChi2VsEta; + MonitorElement* hpt; + MonitorElement* heta; + MonitorElement* hphi; + MonitorElement* hz; + MonitorElement* htip; + MonitorElement* hquality; 
+}; + +// +// constructors +// + +template +SiPixelMonitorTrackSoAAlpaka::SiPixelMonitorTrackSoAAlpaka(const edm::ParameterSet& iConfig) { + tokenSoATrack_ = consumes(iConfig.getParameter("pixelTrackSrc")); + topFolderName_ = iConfig.getParameter("topFolderName"); //"SiPixelHeterogeneous/PixelTrackSoA"; + useQualityCut_ = iConfig.getParameter("useQualityCut"); + minQuality_ = pixelTrack::qualityByName(iConfig.getParameter("minQuality")); +} + +// +// -- Analyze +// +template +void SiPixelMonitorTrackSoAAlpaka::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + const auto& tsoaHandle = iEvent.getHandle(tokenSoATrack_); + if (!tsoaHandle.isValid()) { + edm::LogWarning("SiPixelMonitorTrackSoAAlpaka") << "No Track SoA found \n returning!" << std::endl; + return; + } + + auto const& tsoa = *tsoaHandle.product(); + auto maxTracks = tsoa.view().metadata().size(); + auto const* quality = tsoa.view().quality(); + int32_t nTracks = 0; + int32_t nLooseAndAboveTracks = 0; + + for (int32_t it = 0; it < maxTracks; ++it) { + auto nHits = tsoa.view().detIndices().size(it); + auto nLayers = tsoa.view()[it].nLayers(); + if (nHits == 0) + break; // this is a guard + float pt = tsoa.view()[it].pt(); + if (!(pt > 0.)) + continue; + + // fill the quality for all tracks + pixelTrack::Quality qual = quality[it]; + hquality->Fill(int(qual)); + nTracks++; + + if (useQualityCut_ && quality[it] < minQuality_) + continue; + + // fill parameters only for quality >= loose + + float chi2 = tsoa.view()[it].chi2(); + float phi = tsoa.view()[it].state()(0); //TODO: put these numbers in enum + float zip = tsoa.view()[it].state()(4); + float eta = tsoa.view()[it].eta(); + float tip = tsoa.view()[it].state()(1); + + hchi2->Fill(chi2); + hChi2VsPhi->Fill(phi, chi2); + hChi2VsEta->Fill(eta, chi2); + hnHits->Fill(nHits); + hnLayers->Fill(nLayers); + hnHitsVsPhi->Fill(phi, nHits); + hnHitsVsEta->Fill(eta, nHits); + hnLayersVsPhi->Fill(phi, nLayers); + hnLayersVsEta->Fill(eta, nLayers); + 
hpt->Fill(pt); + heta->Fill(eta); + hphi->Fill(phi); + hz->Fill(zip); + htip->Fill(tip); + nLooseAndAboveTracks++; + } + hnTracks->Fill(nTracks); + hnLooseAndAboveTracks->Fill(nLooseAndAboveTracks); +} + +// +// -- Book Histograms +// +template +void SiPixelMonitorTrackSoAAlpaka::bookHistograms(DQMStore::IBooker& iBook, + edm::Run const& iRun, + edm::EventSetup const& iSetup) { + iBook.cd(); + iBook.setCurrentFolder(topFolderName_); + + // clang-format off +std::string toRep = "Number of tracks"; +hnTracks = iBook.book1D("nTracks", fmt::format(";{} per event;#events",toRep), 1001, -0.5, 1000.5); +hnLooseAndAboveTracks = iBook.book1D("nLooseAndAboveTracks", fmt::format(";{} (quality #geq loose) per event;#events",toRep), 1001, -0.5, 1000.5); + +toRep = "Number of all RecHits per track (quality #geq loose)"; +hnHits = iBook.book1D("nRecHits", fmt::format(";{};#tracks",toRep), 15, -0.5, 14.5); +hnHitsVsPhi = iBook.bookProfile("nHitsPerTrackVsPhi", fmt::format("{} vs track #phi;Track #phi;{}",toRep,toRep), 30, -M_PI, M_PI,0., 15.); +hnHitsVsEta = iBook.bookProfile("nHitsPerTrackVsEta", fmt::format("{} vs track #eta;Track #eta;{}",toRep,toRep), 30, -3., 3., 0., 15.); + +toRep = "Number of all layers per track (quality #geq loose)"; +hnLayers = iBook.book1D("nLayers", fmt::format(";{};#tracks",toRep), 15, -0.5, 14.5); +hnLayersVsPhi = iBook.bookProfile("nLayersPerTrackVsPhi", fmt::format("{} vs track #phi;Track #phi;{}",toRep,toRep), 30, -M_PI, M_PI,0., 15.); +hnLayersVsEta = iBook.bookProfile("nLayersPerTrackVsEta", fmt::format("{} vs track #eta;Track #eta;{}",toRep,toRep), 30, -3., 3., 0., 15.); + +toRep = "Track (quality #geq loose) #chi^{2}/ndof"; +hchi2 = iBook.book1D("nChi2ndof", fmt::format(";{};#tracks",toRep), 40, 0., 20.); +hChi2VsPhi = iBook.bookProfile("nChi2ndofVsPhi", fmt::format("{} vs track #phi;Track #phi;{}",toRep,toRep), 30, -M_PI, M_PI, 0., 20.); +hChi2VsEta = iBook.bookProfile("nChi2ndofVsEta", fmt::format("{} vs track #eta;Track 
#eta;{}",toRep,toRep), 30, -3., 3., 0., 20.); + // clang-format on + + hpt = iBook.book1D("pt", ";Track (quality #geq loose) p_{T} [GeV];#tracks", 200, 0., 200.); + heta = iBook.book1D("eta", ";Track (quality #geq loose) #eta;#tracks", 30, -3., 3.); + hphi = iBook.book1D("phi", ";Track (quality #geq loose) #phi;#tracks", 30, -M_PI, M_PI); + hz = iBook.book1D("z", ";Track (quality #geq loose) z [cm];#tracks", 30, -30., 30.); + htip = iBook.book1D("tip", ";Track (quality #geq loose) TIP [cm];#tracks", 100, -0.5, 0.5); + hquality = iBook.book1D("quality", ";Track Quality;#tracks", 7, -0.5, 6.5); + uint i = 1; + for (const auto& q : pixelTrack::qualityName) { + hquality->setBinLabel(i, q.data(), 1); + i++; + } +} + +template +void SiPixelMonitorTrackSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // monitorpixelTrackSoA + edm::ParameterSetDescription desc; + desc.add("pixelTrackSrc", edm::InputTag("pixelTracksAlpaka")); + desc.add("topFolderName", "SiPixelHeterogeneous/PixelTrackAlpaka"); + desc.add("useQualityCut", true); + desc.add("minQuality", "loose"); + descriptions.addWithDefaultLabel(desc); +} + +using SiPixelPhase1MonitorTrackSoAAlpaka = SiPixelMonitorTrackSoAAlpaka; +using SiPixelPhase2MonitorTrackSoAAlpaka = SiPixelMonitorTrackSoAAlpaka; +using SiPixelHIonPhase1MonitorTrackSoAAlpaka = SiPixelMonitorTrackSoAAlpaka; + +DEFINE_FWK_MODULE(SiPixelPhase1MonitorTrackSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelPhase2MonitorTrackSoAAlpaka); +DEFINE_FWK_MODULE(SiPixelHIonPhase1MonitorTrackSoAAlpaka); diff --git a/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorVertexSoAAlpaka.cc b/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorVertexSoAAlpaka.cc new file mode 100644 index 0000000000000..d3121f77bccb8 --- /dev/null +++ b/DQM/SiPixelHeterogeneous/plugins/SiPixelMonitorVertexSoAAlpaka.cc @@ -0,0 +1,131 @@ +// -*- C++ -*- +///bookLayer +// Package: SiPixelMonitorVertexSoAAlpaka +// Class: SiPixelMonitorVertexSoAAlpaka +// +/**\class 
SiPixelMonitorVertexSoAAlpaka SiPixelMonitorVertexSoAAlpaka.cc +*/ +// +// Author: Suvankar Roy Chowdhury +// +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "DataFormats/Common/interface/Handle.h" +// DQM Histograming +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/BeamSpot/interface/BeamSpot.h" + +class SiPixelMonitorVertexSoAAlpaka : public DQMEDAnalyzer { +public: + using IndToEdm = std::vector; + explicit SiPixelMonitorVertexSoAAlpaka(const edm::ParameterSet&); + ~SiPixelMonitorVertexSoAAlpaka() override = default; + void bookHistograms(DQMStore::IBooker& ibooker, edm::Run const& iRun, edm::EventSetup const& iSetup) override; + void analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + const edm::EDGetTokenT tokenSoAVertex_; + const edm::EDGetTokenT tokenBeamSpot_; + std::string topFolderName_; + MonitorElement* hnVertex; + MonitorElement* hx; + MonitorElement* hy; + MonitorElement* hz; + MonitorElement* hchi2; + MonitorElement* hchi2oNdof; + MonitorElement* hptv2; + MonitorElement* hntrks; +}; + +// +// constructors +// + +SiPixelMonitorVertexSoAAlpaka::SiPixelMonitorVertexSoAAlpaka(const edm::ParameterSet& iConfig) + : tokenSoAVertex_(consumes(iConfig.getParameter("pixelVertexSrc"))), + tokenBeamSpot_(consumes(iConfig.getParameter("beamSpotSrc"))), + 
topFolderName_(iConfig.getParameter("topFolderName")) {} + +// +// -- Analyze +// +void SiPixelMonitorVertexSoAAlpaka::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + const auto& vsoaHandle = iEvent.getHandle(tokenSoAVertex_); + if (!vsoaHandle.isValid()) { + edm::LogWarning("SiPixelMonitorVertexSoAAlpaka") << "No Vertex SoA found \n returning!" << std::endl; + return; + } + + auto const& vsoa = *vsoaHandle; + int nVertices = vsoa.view().nvFinal(); + auto bsHandle = iEvent.getHandle(tokenBeamSpot_); + float x0 = 0., y0 = 0., z0 = 0., dxdz = 0., dydz = 0.; + if (!bsHandle.isValid()) { + edm::LogWarning("SiPixelMonitorVertexSoAAlpaka") << "No beamspot found. returning vertexes with (0,0,Z) "; + } else { + const reco::BeamSpot& bs = *bsHandle; + x0 = bs.x0(); + y0 = bs.y0(); + z0 = bs.z0(); + dxdz = bs.dxdz(); + dydz = bs.dydz(); + } + + for (int iv = 0; iv < nVertices; iv++) { + auto si = vsoa.view()[iv].sortInd(); + auto z = vsoa.view()[si].zv(); + auto x = x0 + dxdz * z; + auto y = y0 + dydz * z; + + z += z0; + hx->Fill(x); + hy->Fill(y); + hz->Fill(z); + auto ndof = vsoa.view()[si].ndof(); + hchi2->Fill(vsoa.view()[si].chi2()); + hchi2oNdof->Fill(vsoa.view()[si].chi2() / ndof); + hptv2->Fill(vsoa.view()[si].ptv2()); + hntrks->Fill(ndof + 1); + } + hnVertex->Fill(nVertices); +} + +// +// -- Book Histograms +// +void SiPixelMonitorVertexSoAAlpaka::bookHistograms(DQMStore::IBooker& ibooker, + edm::Run const& iRun, + edm::EventSetup const& iSetup) { + //std::string top_folder = ""// + ibooker.cd(); + ibooker.setCurrentFolder(topFolderName_); + hnVertex = ibooker.book1D("nVertex", ";# of Vertices;#entries", 101, -0.5, 100.5); + hx = ibooker.book1D("vx", ";Vertex x;#entries", 10, -5., 5.); + hy = ibooker.book1D("vy", ";Vertex y;#entries", 10, -5., 5.); + hz = ibooker.book1D("vz", ";Vertex z;#entries", 30, -30., 30); + hchi2 = ibooker.book1D("chi2", ";Vertex chi-squared;#entries", 40, 0., 20.); + hchi2oNdof = ibooker.book1D("chi2oNdof", ";Vertex 
chi-squared/Ndof;#entries", 40, 0., 20.); + hptv2 = ibooker.book1D("ptsq", ";Vertex #sum (p_{T})^{2};#entries", 200, 0., 200.); + hntrks = ibooker.book1D("ntrk", ";#tracks associated;#entries", 100, -0.5, 99.5); +} + +void SiPixelMonitorVertexSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // monitorpixelVertexSoA + edm::ParameterSetDescription desc; + desc.add("pixelVertexSrc", edm::InputTag("pixelVerticesAlpaka")); + desc.add("beamSpotSrc", edm::InputTag("offlineBeamSpot")); + desc.add("topFolderName", "SiPixelHeterogeneous/PixelVertexAlpaka"); + descriptions.addWithDefaultLabel(desc); +} + +DEFINE_FWK_MODULE(SiPixelMonitorVertexSoAAlpaka); diff --git a/DQM/SiPixelHeterogeneous/python/SiPixelHeterogenousDQM_FirstStep_cff.py b/DQM/SiPixelHeterogeneous/python/SiPixelHeterogenousDQM_FirstStep_cff.py index dfb83708c95cf..95245a3fea968 100644 --- a/DQM/SiPixelHeterogeneous/python/SiPixelHeterogenousDQM_FirstStep_cff.py +++ b/DQM/SiPixelHeterogeneous/python/SiPixelHeterogenousDQM_FirstStep_cff.py @@ -7,20 +7,35 @@ from DQM.SiPixelHeterogeneous.siPixelPhase2MonitorTrackSoA_cfi import * from DQM.SiPixelHeterogeneous.siPixelHIonPhase1MonitorTrackSoA_cfi import * from DQM.SiPixelHeterogeneous.siPixelMonitorVertexSoA_cfi import * +# Alpaka Modules +from Configuration.ProcessModifiers.alpaka_cff import alpaka +from DQM.SiPixelHeterogeneous.siPixelPhase1MonitorRecHitsSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelPhase2MonitorRecHitsSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelHIonPhase1MonitorRecHitsSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelPhase1MonitorTrackSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelPhase2MonitorTrackSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelHIonPhase1MonitorTrackSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelMonitorVertexSoAAlpaka_cfi import * # Run-3 sequence monitorpixelSoASource = cms.Sequence(siPixelPhase1MonitorRecHitsSoA * 
siPixelPhase1MonitorTrackSoA * siPixelMonitorVertexSoA) - +# Run-3 Alpaka sequence +monitorpixelSoASourceAlpaka = cms.Sequence(siPixelPhase1MonitorRecHitsSoAAlpaka * siPixelPhase1MonitorTrackSoAAlpaka * siPixelMonitorVertexSoAAlpaka) +alpaka.toReplaceWith(monitorpixelSoASource, monitorpixelSoASourceAlpaka) # Phase-2 sequence from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker _monitorpixelSoARecHitsSource = cms.Sequence(siPixelPhase2MonitorRecHitsSoA * siPixelPhase2MonitorTrackSoA * siPixelMonitorVertexSoA) -phase2_tracker.toReplaceWith(monitorpixelSoASource, _monitorpixelSoARecHitsSource) +(phase2_tracker & ~alpaka).toReplaceWith(monitorpixelSoASource, _monitorpixelSoARecHitsSource) +_monitorpixelSoARecHitsSourceAlpaka = cms.Sequence(siPixelPhase2MonitorRecHitsSoAAlpaka * siPixelPhase2MonitorTrackSoAAlpaka * siPixelMonitorVertexSoAAlpaka) +(phase2_tracker & alpaka).toReplaceWith(monitorpixelSoASource, _monitorpixelSoARecHitsSourceAlpaka) # HIon Phase 1 sequence from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA _monitorpixelSoARecHitsSourceHIon = cms.Sequence(siPixelHIonPhase1MonitorRecHitsSoA * siPixelHIonPhase1MonitorTrackSoA * siPixelMonitorVertexSoA) (pp_on_AA & ~phase2_tracker).toReplaceWith(monitorpixelSoASource, _monitorpixelSoARecHitsSourceHIon) +_monitorpixelSoARecHitsSourceHIonAlpaka = cms.Sequence(siPixelHIonPhase1MonitorRecHitsSoAAlpaka * siPixelHIonPhase1MonitorTrackSoAAlpaka * siPixelMonitorVertexSoAAlpaka) +(pp_on_AA & ~phase2_tracker & alpaka).toReplaceWith(monitorpixelSoASource, _monitorpixelSoARecHitsSourceHIonAlpaka) #Define the sequence for GPU vs CPU validation #This should run:- individual monitor for the 2 collections + comparison module @@ -33,6 +48,14 @@ from DQM.SiPixelHeterogeneous.siPixelCompareVertexSoA_cfi import * from DQM.SiPixelHeterogeneous.siPixelPhase1RawDataErrorComparator_cfi import * from DQM.SiPixelPhase1Common.SiPixelPhase1RawData_cfi import * +#Alpaka +from 
DQM.SiPixelHeterogeneous.siPixelPhase1CompareRecHitsSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelPhase2CompareRecHitsSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelHIonPhase1CompareRecHitsSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelPhase1CompareTrackSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelPhase2CompareTrackSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelHIonPhase1CompareTrackSoAAlpaka_cfi import * +from DQM.SiPixelHeterogeneous.siPixelCompareVertexSoAAlpaka_cfi import * # digi errors SiPixelPhase1RawDataConfForCPU = copy.deepcopy(SiPixelPhase1RawDataConf) @@ -126,6 +149,43 @@ topFolderName = 'SiPixelHeterogeneous/PixelVertexSoAGPU', ) +### Alpaka + +# PixelRecHits: monitor of CPUSerial product (Alpaka backend: 'serial_sync') +siPixelRecHitsSoAMonitorSerial = siPixelPhase1MonitorRecHitsSoAAlpaka.clone( + pixelHitsSrc = cms.InputTag( 'siPixelRecHitsPreSplittingAlpakaSerial' ), + TopFolderName = cms.string( 'SiPixelHeterogeneous/PixelRecHitsSerial' ) +) + +# PixelRecHits: monitor of Device product (Alpaka backend: '') +siPixelRecHitsSoAMonitorDevice = siPixelPhase1MonitorRecHitsSoAAlpaka.clone( + pixelHitsSrc = cms.InputTag( 'siPixelRecHitsPreSplittingAlpaka' ), + TopFolderName = cms.string( 'SiPixelHeterogeneous/PixelRecHitsDevice' ) +) + +# PixelTracks: monitor of CPUSerial product (Alpaka backend: 'serial_sync') +siPixelTrackSoAMonitorSerial = siPixelPhase1MonitorTrackSoAAlpaka.clone( + pixelTrackSrc = cms.InputTag('pixelTracksAlpakaSerial'), + topFolderName = cms.string('SiPixelHeterogeneous/PixelTrackSerial') +) + +# PixelTracks: monitor of CPUSerial product (Alpaka backend: 'serial_sync') +siPixelTrackSoAMonitorDevice = siPixelPhase1MonitorTrackSoAAlpaka.clone( + pixelTrackSrc = cms.InputTag('pixelTracksAlpaka'), + topFolderName = cms.string('SiPixelHeterogeneous/PixelTrackDevice') +) + +# PixelVertices: monitor of CPUSerial product (Alpaka backend: 'serial_sync') 
+siPixelVertexSoAMonitorSerial = siPixelMonitorVertexSoAAlpaka.clone( + pixelVertexSrc = cms.InputTag("pixelVerticesAlpakaSerial"), + topFolderName = cms.string('SiPixelHeterogeneous/PixelVertexSerial') +) + +siPixelVertexSoAMonitorDevice = siPixelMonitorVertexSoAAlpaka.clone( + pixelVertexSrc = cms.InputTag("pixelVerticesAlpaka"), + topFolderName = cms.string('SiPixelHeterogeneous/PixelVertexDevice') +) + # Run-3 sequence monitorpixelSoACompareSource = cms.Sequence(siPixelPhase1MonitorRawDataACPU * siPixelPhase1MonitorRawDataAGPU * @@ -139,6 +199,17 @@ siPixelMonitorVertexSoAGPU * siPixelCompareVertexSoA * siPixelPhase1RawDataErrorComparator) +# and the Alpaka version +monitorpixelSoACompareSourceAlpaka = cms.Sequence( + siPixelRecHitsSoAMonitorSerial * + siPixelRecHitsSoAMonitorDevice * + siPixelPhase1CompareRecHitsSoAAlpaka * + siPixelTrackSoAMonitorSerial * + siPixelTrackSoAMonitorDevice * + siPixelPhase1CompareTrackSoAAlpaka * + siPixelVertexSoAMonitorSerial * + siPixelVertexSoAMonitorDevice * + siPixelCompareVertexSoAAlpaka ) # Phase-2 sequence _monitorpixelSoACompareSource = cms.Sequence(siPixelPhase2MonitorRecHitsSoACPU * @@ -166,3 +237,6 @@ from Configuration.ProcessModifiers.gpuValidationPixel_cff import gpuValidationPixel gpuValidationPixel.toReplaceWith(monitorpixelSoASource, monitorpixelSoACompareSource) + +from Configuration.ProcessModifiers.alpakaValidationPixel_cff import alpakaValidationPixel +(alpakaValidationPixel & ~gpuValidationPixel).toReplaceWith(monitorpixelSoASource, monitorpixelSoACompareSourceAlpaka) diff --git a/DQM/SiStripCommissioningClients/src/CalibrationHistograms.cc b/DQM/SiStripCommissioningClients/src/CalibrationHistograms.cc index b2444d11f6270..a578015a87511 100644 --- a/DQM/SiStripCommissioningClients/src/CalibrationHistograms.cc +++ b/DQM/SiStripCommissioningClients/src/CalibrationHistograms.cc @@ -29,9 +29,9 @@ CalibrationHistograms::CalibrationHistograms(const edm::ParameterSet& pset, DQMS << " Constructing object..."; if 
(task == sistrip::CALIBRATION_SCAN or task == sistrip::CALIBRATION_SCAN_DECO) - factory_ = auto_ptr(new CalibrationScanSummaryFactory); + factory_ = make_unique(); else - factory_ = auto_ptr(new CalibrationSummaryFactory); + factory_ = make_unique(); targetRiseTime_ = this->pset().existsAs("targetRiseTime") ? this->pset().getParameter("targetRiseTime") : 50; diff --git a/DQM/TrackerRemapper/interface/mat4.h b/DQM/TrackerRemapper/interface/mat4.h index 2336a19e02194..ca1e7ef80e240 100644 --- a/DQM/TrackerRemapper/interface/mat4.h +++ b/DQM/TrackerRemapper/interface/mat4.h @@ -46,6 +46,8 @@ class mat4 { data[i] = mat[i]; } + mat4& operator=(const mat4& mat) = default; + mat4& operator&(const mat4& mat) { if (this != &mat) { for (unsigned i = 0; i < 12; ++i) diff --git a/DQM/TrackingMonitorSource/plugins/ShortenedTrackResolution.cc b/DQM/TrackingMonitorSource/plugins/ShortenedTrackResolution.cc new file mode 100644 index 0000000000000..ab6dbc45e8411 --- /dev/null +++ b/DQM/TrackingMonitorSource/plugins/ShortenedTrackResolution.cc @@ -0,0 +1,139 @@ +// user includes +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DQMServices/Core/interface/MonitorElement.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/VertexReco/interface/Vertex.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/Utilities/interface/transform.h" // for edm::vector_transform + +// ROOT includes +#include "TLorentzVector.h" + +// standard includes +#include + +class ShortenedTrackResolution : public DQMEDAnalyzer { +public: + ShortenedTrackResolution(const edm::ParameterSet &); + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + +protected: + void analyze(edm::Event const 
&iEvent, edm::EventSetup const &iSetup) override; + void bookHistograms(DQMStore::IBooker &, edm::Run const &, edm::EventSetup const &) override; + +private: + const std::string folderName_; + const std::vector hitsRemain_; + const double minTracksEta_; + const double maxTracksEta_; + const double minTracksPt_; + const double maxTracksPt_; + + const double maxDr_; + const edm::InputTag tracksTag_; + const std::vector tracksRerecoTag_; + const edm::EDGetTokenT> tracksToken_; + const std::vector>> tracksRerecoToken_; + + std::vector histsPtAll_; +}; + +// ----------------------------- +// constructors and destructor +// ----------------------------- +ShortenedTrackResolution::ShortenedTrackResolution(const edm::ParameterSet &ps) + : folderName_(ps.getUntrackedParameter("folderName", "TrackRefitting")), + hitsRemain_(ps.getUntrackedParameter>("hitsRemainInput")), + minTracksEta_(ps.getUntrackedParameter("minTracksEtaInput", 0.0)), + maxTracksEta_(ps.getUntrackedParameter("maxTracksEtaInput", 2.2)), + minTracksPt_(ps.getUntrackedParameter("minTracksPtInput", 15.0)), + maxTracksPt_(ps.getUntrackedParameter("maxTracksPtInput", 99999.9)), + maxDr_(ps.getUntrackedParameter("maxDrInput", 0.01)), + tracksTag_(ps.getUntrackedParameter("tracksInputTag", edm::InputTag("generalTracks", "", "DQM"))), + tracksRerecoTag_(ps.getUntrackedParameter>("tracksRerecoInputTag")), + tracksToken_(consumes>(tracksTag_)), + tracksRerecoToken_(edm::vector_transform( + tracksRerecoTag_, [this](edm::InputTag const &tag) { return consumes>(tag); })) { + histsPtAll_.clear(); +} + +//__________________________________________________________________________________ +void ShortenedTrackResolution::bookHistograms(DQMStore::IBooker &iBook, + edm::Run const &iRun, + edm::EventSetup const &iSetup) { + std::string currentFolder = folderName_ + "/"; + iBook.setCurrentFolder(currentFolder); + + for (int i = 0; i < int(hitsRemain_.size()); ++i) { + histsPtAll_.push_back(iBook.book1D( + 
fmt::sprintf("trackPtRatio_%s", hitsRemain_[i]).c_str(), + fmt::sprintf("Short Track p_{T} / Full Track p_{T} - %s layers;p_{T}^{short}/p_{T}^{full};n. tracks", + hitsRemain_[i]) + .c_str(), + 101, + -0.05, + 2.05)); + } +} + +//__________________________________________________________________________________ +void ShortenedTrackResolution::analyze(edm::Event const &iEvent, edm::EventSetup const &iSetup) { + const auto &tracks = iEvent.getHandle(tracksToken_); + + if (!tracks.isValid()) { + edm::LogError("ShortenedTrackResolution") << "Missing input track collection " << tracksTag_.encode() << std::endl; + return; + } + + for (const auto &track : *tracks) { + const reco::HitPattern &hp = track.hitPattern(); + if (int(int(hp.numberOfValidHits()) - int(hp.numberOfAllHits(reco::HitPattern::TRACK_HITS))) != 0) { + break; + } + + TLorentzVector tvec; + tvec.SetPtEtaPhiM(track.pt(), track.eta(), track.phi(), 0.0); + + int i = 0; // token index + for (const auto &token : tracksRerecoToken_) { + const auto &tracks_rereco = iEvent.getHandle(token); + + for (const auto &track_rereco : *tracks_rereco) { + TLorentzVector trerecovec; + trerecovec.SetPtEtaPhiM(track_rereco.pt(), track_rereco.eta(), track_rereco.phi(), 0.0); + double deltaR = tvec.DeltaR(trerecovec); + + if (deltaR < maxDr_) { + if (track_rereco.pt() >= minTracksPt_ && track_rereco.pt() <= maxTracksPt_ && + std::abs(track_rereco.eta()) >= minTracksEta_ && std::abs(track_rereco.eta()) <= maxTracksEta_) { + histsPtAll_[i]->Fill(1.0 * track_rereco.pt() / track.pt()); + } + } + } + ++i; + } + } +} + +//__________________________________________________________________________________ +void ShortenedTrackResolution::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + desc.addUntracked("folderName", "TrackRefitting"); + desc.addUntracked>("hitsRemainInput", {}); + desc.addUntracked("minTracksEtaInput", 0.0); + desc.addUntracked("maxTracksEtaInput", 2.2); + 
desc.addUntracked("minTracksPtInput", 15.0); + desc.addUntracked("maxTracksPtInput", 99999.9); + desc.addUntracked("maxDrInput", 0.01); + desc.addUntracked("tracksInputTag", edm::InputTag("generalTracks", "", "DQM")); + desc.addUntracked>("tracksRerecoInputTag", {}); + descriptions.addWithDefaultLabel(desc); +} + +// Define this as a plug-in +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(ShortenedTrackResolution); diff --git a/DQM/TrackingMonitorSource/plugins/StandaloneTrackMonitor.cc b/DQM/TrackingMonitorSource/plugins/StandaloneTrackMonitor.cc index c390913a7471b..e1dc903f3443a 100644 --- a/DQM/TrackingMonitorSource/plugins/StandaloneTrackMonitor.cc +++ b/DQM/TrackingMonitorSource/plugins/StandaloneTrackMonitor.cc @@ -1853,8 +1853,10 @@ void StandaloneTrackMonitor::analyze(edm::Event const& iEvent, edm::EventSetup c } } - // off track cluster properties - processClusters(iEvent, iSetup, tkGeom, wfac); + // off track cluster properties (only on RECO data-tier) + if (isRECO_) { + processClusters(iEvent, iSetup, tkGeom, wfac); + } if (verbose_) edm::LogInfo("StandaloneTrackMonitor") << "Ends StandaloneTrackMonitor successfully"; diff --git a/DQM/TrackingMonitorSource/plugins/V0EventSelector.cc b/DQM/TrackingMonitorSource/plugins/V0EventSelector.cc new file mode 100644 index 0000000000000..2a7b0066650f5 --- /dev/null +++ b/DQM/TrackingMonitorSource/plugins/V0EventSelector.cc @@ -0,0 +1,43 @@ +#include +#include +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/Common/interface/View.h" +#include "DataFormats/Candidate/interface/VertexCompositeCandidate.h" +#include "FWCore/Framework/interface/stream/EDFilter.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +class V0EventSelector : public edm::stream::EDFilter<> { +public: + explicit V0EventSelector(const edm::ParameterSet&); + ~V0EventSelector() override = 
default; + + bool filter(edm::Event&, const edm::EventSetup&) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + const edm::EDGetTokenT vccToken_; + const unsigned int minNumCandidates_; +}; + +V0EventSelector::V0EventSelector(const edm::ParameterSet& iConfig) + : vccToken_{consumes( + iConfig.getParameter("vertexCompositeCandidates"))}, + minNumCandidates_{iConfig.getParameter("minCandidates")} {} + +bool V0EventSelector::filter(edm::Event& iEvent, const edm::EventSetup& iSetup) { + edm::Handle vccHandle; + iEvent.getByToken(vccToken_, vccHandle); + + return vccHandle->size() >= minNumCandidates_; +} + +void V0EventSelector::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("vertexCompositeCandidates", edm::InputTag("generalV0Candidates:Kshort")); + desc.add("minCandidates", 1); // Change '1' to your desired minimum number of candidates + descriptions.addWithDefaultLabel(desc); +} + +DEFINE_FWK_MODULE(V0EventSelector); diff --git a/DQM/TrackingMonitorSource/plugins/V0VertexTrackProducer.cc b/DQM/TrackingMonitorSource/plugins/V0VertexTrackProducer.cc new file mode 100644 index 0000000000000..e0dc8348b4bf1 --- /dev/null +++ b/DQM/TrackingMonitorSource/plugins/V0VertexTrackProducer.cc @@ -0,0 +1,63 @@ +#include +#include +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/Common/interface/View.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/Candidate/interface/VertexCompositeCandidate.h" +#include "DataFormats/RecoCandidate/interface/RecoChargedCandidate.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/ESHandle.h" + +class V0VertexTrackProducer : public edm::global::EDProducer<> { +public: + explicit V0VertexTrackProducer(const edm::ParameterSet&); + 
~V0VertexTrackProducer() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + void produce(edm::StreamID streamID, edm::Event& iEvent, edm::EventSetup const& iSetup) const override; + +private: + const edm::EDGetTokenT vccToken_; +}; + +V0VertexTrackProducer::V0VertexTrackProducer(const edm::ParameterSet& iConfig) + : vccToken_{consumes( + iConfig.getParameter("vertexCompositeCandidates"))} { + produces>(); +} + +void V0VertexTrackProducer::produce(edm::StreamID streamID, edm::Event& iEvent, edm::EventSetup const& iSetup) const { + edm::Handle vccHandle; + iEvent.getByToken(vccToken_, vccHandle); + + std::unique_ptr> outputTracks(new std::vector()); + + if (vccHandle.isValid()) { + // Loop over VertexCompositeCandidates and associate tracks + for (const auto& vcc : *vccHandle) { + for (size_t i = 0; i < vcc.numberOfDaughters(); ++i) { + const reco::Candidate* daughter = vcc.daughter(i); + const reco::RecoChargedCandidate* chargedDaughter = dynamic_cast(daughter); + if (chargedDaughter) { + const reco::TrackRef trackRef = chargedDaughter->track(); + if (trackRef.isNonnull()) { + outputTracks->push_back(*trackRef); + } + } + } + } + } else { + edm::LogError("V0VertexTrackProducer") << "Error >> Failed to get VertexCompositeCandidateCollection"; + } + iEvent.put(std::move(outputTracks)); +} + +void V0VertexTrackProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("vertexCompositeCandidates", edm::InputTag("generalV0Candidates:Kshort")); + descriptions.addWithDefaultLabel(desc); +} + +DEFINE_FWK_MODULE(V0VertexTrackProducer); diff --git a/DQM/TrackingMonitorSource/python/TrackingDataMCValidation_Standalone_cff.py b/DQM/TrackingMonitorSource/python/TrackingDataMCValidation_Standalone_cff.py index 3618415e32401..5dfdebb02870b 100644 --- a/DQM/TrackingMonitorSource/python/TrackingDataMCValidation_Standalone_cff.py +++ 
b/DQM/TrackingMonitorSource/python/TrackingDataMCValidation_Standalone_cff.py @@ -1,12 +1,13 @@ import FWCore.ParameterSet.Config as cms from DQM.TrackingMonitorSource.StandaloneTrackMonitor_cfi import * from DQM.TrackingMonitorSource.ZEEDetails_cfi import * -# from DQM.TrackingMonitor.V0Monitor_cfi import * +from DQM.TrackingMonitorSource.V0Selections_cfi import * +from DQM.TrackingMonitor.V0Monitor_cfi import * # Primary Vertex Selector selectedPrimaryVertices = cms.EDFilter("VertexSelector", src = cms.InputTag('offlinePrimaryVertices'), - # cut = cms.string("!isFake && ndof >= 4 && abs(z) < 24 && abs(position.Rho) < 2.0"), + # cut = cms.string("!isFake && ndof >= 4 && abs(z) < 24 && abs(position.Rho) < 2.0"), cut = cms.string(""), filter = cms.bool(True) ) @@ -32,11 +33,11 @@ src = cms.InputTag('generalTracks'), #cut = cms.string("pt > 0.65 && abs(eta) < 3.5 && p > 1.5 && hitPattern.numberOfAllHits('TRACK_HITS') > 7"), #cut = cms.string(""), - ptmin = cms.untracked.double(0.65), + ptmin = cms.untracked.double(0.65), pmin = cms.untracked.double(1.5), etamin = cms.untracked.double(-3.5), etamax = cms.untracked.double(3.5), - nhits = cms.untracked.uint32(7) + nhits = cms.untracked.uint32(7) ) ''' # Track ALCARECO Selection for singlemuon @@ -93,17 +94,23 @@ ttbarEventSelector = cms.EDFilter("ttbarEventSelector") ttbarTracks = cms.EDProducer("TtbarTrackProducer") -# Added module for V0Monitoring for Ks only -# KshortMonitor = v0Monitor.clone() -# KshortMonitor.FolderName = cms.string("Tracking/V0Monitoring/Ks") -# KshortMonitor.v0 = cms.InputTag('generalV0Candidates:Kshort') -# KshortMonitor.histoPSet.massPSet = cms.PSet( -# nbins = cms.int32 ( 100 ), -# xmin = cms.double( 0.400), -# xmax = cms.double( 0.600), -# ) +# Added modules for V0Monitoring +KshortMonitor = v0Monitor.clone() +KshortMonitor.FolderName = "StandaloneTrackMonitor/V0Monitoring/Ks" +KshortMonitor.v0 = "generalV0Candidates:Kshort" +KshortMonitor.histoPSet.massPSet = cms.PSet(nbins = cms.int32 
(100), + xmin = cms.double(0.400), + xmax = cms.double(0.600)) +LambdaMonitor = v0Monitor.clone() +LambdaMonitor.FolderName = "StandaloneTrackMonitor/V0Monitoring/Lambda" +LambdaMonitor.v0 = "generalV0Candidates:Lambda" +LambdaMonitor.histoPSet.massPSet = cms.PSet(nbins = cms.int32(100), + xmin = cms.double(1.050), + xmax = cms.double(1.250)) +################## # For MinBias +################## standaloneTrackMonitorMC = standaloneTrackMonitor.clone( puScaleFactorFile = "PileupScaleFactor_316060_wrt_nVertex_ZeroBias.root", doPUCorrection = True, @@ -115,15 +122,84 @@ # * selectedMultiplicityTracks # Use selectedMultiplicityTracks if needed nTracks > desired multiplicity # * selectedAlcaRecoZBTracks * selectedTracks - * standaloneTrackMonitor) + * standaloneTrackMonitor + * KshortMonitor + * LambdaMonitor) + standaloneValidationMinbiasMC = cms.Sequence( hltPathFilter * selectedPrimaryVertices # * selectedMultiplicityTracks # Use selectedMultiplicityTracks if needed nTracks > desired multiplicity # * selectedAlcaRecoZBTracks * selectedTracks - * standaloneTrackMonitorMC) + * standaloneTrackMonitorMC + * KshortMonitor + * LambdaMonitor) + +################## +# For V0s in MinBias +################## +standaloneTrackMonitorK0 = standaloneTrackMonitor.clone( + folderName = "K0Tracks", + trackInputTag = 'KshortTracks', + ) + +standaloneTrackMonitorK0MC = standaloneTrackMonitor.clone( + folderName = "K0Tracks", + trackInputTag = 'KshortTracks', + puScaleFactorFile = "PileupScaleFactor_316082_wrt_nVertex_DYToLL.root", + doPUCorrection = True, + isMC = True + ) + +standaloneTrackMonitorLambda = standaloneTrackMonitor.clone( + folderName = "LambdaTracks", + trackInputTag = 'LambdaTracks', + ) + +standaloneTrackMonitorLambdaMC = standaloneTrackMonitor.clone( + folderName = "LambdaTracks", + trackInputTag = 'LambdaTracks', + puScaleFactorFile = "PileupScaleFactor_316082_wrt_nVertex_DYToLL.root", + doPUCorrection = True, + isMC = True + ) + +standaloneValidationK0s = 
cms.Sequence( + hltPathFilter + * selectedPrimaryVertices + * KShortEventSelector + * KshortTracks + * standaloneTrackMonitorK0 + * KshortMonitor) + +standaloneValidationK0sMC = cms.Sequence( + hltPathFilter + * selectedPrimaryVertices + * KShortEventSelector + * KshortTracks + * standaloneTrackMonitorK0 + * KshortMonitor) + +standaloneValidationLambdas = cms.Sequence( + hltPathFilter + * selectedPrimaryVertices + * LambdaEventSelector + * LambdaTracks + * standaloneTrackMonitorLambda + * LambdaMonitor) + +standaloneValidationLambdasMC = cms.Sequence( + hltPathFilter + * selectedPrimaryVertices + * LambdaEventSelector + * LambdaTracks + * standaloneTrackMonitorLambdaMC + * LambdaMonitor) + +################## # For ZtoEE +################## standaloneTrackMonitorElec = standaloneTrackMonitor.clone( folderName = "ElectronTracks", trackInputTag = 'electronTracks', @@ -161,7 +237,10 @@ * standaloneTrackMonitorElecMC * standaloneTrackMonitorMC * ZEEDetailsMC) + +################## # For ZtoMM +################## standaloneTrackMonitorMuon = standaloneTrackMonitor.clone( folderName = "MuonTracks", trackInputTag = 'muonTracks', @@ -192,7 +271,9 @@ * standaloneTrackMonitorMuonMC * standaloneTrackMonitorMC) +################## # For ttbar +################## standaloneTrackMonitorTTbar = standaloneTrackMonitor.clone( folderName = "TTbarTracks", trackInputTag = 'ttbarTracks', diff --git a/DQM/TrackingMonitorSource/python/TrackingSourceConfig_Tier0_cff.py b/DQM/TrackingMonitorSource/python/TrackingSourceConfig_Tier0_cff.py index 02d3da248a109..faef9e465b492 100644 --- a/DQM/TrackingMonitorSource/python/TrackingSourceConfig_Tier0_cff.py +++ b/DQM/TrackingMonitorSource/python/TrackingSourceConfig_Tier0_cff.py @@ -397,6 +397,8 @@ def _copyIfExists(mod, pset, name): TrackingDQMSourceTier0 += TrackSeedMonSequence +from DQM.TrackingMonitorSource.shortTrackResolution_cff import * + # MessageLog for module in selectedModules : label = str(module)+'LogMessageMonCommon' @@ -404,6 
+406,7 @@ def _copyIfExists(mod, pset, name): TrackingDQMSourceTier0 += voMonitoringSequence TrackingDQMSourceTier0 += voWcutMonitoringSequence TrackingDQMSourceTier0 += primaryVertexResolution +TrackingDQMSourceTier0 += shortTrackResolution3to8 TrackingDQMSourceTier0 += dqmInfoTracking @@ -426,6 +429,7 @@ def _copyIfExists(mod, pset, name): TrackingDQMSourceTier0Common += voMonitoringCommonSequence TrackingDQMSourceTier0Common += voWcutMonitoringCommonSequence TrackingDQMSourceTier0Common += primaryVertexResolution +TrackingDQMSourceTier0Common += shortTrackResolution3to8 TrackingDQMSourceTier0Common += dqmInfoTracking TrackingDQMSourceTier0MinBias = cms.Sequence(cms.ignore(trackingDQMgoodOfflinePrimaryVertices)) diff --git a/DQM/TrackingMonitorSource/python/V0Selections_cfi.py b/DQM/TrackingMonitorSource/python/V0Selections_cfi.py new file mode 100644 index 0000000000000..45df833be8995 --- /dev/null +++ b/DQM/TrackingMonitorSource/python/V0Selections_cfi.py @@ -0,0 +1,12 @@ +from DQM.TrackingMonitorSource.v0EventSelector_cfi import * +from DQM.TrackingMonitorSource.v0VertexTrackProducer_cfi import * + +KShortEventSelector = v0EventSelector.clone() +LambdaEventSelector = v0EventSelector.clone( + vertexCompositeCandidates = "generalV0Candidates:Lambda" +) + +KshortTracks = v0VertexTrackProducer.clone() +LambdaTracks = v0VertexTrackProducer.clone( + vertexCompositeCandidates = "generalV0Candidates:Lambda" +) diff --git a/DQM/TrackingMonitorSource/python/shortTrackResolution_cff.py b/DQM/TrackingMonitorSource/python/shortTrackResolution_cff.py new file mode 100644 index 0000000000000..b07c24d88cb99 --- /dev/null +++ b/DQM/TrackingMonitorSource/python/shortTrackResolution_cff.py @@ -0,0 +1,74 @@ +import FWCore.ParameterSet.Config as cms +from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer + +from RecoTracker.FinalTrackSelectors.SingleLongTrackProducer_cfi import * + +from RecoTracker.FinalTrackSelectors.trackerTrackHitFilter_cfi import trackerTrackHitFilter as 
_trackerTrackHitFilter +ShortTrackCandidates = _trackerTrackHitFilter.clone(src = "SingleLongTrackProducer", + truncateTracks = True, + replaceWithInactiveHits = True, + rejectBadStoNHits = True, + usePixelQualityFlag = True) + +from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker +phase2_tracker.toModify(ShortTrackCandidates, + isPhase2 = True) + +ShortTrackCandidates3 = ShortTrackCandidates.clone(minimumHits = 3, + layersRemaining = 3) + +ShortTrackCandidates4 = ShortTrackCandidates.clone(minimumHits = 4, + layersRemaining = 4) + +ShortTrackCandidates5 = ShortTrackCandidates.clone(minimumHits = 5, + layersRemaining = 5) + +ShortTrackCandidates6 = ShortTrackCandidates.clone(minimumHits = 6, + layersRemaining = 6) + +ShortTrackCandidates7 = ShortTrackCandidates.clone(minimumHits = 7, + layersRemaining = 7) + +ShortTrackCandidates8 = ShortTrackCandidates.clone(minimumHits = 8, + layersRemaining = 8) + +import RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cff +RefittedShortTracks = RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cff.ctfWithMaterialTracks.clone(src = 'ShortTrackCandidates') + +RefittedShortTracks3 = RefittedShortTracks.clone(src = 'ShortTrackCandidates3') +RefittedShortTracks4 = RefittedShortTracks.clone(src = 'ShortTrackCandidates4') +RefittedShortTracks5 = RefittedShortTracks.clone(src = 'ShortTrackCandidates5') +RefittedShortTracks6 = RefittedShortTracks.clone(src = 'ShortTrackCandidates6') +RefittedShortTracks7 = RefittedShortTracks.clone(src = 'ShortTrackCandidates7') +RefittedShortTracks8 = RefittedShortTracks.clone(src = 'ShortTrackCandidates8') + +from DQM.TrackingMonitorSource.shortenedTrackResolution_cfi import shortenedTrackResolution as _shortenedTrackResolution +trackingResolution = _shortenedTrackResolution.clone(folderName = "Tracking/ShortTrackResolution", + hitsRemainInput = ["3","4","5","6","7","8"], + minTracksEtaInput = 0.0, + maxTracksEtaInput = 2.2, + minTracksPtInput = 15.0, + maxTracksPtInput = 
99999.9, + maxDrInput = 0.01, + tracksInputTag = "SingleLongTrackProducer", + tracksRerecoInputTag = ["RefittedShortTracks3", + "RefittedShortTracks4", + "RefittedShortTracks5", + "RefittedShortTracks6", + "RefittedShortTracks7", + "RefittedShortTracks8"]) + +shortTrackResolution3to8 = cms.Sequence(SingleLongTrackProducer * + ShortTrackCandidates3 * + ShortTrackCandidates4 * + ShortTrackCandidates5 * + ShortTrackCandidates6 * + ShortTrackCandidates7 * + ShortTrackCandidates8 * + RefittedShortTracks3 * + RefittedShortTracks4 * + RefittedShortTracks5 * + RefittedShortTracks6 * + RefittedShortTracks7 * + RefittedShortTracks8 * + trackingResolution) diff --git a/DQM/TrackingMonitorSource/test/BuildFile.xml b/DQM/TrackingMonitorSource/test/BuildFile.xml index 80f374037d92c..5d606b7bfcc91 100644 --- a/DQM/TrackingMonitorSource/test/BuildFile.xml +++ b/DQM/TrackingMonitorSource/test/BuildFile.xml @@ -1 +1,2 @@ + diff --git a/DQM/TrackingMonitorSource/test/Tracker_DataMCValidation_cfg.py b/DQM/TrackingMonitorSource/test/Tracker_DataMCValidation_cfg.py index 016f0bb29c63d..90e0df62f1f32 100644 --- a/DQM/TrackingMonitorSource/test/Tracker_DataMCValidation_cfg.py +++ b/DQM/TrackingMonitorSource/test/Tracker_DataMCValidation_cfg.py @@ -19,6 +19,11 @@ VarParsing.VarParsing.multiplicity.singleton, # singleton or list VarParsing.VarParsing.varType.string, # string, int, or float "type of sequence to run") +options.register('isRECO', + False, + VarParsing.VarParsing.multiplicity.singleton, # singleton or list + VarParsing.VarParsing.varType.bool, # string, int, or float + "is the input sample RECO or AOD, assume AOD") options.parseArguments() # import of standard configurations @@ -74,7 +79,17 @@ # Tracker Data MC validation suite process.load('DQM.TrackingMonitorSource.TrackingDataMCValidation_Standalone_cff') +# Set the flag is this is AOD or RECO analysis +process.standaloneTrackMonitor.isRECO = options.isRECO +process.standaloneTrackMonitorK0.isRECO = options.isRECO 
+process.standaloneTrackMonitorLambda.isRECO = options.isRECO +process.standaloneTrackMonitorElec.isRECO = options.isRECO +process.standaloneTrackMonitorMuon.isRECO = options.isRECO +process.standaloneTrackMonitorTTbar.isRECO = options.isRECO + minbias_analysis_step = cms.Path(process.standaloneValidationMinbias) +k0_analysis_step = cms.Path(process.standaloneValidationK0s) +lambda_analysis_step = cms.Path(process.standaloneValidationLambdas) zee_analysis_step = cms.Path(process.standaloneValidationElec) zmm_analysis_step = cms.Path(process.standaloneValidationMuon) ttbar_analysis_step = cms.Path(process.standaloneValidationTTbar) @@ -87,6 +102,9 @@ process.analysis_step = ttbar_analysis_step elif (options.sequenceType == "minbias") : process.analysis_step = minbias_analysis_step +elif (options.sequenceType == "V0s") : + process.analysis_1_step = k0_analysis_step + process.analysis_2_step = lambda_analysis_step else : raise RuntimeError("Unrecognized sequenceType given option: %. Exiting" % options.sequenceType) @@ -95,7 +113,10 @@ process.DQMoutput_step = cms.EndPath(process.DQMoutput) # Schedule definition -process.schedule = cms.Schedule(process.analysis_step, process.endjob_step, process.DQMoutput_step) +if (options.sequenceType == "V0s"): + process.schedule = cms.Schedule(process.analysis_1_step, process.analysis_2_step, process.endjob_step, process.DQMoutput_step) +else: + process.schedule = cms.Schedule(process.analysis_step, process.endjob_step, process.DQMoutput_step) ################################################################### # Set the process to run multi-threaded diff --git a/DQM/TrackingMonitorSource/test/testTrackResolutionHarvesting_cfg.py b/DQM/TrackingMonitorSource/test/testTrackResolutionHarvesting_cfg.py new file mode 100644 index 0000000000000..3a399aae2523f --- /dev/null +++ b/DQM/TrackingMonitorSource/test/testTrackResolutionHarvesting_cfg.py @@ -0,0 +1,91 @@ +import FWCore.ParameterSet.Config as cms +import 
FWCore.ParameterSet.VarParsing as VarParsing + +options = VarParsing.VarParsing() +options.register('maxEvents', + -1, + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.int, + "maximum events") +options.register('globalTag', + '125X_mcRun3_2022_design_v6', + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "conditions") +options.register('inputFile', + 'step1_DQM_LayerRot_9p43e-6_fromRECO.root', + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "input file") +options.parseArguments() + +from Configuration.Eras.Era_Run3_cff import Run3 +process = cms.Process('HARVESTING',Run3) + +# import of standard configurations +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('Configuration.EventContent.EventContent_cff') +process.load('SimGeneral.MixingModule.mixNoPU_cfi') +process.load('Configuration.StandardSequences.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.MagneticField_cff') +process.load('Configuration.StandardSequences.DQMSaverAtRunEnd_cff') +process.load('Configuration.StandardSequences.Harvesting_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(options.maxEvents), + output = cms.optional.untracked.allowed(cms.int32,cms.PSet) +) + +# Input source +process.source = cms.Source("DQMRootSource", + fileNames = cms.untracked.vstring('file:'+options.inputFile) + ) + +process.options = cms.untracked.PSet( + IgnoreCompletely = cms.untracked.vstring(), + Rethrow = cms.untracked.vstring('ProductNotFound'), + accelerators = cms.untracked.vstring('*'), + allowUnscheduled = cms.obsolete.untracked.bool, + canDeleteEarly = cms.untracked.vstring(), + deleteNonConsumedUnscheduledModules = 
cms.untracked.bool(True), + dumpOptions = cms.untracked.bool(False), + emptyRunLumiMode = cms.obsolete.untracked.string, + eventSetup = cms.untracked.PSet( + forceNumberOfConcurrentIOVs = cms.untracked.PSet( + allowAnyLabel_=cms.required.untracked.uint32 + ), + numberOfConcurrentIOVs = cms.untracked.uint32(0) + ), + fileMode = cms.untracked.string('FULLMERGE'), + forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False), + makeTriggerResults = cms.obsolete.untracked.bool, + numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(0), + numberOfConcurrentRuns = cms.untracked.uint32(1), + numberOfStreams = cms.untracked.uint32(0), + numberOfThreads = cms.untracked.uint32(1), + printDependencies = cms.untracked.bool(False), + sizeOfStackForThreadsInKB = cms.optional.untracked.uint32, + throwIfIllegalParameter = cms.untracked.bool(True), + wantSummary = cms.untracked.bool(False) +) + +# Other statements +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '') + +process.dqmsave_step = cms.Path(process.DQMSaver) + +# Schedule definition +process.schedule = cms.Schedule(process.alcaHarvesting,process.dqmsave_step) +from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask +associatePatAlgosToolsTask(process) + +# Customisation from command line + +# Add early deletion of temporary data products to reduce peak memory need +from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete +process = customiseEarlyDelete(process) +# End adding early deletion diff --git a/DQM/TrackingMonitorSource/test/testTrackResolution_cfg.py b/DQM/TrackingMonitorSource/test/testTrackResolution_cfg.py new file mode 100644 index 0000000000000..49ef4149d18de --- /dev/null +++ b/DQM/TrackingMonitorSource/test/testTrackResolution_cfg.py @@ -0,0 +1,157 @@ +import FWCore.ParameterSet.Config as cms +import FWCore.Utilities.FileUtils as FileUtils +from FWCore.ParameterSet.VarParsing 
import VarParsing + +options = VarParsing('analysis') +options.register('inputTag', + 'LayerRot_9p43e-6', + VarParsing.multiplicity.singleton, + VarParsing.varType.string, + "input tag") +options.register('inputFile', + '/store/relval/CMSSW_14_0_0_pre1/RelValZMM_14/GEN-SIM-RECO/133X_mcRun3_2023_realistic_v3-v1/2590000/586487a4-71be-4b23-b5a4-5662fab803c9.root', + VarParsing.multiplicity.singleton, + VarParsing.varType.string, + "input file") +options.register('isAlCaReco', + False, + VarParsing.multiplicity.singleton, + VarParsing.varType.bool, + "is alcareco input file?") +options.register('isUnitTest', + False, + VarParsing.multiplicity.singleton, + VarParsing.varType.bool, + "is this configuration run in unit test?") +options.parseArguments() + +from Configuration.Eras.Era_Run3_cff import Run3 +process = cms.Process("TrackingResolution", Run3) + +##################################################################### +# import of standard configurations +##################################################################### +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.MessageLogger.cerr.FwkReport.reportEvery = (100 if options.isUnitTest else 100000) +process.load('Configuration.EventContent.EventContent_cff') +process.load('Configuration.StandardSequences.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.MagneticField_cff') +process.load('DQMOffline.Configuration.DQMOffline_cff') +process.load('Configuration.StandardSequences.EndOfProcess_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +##################################################################### +## BeamSpot from database (i.e. 
GlobalTag), needed for Refitter +##################################################################### +process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi") + +##################################################################### +# Load and Configure Measurement Tracker Event +##################################################################### +process.load("RecoTracker.MeasurementDet.MeasurementTrackerEventProducer_cfi") +if(options.isAlCaReco): + # customize MeasurementTrackerEvent for ALCARECO + process.MeasurementTrackerEvent.pixelClusterProducer = "ALCARECOTkAlDiMuon" + process.MeasurementTrackerEvent.stripClusterProducer = "ALCARECOTkAlDiMuon" + process.MeasurementTrackerEvent.inactivePixelDetectorLabels = cms.VInputTag() + process.MeasurementTrackerEvent.inactiveStripDetectorLabels = cms.VInputTag() + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(10 if options.isUnitTest else -1) +) + +##################################################################### +# Input source +##################################################################### +#filelist = FileUtils.loadListFromFile("listOfFiles_idealMC_GEN-SIM-RECO.txt") +#filelist = FileUtils.loadListFromFile("listOfFiles_idealMC_TkAlDiMuonAndVertex.txt") +#readFiles = cms.untracked.vstring( *filelist) + +readFiles = cms.untracked.vstring(options.inputFile) +process.source = cms.Source("PoolSource",fileNames = readFiles) + +process.options = cms.untracked.PSet() + +##################################################################### +# Output +##################################################################### +process.DQMoutput = cms.OutputModule("DQMRootOutputModule", + dataset = cms.untracked.PSet( + dataTier = cms.untracked.string('DQMIO'), + filterName = cms.untracked.string('') + ), + fileName = cms.untracked.string('file:step1_DQM_'+options.inputTag+'_'+('fromALCA' if options.isAlCaReco else 'fromRECO' )+'.root'), + outputCommands = 
process.DQMEventContent.outputCommands, + splitLevel = cms.untracked.int32(0) +) + +##################################################################### +# Other statements +##################################################################### +from Configuration.AlCa.GlobalTag import GlobalTag +#process.GlobalTag = GlobalTag(process.GlobalTag,"133X_mcRun3_2023_realistic_v3", '') +process.GlobalTag = GlobalTag(process.GlobalTag, "125X_mcRun3_2022_design_v6", '') +process.GlobalTag.toGet = cms.VPSet(cms.PSet(connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"), + record = cms.string('TrackerAlignmentRcd'), + tag = cms.string(options.inputTag))) + +##################################################################### +# The DQM analysis sequence +##################################################################### +process.load("DQM.TrackingMonitorSource.shortTrackResolution_cff") +process.load("RecoTracker.TrackProducer.TrackRefitters_cff") +import RecoTracker.TrackProducer.TrackRefitters_cff +process.LongTracksRefit = process.TrackRefitter.clone( + src = 'SingleLongTrackProducer', + TrajectoryInEvent = True, + TTRHBuilder = "WithAngleAndTemplate", + NavigationSchool = '' +) + +process.ShortTrackCandidates3.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates4.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates5.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates6.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates7.src = cms.InputTag("LongTracksRefit") +process.ShortTrackCandidates8.src = cms.InputTag("LongTracksRefit") + +process.SingleLongTrackProducer.matchMuons = cms.InputTag("muons") +if(options.isAlCaReco): + process.SingleLongTrackProducer.requiredDr = cms.double(-9999.) 
# do not require any matchings + process.SingleLongTrackProducer.allTracks = cms.InputTag("ALCARECOTkAlDiMuon") + +##################################################################### +# Path +##################################################################### +process.analysis_step = cms.Path(process.offlineBeamSpot * + process.MeasurementTrackerEvent * + process.SingleLongTrackProducer * + process.LongTracksRefit * + process.ShortTrackCandidates3 * + process.ShortTrackCandidates4 * + process.ShortTrackCandidates5 * + process.ShortTrackCandidates6 * + process.ShortTrackCandidates7 * + process.ShortTrackCandidates8 * + process.RefittedShortTracks3 * + process.RefittedShortTracks4 * + process.RefittedShortTracks5 * + process.RefittedShortTracks6 * + process.RefittedShortTracks7 * + process.RefittedShortTracks8 * + process.trackingResolution) + +##################################################################### +# Path and EndPath definitions +##################################################################### +process.endjob_step = cms.EndPath(process.endOfProcess) +process.DQMoutput_step = cms.EndPath(process.DQMoutput) + +process.schedule = cms.Schedule(process.analysis_step, process.endjob_step, process.DQMoutput_step) + +################################################################### +# Set the process to run multi-threaded +################################################################### +process.options.numberOfThreads = 8 diff --git a/DQM/TrackingMonitorSource/test/testTrackingDATAMC.sh b/DQM/TrackingMonitorSource/test/testTrackingDATAMC.sh index bda4333692e4a..4fc11498ff9ed 100755 --- a/DQM/TrackingMonitorSource/test/testTrackingDATAMC.sh +++ b/DQM/TrackingMonitorSource/test/testTrackingDATAMC.sh @@ -2,44 +2,53 @@ function die { echo $1: status $2 ; exit $2; } -echo "TESTING Tracking DATA/MC comparison codes ..." 
+function runTests { -echo -e "TESTING step1 (electrons) ...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_cfg.py maxEvents=100 inputFiles=/store/relval/CMSSW_13_3_0_pre2/RelValZEE_14/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/c02ca5ba-f454-4cd3-b114-b55e0309f9db.root || die "Failure running Tracker_DataMCValidation_cfg.py" $? + # local variables + local testType="$1" + local inputFiles="$2" + local sequenceType="$3" + local isRECO="$4" + local globalTag="$5" -mv step1_DQM_1.root step1_DQM_1_electrons.root + echo -e "TESTING step1 ($testType) ...\n\n" -echo -e "TESTING step2 (electrons)...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_Harvest_cfg.py inputFiles=file:step1_DQM_1_electrons.root || die "Failure running Tracker_DataMCValidation_Harvest_cfg.py" $? + # optional for the cmsRun sequence + local sequenceArg="" + [ -n "$sequenceType" ] && sequenceArg="sequenceType=$sequenceType" + local globalTagArg="" + [ -n "$globalTag" ] && globalTagArg="globalTag=$globalTag" -echo -e "================== Done with testing electrons ==================\n\n" + cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_cfg.py maxEvents=100 inputFiles="$inputFiles" $sequenceArg isRECO="$isRECO" $globalTagArg || die "Failure running Tracker_DataMCValidation_cfg.py sequenceType=$sequenceType" $? -echo -e "TESTING step1 (muons) ...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_cfg.py maxEvents=100 inputFiles=/store/relval/CMSSW_13_3_0_pre2/RelValZMM_14/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/4096bfe7-bc10-4f7f-81ab-4f4adb59e838.root sequenceType=muons || die "Failure running Tracker_DataMCValidation_cfg.py sequenceType=muons" $? 
+ mv step1_DQM_1.root "step1_DQM_1_${testType}.root" -mv step1_DQM_1.root step1_DQM_1_muons.root + echo -e "TESTING step2 ($testType)...\n\n" + cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_Harvest_cfg.py inputFiles="file:step1_DQM_1_${testType}.root" || die "Failure running Tracker_DataMCValidation_Harvest_cfg.py" $? -echo -e "TESTING step2 (muons)...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_Harvest_cfg.py inputFiles=file:step1_DQM_1_muons.root || die "Failure running Tracker_DataMCValidation_Harvest_cfg.py" $? + mv DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root "step2_DQM_${testType}.root" -echo -e "================== Done with testing muons ==================...\n\n" + echo -e "================== Done with testing $testType ==================\n\n" +} -echo -e "TESTING step1 (ttbar) ...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_cfg.py maxEvents=100 inputFiles=/store/relval/CMSSW_13_3_0_pre2/RelValTTbar_14TeV/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/fc1ccb5e-b038-45f2-a06b-e26a6a01681e.root sequenceType=ttbar || die "Failure running Tracker_DataMCValidation_cfg.py sequenceType=ttbar" $? +####################################################### +# RECO checks +####################################################### +echo "TESTING Tracking DATA/MC comparison codes on RECO ..." 
-mv step1_DQM_1.root step1_DQM_1_ttbar.root +runTests "electrons" "/store/relval/CMSSW_13_3_0_pre2/RelValZEE_14/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/c02ca5ba-f454-4cd3-b114-b55e0309f9db.root" "" "True" +runTests "muons" "/store/relval/CMSSW_13_3_0_pre2/RelValZMM_14/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/4096bfe7-bc10-4f7f-81ab-4f4adb59e838.root" "muons" "True" +runTests "ttbar" "/store/relval/CMSSW_13_3_0_pre2/RelValTTbar_14TeV/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/fc1ccb5e-b038-45f2-a06b-e26a6a01681e.root" "ttbar" "True" +runTests "minbias" "/store/relval/CMSSW_13_3_0_pre2/RelValNuGun/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/bc506605-d659-468e-b75a-5d3de82e579f.root" "minbias" "True" +runTests "V0s" "/store/relval/CMSSW_13_3_0_pre2/RelValNuGun/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/bc506605-d659-468e-b75a-5d3de82e579f.root" "V0s" "True" -echo -e "TESTING step2 (ttbar)...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_Harvest_cfg.py inputFiles=file:step1_DQM_1_ttbar.root || die "Failure running Tracker_DataMCValidation_Harvest_cfg.py" $? +####################################################### +# AOD checks +####################################################### +echo "TESTING Tracking DATA/MC comparison codes on AOD..." -echo -e "================== Done with testing ttbar ==================...\n\n" - -echo "TESTING step1 (minbias) ...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_cfg.py maxEvents=100 inputFiles=/store/relval/CMSSW_13_3_0_pre2/RelValNuGun/GEN-SIM-RECO/PU_132X_mcRun3_2023_realistic_v2_RV213-v1/2580000/bc506605-d659-468e-b75a-5d3de82e579f.root sequenceType=minbias || die "Failure running Tracker_DataMCValidation_cfg.py sequenceType=ttbar" $? 
- -mv step1_DQM_1.root step1_DQM_1_minbias.root - -echo "TESTING step2 (minbias)...\n\n" -cmsRun ${SCRAM_TEST_PATH}/Tracker_DataMCValidation_Harvest_cfg.py inputFiles=file:step1_DQM_1_minbias.root || die "Failure running Tracker_DataMCValidation_Harvest_cfg.py" $? - -echo -e "================== Done with testing minbias ==================...\n\n" +runTests "electrons (AOD)" "/store/relval/CMSSW_13_0_12/RelValZEE_PU_13p6/AODSIM/PU_130X_mcRun3_2023_realistic_postBPix_v2_RV201-v1/2580000/0d49e310-e06f-4c26-a637-1116b02ef1ce.root" "" "False" "130X_mcRun3_2023_realistic_postBPix_v2" +runTests "muons (AOD)" "/store/relval/CMSSW_13_0_12/RelValZMM_PU_13p6/AODSIM/PU_130X_mcRun3_2023_realistic_postBPix_v2_RV201-v1/2580000/d2a2506c-8954-464b-beda-48242472406d.root" "muons" "False" "130X_mcRun3_2023_realistic_postBPix_v2" +runTests "ttbar (AOD)" "/store/relval/CMSSW_13_0_12/RelValTTbar_SemiLeptonic_PU_13p6/AODSIM/PU_130X_mcRun3_2023_realistic_postBPix_v2_RV201-v1/2580000/08c015c3-c9bd-4017-b21d-264dbaa06445.root" "ttbar" "False" "130X_mcRun3_2023_realistic_postBPix_v2" +runTests "minbias (AOD)" "/store/relval/CMSSW_13_0_12/RelValSingleNuGun_E10_PU/AODSIM/PU_130X_mcRun3_2023_realistic_postBPix_v2_RV201-v1/2580000/37ee5a61-8896-4eb3-8e6c-20ed0ad5b2dc.root" "minbias" "False" "130X_mcRun3_2023_realistic_postBPix_v2" +runTests "V0s (AOD)" "/store/relval/CMSSW_13_0_12/RelValSingleNuGun_E10_PU/AODSIM/PU_130X_mcRun3_2023_realistic_postBPix_v2_RV201-v1/2580000/37ee5a61-8896-4eb3-8e6c-20ed0ad5b2dc.root" "V0s" "False" "130X_mcRun3_2023_realistic_postBPix_v2" diff --git a/DQM/TrackingMonitorSource/test/testTrackingResolution.sh b/DQM/TrackingMonitorSource/test/testTrackingResolution.sh new file mode 100755 index 0000000000000..25c44a8fdc3d7 --- /dev/null +++ b/DQM/TrackingMonitorSource/test/testTrackingResolution.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +function die { echo $1: status $2 ; exit $2; } + +echo -e "TESTING step1 with RECO inputs ...\n\n" +cmsRun 
${SCRAM_TEST_PATH}/testTrackResolution_cfg.py isUnitTest=True || die "Failure running testTrackResolution_cfg.py isUnitTest=True" $? + +echo -e "TESTING step1 with ALCARECO inputs ...\n\n" +cmsRun ${SCRAM_TEST_PATH}/testTrackResolution_cfg.py isUnitTest=True isAlCaReco=True inputFile=/store/mc/Run3Winter23Reco/DYJetsToMuMu_M-50_TuneCP5_13p6TeV-madgraphMLM-pythia8/ALCARECO/TkAlDiMuonAndVertex-TRKDesignNoPU_AlcaRecoTRKMu_designGaussSigmaZ4cm_125X_mcRun3_2022_design_v6-v1/60000/93401af5-0da6-40ce-82e4-d5571c93dd97.root || die "Failure running testTrackResolution_cfg.py isUnitTest=True isAlCaReco=True" $? + +echo -e "TESTING harvesting with RECO inputs ...\n\n" +cmsRun ${SCRAM_TEST_PATH}/testTrackResolutionHarvesting_cfg.py || die "Failure running testTrackResolutionHarvesting_cfg.py" $? + +echo -e "TESTING harvesting with ALCARECO inputs ...\n\n" +cmsRun ${SCRAM_TEST_PATH}/testTrackResolutionHarvesting_cfg.py inputFile=step1_DQM_LayerRot_9p43e-6_fromALCA.root || die "Failure running testTrackResolutionHarvesting_cfg.py inputFile=step1_DQM_LayerRot_9p43e-6_fromALCA.root" $? 
diff --git a/DQM/TrigXMonitorClient/interface/L1ScalersClient.h b/DQM/TrigXMonitorClient/interface/L1ScalersClient.h index 7de0c878afa82..b6e51dd4e84f2 100644 --- a/DQM/TrigXMonitorClient/interface/L1ScalersClient.h +++ b/DQM/TrigXMonitorClient/interface/L1ScalersClient.h @@ -28,15 +28,9 @@ class L1ScalersClient /// Constructors L1ScalersClient(const edm::ParameterSet &ps); - /// Destructor - ~L1ScalersClient() override{}; - /// BeginJob void beginJob(void) override; - // /// Endjob - // void endJob(void); - /// BeginRun void beginRun(const edm::Run &run, const edm::EventSetup &c) override; diff --git a/DQMOffline/Alignment/python/ALCARECOTkAlDQM_cff.py b/DQMOffline/Alignment/python/ALCARECOTkAlDQM_cff.py index 1f61d6dfadb88..f54a75d8c2099 100644 --- a/DQMOffline/Alignment/python/ALCARECOTkAlDQM_cff.py +++ b/DQMOffline/Alignment/python/ALCARECOTkAlDQM_cff.py @@ -1,5 +1,6 @@ import FWCore.ParameterSet.Config as cms import DQM.TrackingMonitor.TrackingMonitor_cfi +import DQM.TrackingMonitor.V0Monitor_cfi import DQMOffline.Alignment.TkAlCaRecoMonitor_cfi import DQMOffline.Alignment.DiMuonVertexMonitor_cfi import DQMOffline.Alignment.DiMuonMassBiasMonitor_cfi @@ -78,7 +79,11 @@ muonTracks = 'ALCARECO'+__trackCollName, vertices = 'offlinePrimaryVertices', FolderName = "AlCaReco/"+__selectionName, - maxSVdist = 50 + maxSVdist = 50, + SVDistConfig = dict(maxDeltaEta = 3.7, title = 'PV - Z Vertex distance'), + SVDistSigConfig = dict(maxDeltaEta = 3.7, title = 'PV - Z Vertex distance significance'), + SVDist3DConfig = dict(maxDeltaEta = 3.7, title = 'PV - Z Vertex 3D distance'), + SVDist3DSigConfig = dict(maxDeltaEta = 3.7, title = 'PV - Z Vertex 3D distance significance') ) ALCARECOTkAlDiMuonMassBiasDQM = DQMOffline.Alignment.DiMuonMassBiasMonitor_cfi.DiMuonMassBiasMonitor.clone( @@ -166,10 +171,10 @@ FolderName = "AlCaReco/"+__selectionName, maxSVdist = 50, CosPhi3DConfig = dict(maxDeltaEta = 1.3), - SVDistConfig = dict(maxDeltaEta = 1.3), - SVDistSigConfig = 
dict(maxDeltaEta = 1.3), - SVDist3DConfig = dict(maxDeltaEta = 1.3), - SVDist3DSigConfig = dict(maxDeltaEta = 1.3) + SVDistConfig = dict(maxDeltaEta = 1.3, title = 'PV - J/#psi Vertex distance'), + SVDistSigConfig = dict(maxDeltaEta = 1.3, title = 'PV - J/#psi Vertex distance significance'), + SVDist3DConfig = dict(maxDeltaEta = 1.3, title = 'PV - J/#psi Vertex 3D distance'), + SVDist3DSigConfig = dict(maxDeltaEta = 1.3, title = 'PV - J/#psi Vertex 3D distance significance') ) ALCARECOTkAlJpsiMassBiasDQM = DQMOffline.Alignment.DiMuonMassBiasMonitor_cfi.DiMuonMassBiasMonitor.clone( @@ -241,10 +246,10 @@ FolderName = "AlCaReco/"+__selectionName, maxSVdist = 50, CosPhi3DConfig = dict(maxDeltaEta = 1.6), - SVDistConfig = dict(maxDeltaEta = 1.6), - SVDistSigConfig = dict(maxDeltaEta = 1.6), - SVDist3DConfig = dict(maxDeltaEta = 1.6), - SVDist3DSigConfig = dict(maxDeltaEta = 1.6) + SVDistConfig = dict(maxDeltaEta = 1.6, title = 'PV - #Upsilon Vertex distance'), + SVDistSigConfig = dict(maxDeltaEta = 1.6, title = 'PV - #Upsilon Vertex distance significance'), + SVDist3DConfig = dict(maxDeltaEta = 1.6, title = 'PV - #Upsilon Vertex 3D distance'), + SVDist3DSigConfig = dict(maxDeltaEta = 1.6, title = 'PV - #Upsilon Vertex 3D distance significance') ) ALCARECOTkAlUpsilonMassBiasDQM = DQMOffline.Alignment.DiMuonMassBiasMonitor_cfi.DiMuonMassBiasMonitor.clone( @@ -360,6 +365,96 @@ ALCARECOTkAlMinBiasDQM = cms.Sequence( ALCARECOTkAlMinBiasTrackingDQM + ALCARECOTkAlMinBiasTkAlDQM ) +######################################################## +#############--- TkAlKshorts ---####################### +######################################################## +__selectionName = 'TkAlKShortTracks' +ALCARECOTkAlKShortTracksTrackingDQM = ALCARECOTkAlZMuMuTrackingDQM.clone( + #names and desigantions + TrackProducer = 'ALCARECO'+__selectionName, + AlgoName = 'ALCARECO'+__selectionName, + FolderName = "AlCaReco/"+__selectionName, + BSFolderName = "AlCaReco/"+__selectionName+"/BeamSpot", + 
doSIPPlots = True, + doDCAPlots = True, + doDCAwrt000Plots = True, + doDCAwrtPVPlots = True, + # margins and settings + TkSizeBin = 71, + TkSizeMin = -0.5, + TkSizeMax = 70.5, + TrackPtMax = 30 +) + +ALCARECOTkAlKShortTracksTkAlDQM = ALCARECOTkAlZMuMuTkAlDQM.clone( + #names and desigantions + TrackProducer = 'ALCARECO'+__selectionName, + AlgoName = 'ALCARECO'+__selectionName, + FolderName = "AlCaReco/"+__selectionName, + # margins and settings + fillInvariantMass = False, + TrackPtMax = 30, + SumChargeBin = 101, + SumChargeMin = -50.5, + SumChargeMax = 50.5 +) + +# Added module for V0Monitoring +ALCARECOTkAlKShortMonitor = DQM.TrackingMonitor.V0Monitor_cfi.v0Monitor.clone( + FolderName = "AlCaReco/"+__selectionName+"/KsCandidates", + v0 = "generalV0Candidates:Kshort") + +ALCARECOTkAlKShortMonitor.histoPSet.massPSet = cms.PSet(nbins = cms.int32 (100), + xmin = cms.double(0.400), + xmax = cms.double(0.600)) + +ALCARECOTkAlKShortTracksDQM = cms.Sequence( ALCARECOTkAlKShortTracksTrackingDQM + ALCARECOTkAlKShortTracksTkAlDQM + ALCARECOTkAlKShortMonitor) + +######################################################## +#############--- TkAlLambdas ---####################### +######################################################## +__selectionName = 'TkAlLambdaTracks' +ALCARECOTkAlLambdaTracksTrackingDQM = ALCARECOTkAlZMuMuTrackingDQM.clone( + #names and desigantions + TrackProducer = 'ALCARECO'+__selectionName, + AlgoName = 'ALCARECO'+__selectionName, + FolderName = "AlCaReco/"+__selectionName, + BSFolderName = "AlCaReco/"+__selectionName+"/BeamSpot", + doSIPPlots = True, + doDCAPlots = True, + doDCAwrt000Plots = True, + doDCAwrtPVPlots = True, + # margins and settings + TkSizeBin = 71, + TkSizeMin = -0.5, + TkSizeMax = 70.5, + TrackPtMax = 30 +) + +ALCARECOTkAlLambdaTracksTkAlDQM = ALCARECOTkAlZMuMuTkAlDQM.clone( + #names and desigantions + TrackProducer = 'ALCARECO'+__selectionName, + AlgoName = 'ALCARECO'+__selectionName, + FolderName = "AlCaReco/"+__selectionName, + # 
margins and settings + fillInvariantMass = False, + TrackPtMax = 30, + SumChargeBin = 101, + SumChargeMin = -50.5, + SumChargeMax = 50.5 +) + +# Added module for V0Monitoring +ALCARECOLambdaMonitor = DQM.TrackingMonitor.V0Monitor_cfi.v0Monitor.clone( + FolderName = "AlCaReco/"+__selectionName+"/LambdaCandidates", + v0 = "generalV0Candidates:Lambda") + +ALCARECOLambdaMonitor.histoPSet.massPSet = cms.PSet(nbins = cms.int32(100), + xmin = cms.double(1.050), + xmax = cms.double(1.250)) + +ALCARECOTkAlLambdaTracksDQM = cms.Sequence( ALCARECOTkAlLambdaTracksTrackingDQM + ALCARECOTkAlLambdaTracksTkAlDQM + ALCARECOLambdaMonitor) + ######################################################## #############--- TkAlJetHT ---####################### ######################################################## diff --git a/DQMOffline/Configuration/python/DQMOfflineCosmics_cff.py b/DQMOffline/Configuration/python/DQMOfflineCosmics_cff.py index 8782f46aa29f9..00843ebfbd899 100644 --- a/DQMOffline/Configuration/python/DQMOfflineCosmics_cff.py +++ b/DQMOffline/Configuration/python/DQMOfflineCosmics_cff.py @@ -8,6 +8,7 @@ from DQM.HcalTasks.OfflineSourceSequence_cosmic import * from DQM.SiStripMonitorClient.SiStripSourceConfigTier0_Cosmic_cff import * from DQM.SiPixelCommon.SiPixelOfflineDQM_source_cff import * +from DQM.SiTrackerPhase2.Phase2TrackerDQMFirstStep_cff import * from DQM.DTMonitorModule.dtDQMOfflineSources_Cosmics_cff import * from DQM.RPCMonitorClient.RPCTier0Source_cff import * from DQM.CSCMonitorModule.csc_dqm_sourceclient_offline_cff import * @@ -30,6 +31,8 @@ DQMOfflineCosmicsTrackerPixel = cms.Sequence( siPixelOfflineDQM_cosmics_source ) +DQMOfflineCosmicsTrackerPhase2 = cms.Sequence( trackerphase2DQMSource ) + #tnp modules are meant for collisions only (DT has separate cff for cosmics) if cscSources.contains(cscTnPEfficiencyMonitor): cscSources.remove(cscTnPEfficiencyMonitor) @@ -42,10 +45,10 @@ cscSources ) from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM 
+from Configuration.Eras.Modifier_phase2_common_cff import phase2_common _run3_GEM_DQMOfflineCosmicsMuonDPG = DQMOfflineCosmicsMuonDPG.copy() _run3_GEM_DQMOfflineCosmicsMuonDPG += gemSourcesCosmics -run3_GEM.toReplaceWith(DQMOfflineCosmicsMuonDPG, _run3_GEM_DQMOfflineCosmicsMuonDPG) - +(run3_GEM & ~phase2_common).toReplaceWith(DQMOfflineCosmicsMuonDPG, _run3_GEM_DQMOfflineCosmicsMuonDPG) DQMOfflineCosmicsCASTOR = cms.Sequence( castorSources ) @@ -58,6 +61,14 @@ DQMOfflineCosmicsCASTOR ) +# No Strip detector in Phase-2 Tracker +from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker +phase2_tracker.toReplaceWith(DQMOfflineCosmicsPreDPG,DQMOfflineCosmicsPreDPG.copyAndExclude([DQMOfflineCosmicsTrackerStrip, DQMOfflineCosmicsTrackerPixel])) + +_DQMOfflineCosmicsPreDPG = DQMOfflineCosmicsPreDPG.copy() +_DQMOfflineCosmicsPreDPG += DQMOfflineCosmicsTrackerPhase2 +phase2_tracker.toReplaceWith(DQMOfflineCosmicsPreDPG,_DQMOfflineCosmicsPreDPG) + DQMOfflineCosmicsDPG = cms.Sequence( DQMOfflineCosmicsPreDPG * DQMMessageLogger ) @@ -92,9 +103,13 @@ # DQMOfflineCosmicsPhysics ) +phase2_common.toReplaceWith(DQMOfflineCosmicsPrePOG,DQMOfflineCosmicsPrePOG.copyAndExclude([DQMOfflineCosmicsTrigger])) + DQMOfflineCosmicsPOG = cms.Sequence( DQMOfflineCosmicsPrePOG * DQMMessageLogger ) DQMOfflineCosmics = cms.Sequence( DQMOfflineCosmicsPreDPG * DQMOfflineCosmicsPrePOG * DQMMessageLogger ) + +PostDQMOffline = cms.Sequence() diff --git a/DQMOffline/Configuration/python/autoDQM.py b/DQMOffline/Configuration/python/autoDQM.py index 7619e6a38657a..291e6d6a6823b 100644 --- a/DQMOffline/Configuration/python/autoDQM.py +++ b/DQMOffline/Configuration/python/autoDQM.py @@ -247,6 +247,10 @@ 'none': ['DQMNone', 'PostDQMOffline', 'DQMNone'], + + 'cosmics' : ['DQMOfflineCosmics', + 'PostDQMOffline', + 'DQMOfflineCosmics'] } _phase2_allowed = ['beam','trackingOnlyDQM','outerTracker', 'trackerPhase2', 'muon','hcal','hcal2','egamma','L1TMonPhase2','HLTMon'] diff --git 
a/DQMOffline/Configuration/test/BuildFile.xml b/DQMOffline/Configuration/test/BuildFile.xml index 4334126214c15..7e3889dabe548 100644 --- a/DQMOffline/Configuration/test/BuildFile.xml +++ b/DQMOffline/Configuration/test/BuildFile.xml @@ -4,12 +4,12 @@ - + - + diff --git a/DQMOffline/Configuration/test/runrest.sh b/DQMOffline/Configuration/test/runrest.sh new file mode 100755 index 0000000000000..83394ce77d5a8 --- /dev/null +++ b/DQMOffline/Configuration/test/runrest.sh @@ -0,0 +1,12 @@ +#!/bin/bash -ex +ERR=0 +PYTHONUNBUFFERED=1 cmsswSequenceInfo.py --runTheMatrix --steps DQM,VALIDATION --infile $1 --offset $2 --dbfile sequences$2.db --threads 1 >run.log 2>&1 || ERR=1 +cat run.log +seqs=$(grep 'Analyzing [0-9][0-9]* seqs' run.log | sed 's|.*Analyzing *||;s| .*||') +echo "Sequences run by final DQMOfflineConfiguration: $seqs" +if [ "$seqs" -gt 0 ] ; then + echo "Final DQMOfflineConfiguration should not run any sequences." + echo "Please update parameters for TestDQMOfflineConfiguration unittest to run the extra sequences." 
+ exit 1 +fi +exit $ERR diff --git a/DQMOffline/EGamma/plugins/PhotonOfflineClient.h b/DQMOffline/EGamma/plugins/PhotonOfflineClient.h index d86448b6dccde..586402cbd1699 100644 --- a/DQMOffline/EGamma/plugins/PhotonOfflineClient.h +++ b/DQMOffline/EGamma/plugins/PhotonOfflineClient.h @@ -63,15 +63,8 @@ class PhotonOfflineClient : public DQMEDHarvester { explicit PhotonOfflineClient(const edm::ParameterSet& pset); ~PhotonOfflineClient() override; - // virtual void analyze(const edm::Event&, const edm::EventSetup& ) ; - // virtual void beginJob() ; - //virtual void endJob() ; void dqmEndJob(DQMStore::IBooker&, DQMStore::IGetter&) override; - // virtual void endLuminosityBlock( const edm::LuminosityBlock& , const edm::EventSetup& ) ; - //virtual void endRun(const edm::Run& , const edm::EventSetup& ) ; - //virtual void runClient(); - virtual void runClient(DQMStore::IBooker& iBooker, DQMStore::IGetter& iGetter); MonitorElement* bookHisto( DQMStore::IBooker& iBooker, std::string histoName, std::string title, int bin, double min, double max); diff --git a/DQMOffline/L1Trigger/interface/L1TMuonDQMOffline.h b/DQMOffline/L1Trigger/interface/L1TMuonDQMOffline.h index a0dde577aee23..0b18a3674b07a 100644 --- a/DQMOffline/L1Trigger/interface/L1TMuonDQMOffline.h +++ b/DQMOffline/L1Trigger/interface/L1TMuonDQMOffline.h @@ -188,6 +188,7 @@ class MuonGmtPair { public: MuonGmtPair(const reco::Muon* muon, const l1t::Muon* regMu, const PropagateToMuon& propagator, bool useAtVtxCoord); MuonGmtPair(const MuonGmtPair& muonGmtPair); + MuonGmtPair& operator=(const MuonGmtPair& muonGmtPair) = default; ~MuonGmtPair(){}; double dR(); diff --git a/DQMOffline/L1Trigger/interface/L1TPhase2MuonOffline.h b/DQMOffline/L1Trigger/interface/L1TPhase2MuonOffline.h index 36b0f128ea118..9a14124585f84 100644 --- a/DQMOffline/L1Trigger/interface/L1TPhase2MuonOffline.h +++ b/DQMOffline/L1Trigger/interface/L1TPhase2MuonOffline.h @@ -137,6 +137,7 @@ class GenMuonGMTPair { public: GenMuonGMTPair(const 
reco::GenParticle* mu, const l1t::L1Candidate* gmtmu); GenMuonGMTPair(const GenMuonGMTPair& muongmtPair); + GenMuonGMTPair& operator=(const GenMuonGMTPair& muongmtPair) = default; ~GenMuonGMTPair(){}; float dR2(); diff --git a/DQMOffline/L1Trigger/interface/L1TTauOffline.h b/DQMOffline/L1Trigger/interface/L1TTauOffline.h index 4465cfb31f04b..177456708c6d0 100644 --- a/DQMOffline/L1Trigger/interface/L1TTauOffline.h +++ b/DQMOffline/L1Trigger/interface/L1TTauOffline.h @@ -56,7 +56,7 @@ class TauL1TPair { : m_tau(tau), m_regTau(regTau), m_eta(999.), m_phi_bar(999.), m_phi_end(999.){}; TauL1TPair(const TauL1TPair& tauL1tPair); - + TauL1TPair& operator=(const TauL1TPair& tauL1tPair) = default; ~TauL1TPair(){}; double dR(); diff --git a/DQMOffline/Muon/interface/DiMuonHistograms.h b/DQMOffline/Muon/interface/DiMuonHistograms.h index 94feb93bde699..ae8c762494a17 100644 --- a/DQMOffline/Muon/interface/DiMuonHistograms.h +++ b/DQMOffline/Muon/interface/DiMuonHistograms.h @@ -64,10 +64,10 @@ class DiMuonHistograms : public DQMEDAnalyzer { double etaECMax; //Defining the relevant invariant mass regions - double LowMassMin; - double LowMassMax; - double HighMassMin; - double HighMassMax; + double lowMassMin; + double lowMassMax; + double highMassMin; + double highMassMax; std::vector GlbGlbMuon_LM; std::vector GlbGlbMuon_HM; diff --git a/DQMOffline/Muon/python/diMuonHistograms_cfi.py b/DQMOffline/Muon/python/diMuonHistograms_cfi.py index f0fcb59ddc4d0..70431269e6baf 100644 --- a/DQMOffline/Muon/python/diMuonHistograms_cfi.py +++ b/DQMOffline/Muon/python/diMuonHistograms_cfi.py @@ -17,10 +17,10 @@ etaECMin = cms.double(0.9), etaECMax = cms.double(2.4), - LowMassMin = cms.double(2.0), - LowMassMax = cms.double(12.0), - HighMassMin = cms.double(70.0), - HighMassMax = cms.double(110.0), + lowMassMin = cms.double(2.0), + lowMassMax = cms.double(12.0), + highMassMin = cms.double(70.0), + highMassMax = cms.double(110.0), folder = cms.string("Muons/diMuonHistograms") ) 
diMuonHistos_miniAOD = DQMEDAnalyzer('DiMuonHistograms', @@ -37,10 +37,10 @@ etaECMin = cms.double(0.9), etaECMax = cms.double(2.4), - LowMassMin = cms.double(2.0), - LowMassMax = cms.double(12.0), - HighMassMin = cms.double(70.0), - HighMassMax = cms.double(110.0), + lowMassMin = cms.double(2.0), + lowMassMax = cms.double(12.0), + highMassMin = cms.double(70.0), + highMassMax = cms.double(110.0), folder = cms.string("Muons_miniAOD/diMuonHistograms") ) @@ -50,8 +50,8 @@ etaBBin = 350, etaEBin = 350, - LowMassMin = 2.0, - LowMassMax = 51.0, - HighMassMin = 55.0, - HighMassMax = 125.0 + lowMassMin = 2.0, + lowMassMax = 51.0, + highMassMin = 55.0, + highMassMax = 125.0 ) diff --git a/DQMOffline/Muon/src/DiMuonHistograms.cc b/DQMOffline/Muon/src/DiMuonHistograms.cc index 35e1ae21cf9d1..f89cacabf8e9e 100644 --- a/DQMOffline/Muon/src/DiMuonHistograms.cc +++ b/DQMOffline/Muon/src/DiMuonHistograms.cc @@ -53,10 +53,10 @@ DiMuonHistograms::DiMuonHistograms(const edm::ParameterSet& pSet) { etaECMin = parameters.getParameter("etaECMin"); etaECMax = parameters.getParameter("etaECMax"); - LowMassMin = parameters.getParameter("LowMassMin"); - LowMassMax = parameters.getParameter("LowMassMax"); - HighMassMin = parameters.getParameter("HighMassMin"); - HighMassMax = parameters.getParameter("HighMassMax"); + lowMassMin = parameters.getParameter("lowMassMin"); + lowMassMax = parameters.getParameter("lowMassMax"); + highMassMin = parameters.getParameter("highMassMin"); + highMassMax = parameters.getParameter("highMassMax"); theFolder = parameters.getParameter("folder"); } @@ -78,51 +78,51 @@ void DiMuonHistograms::bookHistograms(DQMStore::IBooker& ibooker, GlbGlbMuon_LM.push_back(ibooker.book1D("GlbGlbMuon_LM" + EtaName[iEtaRegion], "InvMass_{GLB,GLB}" + EtaName[iEtaRegion], nBin[iEtaRegion], - LowMassMin, - LowMassMax)); + lowMassMin, + lowMassMax)); TrkTrkMuon_LM.push_back(ibooker.book1D("TrkTrkMuon_LM" + EtaName[iEtaRegion], "InvMass_{TRK,TRK}" + EtaName[iEtaRegion], 
nBin[iEtaRegion], - LowMassMin, - LowMassMax)); + lowMassMin, + lowMassMax)); StaTrkMuon_LM.push_back(ibooker.book1D("StaTrkMuon_LM" + EtaName[iEtaRegion], "InvMass_{STA,TRK}" + EtaName[iEtaRegion], nBin[iEtaRegion], - LowMassMin, - LowMassMax)); + lowMassMin, + lowMassMax)); GlbGlbMuon_HM.push_back(ibooker.book1D("GlbGlbMuon_HM" + EtaName[iEtaRegion], "InvMass_{GLB,GLB}" + EtaName[iEtaRegion], nBin[iEtaRegion], - HighMassMin, - HighMassMax)); + highMassMin, + highMassMax)); TrkTrkMuon_HM.push_back(ibooker.book1D("TrkTrkMuon_HM" + EtaName[iEtaRegion], "InvMass_{TRK,TRK}" + EtaName[iEtaRegion], nBin[iEtaRegion], - HighMassMin, - HighMassMax)); + highMassMin, + highMassMax)); StaTrkMuon_HM.push_back(ibooker.book1D("StaTrkMuon_HM" + EtaName[iEtaRegion], "InvMass_{STA,TRK}" + EtaName[iEtaRegion], nBin[iEtaRegion], - HighMassMin, - HighMassMax)); + highMassMin, + highMassMax)); // arround the Z peak TightTightMuon.push_back(ibooker.book1D("TightTightMuon" + EtaName[iEtaRegion], "InvMass_{Tight,Tight}" + EtaName[iEtaRegion], nBin[iEtaRegion], - LowMassMin, - LowMassMax)); + highMassMin, + highMassMax)); MediumMediumMuon.push_back(ibooker.book1D("MediumMediumMuon" + EtaName[iEtaRegion], "InvMass_{Medium,Medium}" + EtaName[iEtaRegion], nBin[iEtaRegion], - LowMassMin, - LowMassMax)); + highMassMin, + highMassMax)); LooseLooseMuon.push_back(ibooker.book1D("LooseLooseMuon" + EtaName[iEtaRegion], "InvMass_{Loose,Loose}" + EtaName[iEtaRegion], nBin[iEtaRegion], - LowMassMin, - LowMassMax)); + highMassMin, + highMassMax)); //Fraction of bad hits in the tracker track to the total TightTightMuonBadFrac.push_back(ibooker.book1D( "TightTightMuonBadFrac" + EtaName[iEtaRegion], "BadFrac_{Tight,Tight}" + EtaName[iEtaRegion], 10, 0, 0.4)); @@ -222,9 +222,9 @@ void DiMuonHistograms::analyze(const edm::Event& iEvent, const edm::EventSetup& fabs(recoCombinedGlbTrack1->eta()) < EtaCutMax[iEtaRegion] && fabs(recoCombinedGlbTrack2->eta()) > EtaCutMin[iEtaRegion] && 
fabs(recoCombinedGlbTrack2->eta()) < EtaCutMax[iEtaRegion]) { - if (InvMass < LowMassMax) + if (InvMass < lowMassMax) GlbGlbMuon_LM[iEtaRegion]->Fill(InvMass); - if (InvMass > HighMassMin) + if (InvMass > highMassMin) GlbGlbMuon_HM[iEtaRegion]->Fill(InvMass); } } @@ -300,9 +300,9 @@ void DiMuonHistograms::analyze(const edm::Event& iEvent, const edm::EventSetup& if (fabs(recoStaTrack->eta()) > EtaCutMin[iEtaRegion] && fabs(recoStaTrack->eta()) < EtaCutMax[iEtaRegion] && fabs(recoTrack->eta()) > EtaCutMin[iEtaRegion] && fabs(recoTrack->eta()) < EtaCutMax[iEtaRegion]) { - if (InvMass < LowMassMax) + if (InvMass < lowMassMax) StaTrkMuon_LM[iEtaRegion]->Fill(InvMass); - if (InvMass > HighMassMin) + if (InvMass > highMassMin) StaTrkMuon_HM[iEtaRegion]->Fill(InvMass); } } @@ -322,9 +322,9 @@ void DiMuonHistograms::analyze(const edm::Event& iEvent, const edm::EventSetup& if (fabs(recoStaTrack->eta()) > EtaCutMin[iEtaRegion] && fabs(recoStaTrack->eta()) < EtaCutMax[iEtaRegion] && fabs(recoTrack->eta()) > EtaCutMin[iEtaRegion] && fabs(recoTrack->eta()) < EtaCutMax[iEtaRegion]) { - if (InvMass < LowMassMax) + if (InvMass < lowMassMax) StaTrkMuon_LM[iEtaRegion]->Fill(InvMass); - if (InvMass > HighMassMin) + if (InvMass > highMassMin) StaTrkMuon_HM[iEtaRegion]->Fill(InvMass); } } @@ -345,9 +345,9 @@ void DiMuonHistograms::analyze(const edm::Event& iEvent, const edm::EventSetup& for (unsigned int iEtaRegion = 0; iEtaRegion < 3; iEtaRegion++) { if (fabs(recoTrack1->eta()) > EtaCutMin[iEtaRegion] && fabs(recoTrack1->eta()) < EtaCutMax[iEtaRegion] && fabs(recoTrack2->eta()) > EtaCutMin[iEtaRegion] && fabs(recoTrack2->eta()) < EtaCutMax[iEtaRegion]) { - if (InvMass < LowMassMax) + if (InvMass < lowMassMax) TrkTrkMuon_LM[iEtaRegion]->Fill(InvMass); - if (InvMass > HighMassMin) + if (InvMass > highMassMin) TrkTrkMuon_HM[iEtaRegion]->Fill(InvMass); } } diff --git a/DQMOffline/Trigger/interface/EgHLTOffHelper.h b/DQMOffline/Trigger/interface/EgHLTOffHelper.h index 
8dd397fdcdd62..6c61be5e0aead 100644 --- a/DQMOffline/Trigger/interface/EgHLTOffHelper.h +++ b/DQMOffline/Trigger/interface/EgHLTOffHelper.h @@ -49,6 +49,9 @@ #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Framework/interface/ConsumesCollector.h" +#include "CondFormats/EcalObjects/interface/EcalPFRecHitThresholds.h" +#include "CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" + class EgammaHLTTrackIsolation; class HLTConfigProvider; class EcalSeverityLevelAlgo; @@ -200,6 +203,8 @@ namespace egHLT { template static bool getHandle(const edm::Event& event, const edm::EDGetTokenT& token, edm::Handle& handle); + + const EcalPFRecHitThresholds* thresholds = nullptr; }; template diff --git a/DQMOffline/Trigger/interface/EgHLTOfflineClient.h b/DQMOffline/Trigger/interface/EgHLTOfflineClient.h index 38b8b4f33703b..bc1434a297223 100644 --- a/DQMOffline/Trigger/interface/EgHLTOfflineClient.h +++ b/DQMOffline/Trigger/interface/EgHLTOfflineClient.h @@ -33,7 +33,6 @@ class EgHLTOfflineClient : public DQMEDHarvester { private: - // DQMStore* dbe_; //dbe seems to be the standard name for this, I dont know why. 
We of course dont own it std::string dirName_; std::vector eleHLTFilterNames_; //names of the filters monitored using electrons to make plots for @@ -71,15 +70,8 @@ class EgHLTOfflineClient : public DQMEDHarvester { explicit EgHLTOfflineClient(const edm::ParameterSet&); ~EgHLTOfflineClient() override; - // virtual void beginJob(); - // virtual void analyze(const edm::Event&, const edm::EventSetup&); //dummy - // virtual void endJob(); void beginRun(const edm::Run& run, const edm::EventSetup& c) override; - // virtual void endRun(const edm::Run& run, const edm::EventSetup& c); - - // virtual void beginLuminosityBlock(const edm::LuminosityBlock& lumiSeg,const edm::EventSetup& context){} // DQM Client Diagnostic - // virtual void endLuminosityBlock(const edm::LuminosityBlock& lumiSeg,const edm::EventSetup& c); void dqmEndJob(DQMStore::IBooker&, DQMStore::IGetter&) override; //performed in the endJob void dqmEndLuminosityBlock(DQMStore::IBooker&, DQMStore::IGetter&, diff --git a/DQMOffline/Trigger/interface/FSQDiJetAve.h b/DQMOffline/Trigger/interface/FSQDiJetAve.h index 4a9d475dcba95..18e21545a5ea2 100644 --- a/DQMOffline/Trigger/interface/FSQDiJetAve.h +++ b/DQMOffline/Trigger/interface/FSQDiJetAve.h @@ -66,8 +66,6 @@ class FSQDiJetAve : public DQMEDAnalyzer { void bookHistograms(DQMStore::IBooker&, edm::Run const& run, edm::EventSetup const& c) override; void dqmBeginRun(edm::Run const& run, edm::EventSetup const& c) override; - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- // diff --git a/DQMOffline/Trigger/plugins/FSQDiJetAve.cc b/DQMOffline/Trigger/plugins/FSQDiJetAve.cc index 40edc9ad5df1f..7543c5d2d018f 100644 --- a/DQMOffline/Trigger/plugins/FSQDiJetAve.cc +++ b/DQMOffline/Trigger/plugins/FSQDiJetAve.cc @@ -852,28 +852,6 @@ void FSQDiJetAve::bookHistograms(DQMStore::IBooker& booker, 
edm::Run const& run, } } //*/ -// ------------ method called when ending the processing of a run ------------ -/* -void -FSQDiJetAve::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -FSQDiJetAve::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -FSQDiJetAve::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{} -// */ // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void FSQDiJetAve::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { diff --git a/DQMOffline/Trigger/python/TrackingMonitoring_cff.py b/DQMOffline/Trigger/python/TrackingMonitoring_cff.py index ba2e7078c6b5e..d004684d74516 100644 --- a/DQMOffline/Trigger/python/TrackingMonitoring_cff.py +++ b/DQMOffline/Trigger/python/TrackingMonitoring_cff.py @@ -29,11 +29,13 @@ from Configuration.Eras.Modifier_pp_on_PbPb_run3_cff import pp_on_PbPb_run3 pp_on_PbPb_run3.toModify(pixelTracksMonitoringHLT, + primaryVertex = 'hltPixelVerticesPPOnAA', TrackProducer = 'hltPixelTracksPPOnAA', allTrackProducer = 'hltPixelTracksPPOnAA') from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker phase2_tracker.toModify(pixelTracksMonitoringHLT, + primaryVertex = 'hltPhase2PixelVertices', TrackProducer = 'hltPhase2PixelTracks', allTrackProducer = 'hltPhase2PixelTracks') @@ -107,12 +109,14 @@ ) pp_on_PbPb_run3.toModify(iterHLTTracksMonitoringHLT, + primaryVertex = 'hltPixelVerticesPPOnAA', TrackProducer = 'hltMergedTracksPPOnAA', allTrackProducer = 'hltMergedTracksPPOnAA') phase2_tracker.toModify(iterHLTTracksMonitoringHLT, - TrackProducer = cms.InputTag("generalTracks","","HLT"), - allTrackProducer = cms.InputTag("generalTracks","","HLT")) + primaryVertex = 'hltPhase2PixelVertices', 
+ TrackProducer = 'generalTracks::HLT', + allTrackProducer = 'generalTracks::HLT') iter3TracksMonitoringHLT = trackingMonHLT.clone( FolderName = 'HLT/Tracking/iter3Merged', @@ -157,6 +161,7 @@ ) pp_on_PbPb_run3.toModify(doubletRecoveryHPTracksMonitoringHLT, + primaryVertex = 'hltPixelVerticesPPOnAA', TrackProducer = 'hltDoubletRecoveryPFlowTrackSelectionHighPurityPPOnAA', allTrackProducer = 'hltDoubletRecoveryPFlowTrackSelectionHighPurityPPOnAA') diff --git a/DQMOffline/Trigger/src/EgHLTOffHelper.cc b/DQMOffline/Trigger/src/EgHLTOffHelper.cc index a8a31a7f625b5..2ab709ef01819 100644 --- a/DQMOffline/Trigger/src/EgHLTOffHelper.cc +++ b/DQMOffline/Trigger/src/EgHLTOffHelper.cc @@ -309,7 +309,7 @@ void OffHelper::fillIsolData(const reco::GsfElectron& ele, OffEle::IsolData& iso } else isolData.hltTrksPho = 0.; if (calHLTEmIsol_) - isolData.hltEm = ecalIsolAlgoEB.getEtSum(&ele) + ecalIsolAlgoEE.getEtSum(&ele); + isolData.hltEm = ecalIsolAlgoEB.getEtSum(&ele, *thresholds) + ecalIsolAlgoEE.getEtSum(&ele, *thresholds); else isolData.hltEm = 0.; } @@ -475,7 +475,7 @@ void OffHelper::fillIsolData(const reco::Photon& pho, OffPho::IsolData& isolData } else isolData.hltTrks = 0.; if (calHLTEmIsol_) - isolData.hltEm = ecalIsolAlgoEB.getEtSum(&pho) + ecalIsolAlgoEE.getEtSum(&pho); + isolData.hltEm = ecalIsolAlgoEB.getEtSum(&pho, *thresholds) + ecalIsolAlgoEE.getEtSum(&pho, *thresholds); else isolData.hltEm = 0.; } diff --git a/DQMServices/Components/plugins/DQMGenericClient.cc b/DQMServices/Components/plugins/DQMGenericClient.cc index 1d1fa290ea671..bd4999eebcf21 100644 --- a/DQMServices/Components/plugins/DQMGenericClient.cc +++ b/DQMServices/Components/plugins/DQMGenericClient.cc @@ -158,7 +158,7 @@ class FitSlicesYTool { // ... 
create your hists TH2F* h = me->getTH2F(); TF1 fgaus("fgaus", "gaus", h->GetYaxis()->GetXmin(), h->GetYaxis()->GetXmax(), TF1::EAddToList::kNo); - h->FitSlicesY(&fgaus, 0, -1, 0, "QNRL SERIAL"); + h->FitSlicesY(&fgaus, 0, -1, 0, "QNR SERIAL"); string name(h->GetName()); h0 = (TH1*)gDirectory->Get((name + "_0").c_str()); h1 = (TH1*)gDirectory->Get((name + "_1").c_str()); diff --git a/DQMServices/Core/interface/DQMStore.h b/DQMServices/Core/interface/DQMStore.h index 7413255a48f58..a1ec64fc1c601 100644 --- a/DQMServices/Core/interface/DQMStore.h +++ b/DQMServices/Core/interface/DQMStore.h @@ -554,7 +554,8 @@ namespace dqm { DQMStore* store_ = nullptr; MonitorElementData::Scope scope_ = MonitorElementData::Scope::JOB; - uint64_t moduleID_ = 0; + static constexpr uint64_t kInvalidModuleID = std::numeric_limits::max(); + uint64_t moduleID_ = kInvalidModuleID; edm::LuminosityBlockID runlumi_ = edm::LuminosityBlockID(); }; @@ -684,8 +685,8 @@ namespace dqm { oldid_ = booker_.setModuleID(newid); oldscope_ = booker_.setScope(newscope); oldrunlumi_ = booker_.setRunLumi(newrunlumi); - assert(newid != 0 || !"moduleID must be set for normal booking transaction"); - assert(oldid_ == 0 || !"Nested booking transaction?"); + assert(newid != kInvalidModuleID || !"moduleID must be set for normal booking transaction"); + assert(oldid_ == kInvalidModuleID || !"Nested booking transaction?"); } ~ModuleIdScope() { booker_.setModuleID(oldid_); diff --git a/DQMServices/FwkIO/plugins/DQMRootSource.cc b/DQMServices/FwkIO/plugins/DQMRootSource.cc index b2019ddf00cb9..fc99dd7745882 100644 --- a/DQMServices/FwkIO/plugins/DQMRootSource.cc +++ b/DQMServices/FwkIO/plugins/DQMRootSource.cc @@ -330,7 +330,7 @@ class DQMRootSource : public edm::PuttableSourceBase, DQMTTreeIO { static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - edm::InputSource::ItemType getNextItemType() override; + edm::InputSource::ItemTypeInfo getNextItemType() override; std::shared_ptr 
readFile_() override; std::shared_ptr readRunAuxiliary_() override; @@ -440,7 +440,7 @@ DQMRootSource::DQMRootSource(edm::ParameterSet const& iPSet, const edm::InputSou {"LUMI", MonitorElementData::Scope::LUMI}, {"RUN", MonitorElementData::Scope::RUN}, {"JOB", MonitorElementData::Scope::JOB}}[iPSet.getUntrackedParameter("reScope", "JOB")]), - m_nextItemType(edm::InputSource::IsFile), + m_nextItemType(edm::InputSource::ItemType::IsFile), m_treeReaders(kNIndicies, std::shared_ptr()), m_currentIndex(0), m_openFiles(std::vector()), @@ -448,7 +448,7 @@ DQMRootSource::DQMRootSource(edm::ParameterSet const& iPSet, const edm::InputSou edm::sortAndRemoveOverlaps(m_lumisToProcess); if (m_catalog.fileNames(0).empty()) { - m_nextItemType = edm::InputSource::IsStop; + m_nextItemType = edm::InputSource::ItemType::IsStop; } else { m_treeReaders[kIntIndex].reset(new TreeSimpleReader(MonitorElementData::Kind::INT, m_rescope)); m_treeReaders[kFloatIndex].reset(new TreeSimpleReader(MonitorElementData::Kind::REAL, m_rescope)); @@ -483,7 +483,7 @@ DQMRootSource::~DQMRootSource() { // member functions // -edm::InputSource::ItemType DQMRootSource::getNextItemType() { return m_nextItemType; } +edm::InputSource::ItemTypeInfo DQMRootSource::getNextItemType() { return m_nextItemType; } // We will read the metadata of all files and fill m_fileMetadatas vector std::shared_ptr DQMRootSource::readFile_() { @@ -630,9 +630,9 @@ std::shared_ptr DQMRootSource::readFile_() { // Stop if there's nothing to process. Otherwise start the run. 
if (m_fileMetadatas.empty()) - m_nextItemType = edm::InputSource::IsStop; + m_nextItemType = edm::InputSource::ItemType::IsStop; else - m_nextItemType = edm::InputSource::IsRun; + m_nextItemType = edm::InputSource::ItemType::IsRun; // We have to return something but not sure why return std::make_shared(); @@ -728,18 +728,18 @@ bool DQMRootSource::isRunOrLumiTransition() const { void DQMRootSource::readNextItemType() { if (m_currentIndex == 0) { - m_nextItemType = edm::InputSource::IsRun; + m_nextItemType = edm::InputSource::ItemType::IsRun; } else if (m_currentIndex > m_fileMetadatas.size() - 1) { // We reached the end - m_nextItemType = edm::InputSource::IsStop; + m_nextItemType = edm::InputSource::ItemType::IsStop; } else { FileMetadata previousMetadata = m_fileMetadatas[m_currentIndex - 1]; FileMetadata metadata = m_fileMetadatas[m_currentIndex]; if (previousMetadata.m_run != metadata.m_run) { - m_nextItemType = edm::InputSource::IsRun; + m_nextItemType = edm::InputSource::ItemType::IsRun; } else if (previousMetadata.m_lumi != metadata.m_lumi) { - m_nextItemType = edm::InputSource::IsLumi; + m_nextItemType = edm::InputSource::ItemType::IsLumi; } } } diff --git a/DQMServices/StreamerIO/plugins/DQMProtobufReader.cc b/DQMServices/StreamerIO/plugins/DQMProtobufReader.cc index e7cfb42014a73..3e761778d60b4 100644 --- a/DQMServices/StreamerIO/plugins/DQMProtobufReader.cc +++ b/DQMServices/StreamerIO/plugins/DQMProtobufReader.cc @@ -38,7 +38,7 @@ DQMProtobufReader::DQMProtobufReader(edm::ParameterSet const& pset, edm::InputSo produces("DQMGenerationRecoLumi"); } -edm::InputSource::ItemType DQMProtobufReader::getNextItemType() { +edm::InputSource::ItemTypeInfo DQMProtobufReader::getNextItemType() { typedef DQMFileIterator::State State; typedef DQMFileIterator::LumiEntry LumiEntry; @@ -49,23 +49,23 @@ edm::InputSource::ItemType DQMProtobufReader::getNextItemType() { if (edm::shutdown_flag.load()) { fiterator_.logFileAction("Shutdown flag was set, shutting down."); - 
return InputSource::IsStop; + return InputSource::ItemType::IsStop; } // check for end of run file and force quit if (flagEndOfRunKills_ && (fiterator_.state() != State::OPEN)) { - return InputSource::IsStop; + return InputSource::ItemType::IsStop; } // check for end of run and quit if everything has been processed. // this is the clean exit if ((!fiterator_.lumiReady()) && (fiterator_.state() == State::EOR)) { - return InputSource::IsStop; + return InputSource::ItemType::IsStop; } // skip to the next file if we have no files openned yet if (fiterator_.lumiReady()) { - return InputSource::IsLumi; + return InputSource::ItemType::IsLumi; } fiterator_.delay(); @@ -73,7 +73,7 @@ edm::InputSource::ItemType DQMProtobufReader::getNextItemType() { // IsSynchronize state // // comment out in order to block at this level - // return InputSource::IsSynchronize; + // return InputSource::ItemType::IsSynchronize; } // this is unreachable diff --git a/DQMServices/StreamerIO/plugins/DQMProtobufReader.h b/DQMServices/StreamerIO/plugins/DQMProtobufReader.h index acea989ac9fa4..59b459246d84d 100644 --- a/DQMServices/StreamerIO/plugins/DQMProtobufReader.h +++ b/DQMServices/StreamerIO/plugins/DQMProtobufReader.h @@ -21,7 +21,7 @@ namespace dqmservices { private: void load(DQMStore* store, std::string filename); - edm::InputSource::ItemType getNextItemType() override; + edm::InputSource::ItemTypeInfo getNextItemType() override; std::shared_ptr readRunAuxiliary_() override; std::shared_ptr readLuminosityBlockAuxiliary_() override; void readRun_(edm::RunPrincipal& rpCache) override; diff --git a/DataFormats/BTauReco/interface/TaggingVariable.h b/DataFormats/BTauReco/interface/TaggingVariable.h index ff85997f7e1a5..a632e29451844 100644 --- a/DataFormats/BTauReco/interface/TaggingVariable.h +++ b/DataFormats/BTauReco/interface/TaggingVariable.h @@ -195,6 +195,7 @@ namespace reco { public: TaggingVariableList() : m_list() {} TaggingVariableList(const TaggingVariableList& list) : 
m_list(list.m_list) {} + TaggingVariableList& operator=(const TaggingVariableList&) = default; // [begin, end) must identify a valid range of iterators to TaggingVariableList template diff --git a/DataFormats/CaloRecHit/interface/MultifitComputations.h b/DataFormats/CaloRecHit/interface/MultifitComputations.h index f2d57d2ddb1e7..253ba348dfaf7 100644 --- a/DataFormats/CaloRecHit/interface/MultifitComputations.h +++ b/DataFormats/CaloRecHit/interface/MultifitComputations.h @@ -413,7 +413,7 @@ namespace calo { // compute the gradient //w.tail(nactive) = Atb.tail(nactive) - (AtA * solution).tail(nactive); - Eigen::Index w_max_idx; + Eigen::Index w_max_idx = 0; float w_max = -std::numeric_limits::max(); for (int icol = npassive; icol < NPULSES; icol++) { auto const icol_real = pulseOffsets(icol); diff --git a/DataFormats/Common/interface/MapOfVectors.h b/DataFormats/Common/interface/MapOfVectors.h index 78bc569fa105b..20c5c431861d9 100644 --- a/DataFormats/Common/interface/MapOfVectors.h +++ b/DataFormats/Common/interface/MapOfVectors.h @@ -128,12 +128,6 @@ namespace edm { m_data.swap(other.m_data); } - MapOfVectors& operator=(MapOfVectors const& rhs) { - MapOfVectors temp(rhs); - this->swap(temp); - return *this; - } - private: //for testing friend class ::TestMapOfVectors; diff --git a/DataFormats/Common/test/DataFrame_t.cpp b/DataFormats/Common/test/DataFrame_t.cpp index df281fb43d700..f60380cbcc726 100644 --- a/DataFormats/Common/test/DataFrame_t.cpp +++ b/DataFormats/Common/test/DataFrame_t.cpp @@ -8,6 +8,7 @@ #include #include #include +#include class TestDataFrame : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(TestDataFrame); @@ -121,7 +122,9 @@ void TestDataFrame::sort() { std::vector ids(100, 1); ids[0] = 2001; std::partial_sum(ids.begin(), ids.end(), ids.begin()); - std::random_shuffle(ids.begin(), ids.end()); + std::random_device rd; + std::mt19937 g(rd()); + std::shuffle(ids.begin(), ids.end(), g); for (int n = 0; n < 100; ++n) { 
frames.push_back(ids[n]); diff --git a/DataFormats/DTDigi/test/dumpDTDigi.cc b/DataFormats/DTDigi/test/dumpDTDigi.cc deleted file mode 100644 index fb0cbf3aa8efa..0000000000000 --- a/DataFormats/DTDigi/test/dumpDTDigi.cc +++ /dev/null @@ -1,53 +0,0 @@ -#include -#include -#include -#include - - -#include - -using namespace edm; -using namespace std; - -namespace test{ - class DumpFEDRawDataProduct: public EDAnalyzer{ - - public: - DumpFEDRawDataProduct(const ParameterSet& pset){} - - void analyze(const Event & e, const EventSetup& c){ - cout << "--- Run: " << e.id().run() - << " Event: " << e.id().event() << endl; - Handle dtDigis; - e.getByLabel("dtunpacker", dtDigis); - - DTDigiCollection::DigiRangeIterator detUnitIt; - for (detUnitIt=digiCollection.begin(); - detUnitIt!=digiCollection.end(); - ++detUnitIt){ - - const DTLayerId& id = (*detUnitIt).first; - const DTDigiCollection::Range& range = (*detUnitIt).second; - - // We have inserted digis for only one DetUnit... - CPPUNIT_ASSERT(id==layer); - - // Loop over the digis of this DetUnit - for (DTDigiCollection::const_iterator digiIt = range.first; - digiIt!=range.second; - ++digiIt){ - - - CPPUNIT_ASSERT((*digiIt).wire()==1); - CPPUNIT_ASSERT((*digiIt).number()==4); - CPPUNIT_ASSERT((*digiIt).countsTDC()==5); - - - }// for digis in layer - }// for layers - } - - }; -DEFINE_FWK_MODULE(DumpFEDRawDataProduct) -} - diff --git a/DataFormats/EcalDigi/interface/EBDataFrame_Ph2.h b/DataFormats/EcalDigi/interface/EBDataFrame_Ph2.h new file mode 100644 index 0000000000000..aaaf3f8f8ef1b --- /dev/null +++ b/DataFormats/EcalDigi/interface/EBDataFrame_Ph2.h @@ -0,0 +1,28 @@ +#ifndef DataFormats_EcalDigi_EBDataFrame_Ph2_h +#define DataFormats_EcalDigi_EBDataFrame_Ph2_h + +#include "DataFormats/EcalDetId/interface/EBDetId.h" +#include "DataFormats/EcalDigi/interface/EcalDataFrame_Ph2.h" +#include + +/** \class EBDataFrame + +*/ +class EBDataFrame_Ph2 : public EcalDataFrame_Ph2 { +public: + typedef EBDetId key_type; + typedef 
EcalDataFrame_Ph2 Base; + + EBDataFrame_Ph2() {} + + EBDataFrame_Ph2(edm::DataFrame const& base) : Base(base) {} + EBDataFrame_Ph2(EcalDataFrame_Ph2 const& base) : Base(base) {} + + ~EBDataFrame_Ph2() override {} + + key_type id() const { return Base::id(); } +}; + +std::ostream& operator<<(std::ostream&, const EBDataFrame_Ph2&); + +#endif diff --git a/DataFormats/EcalDigi/interface/EcalDigiCollections.h b/DataFormats/EcalDigi/interface/EcalDigiCollections.h index f9a79f9830972..5a86f3a0bf8a5 100644 --- a/DataFormats/EcalDigi/interface/EcalDigiCollections.h +++ b/DataFormats/EcalDigi/interface/EcalDigiCollections.h @@ -2,12 +2,14 @@ #define DIGIECAL_ECALDIGICOLLECTION_H #include "DataFormats/EcalDigi/interface/EBDataFrame.h" +#include "DataFormats/EcalDigi/interface/EBDataFrame_Ph2.h" #include "DataFormats/EcalDigi/interface/EcalDataFrame_Ph2.h" #include "DataFormats/EcalDigi/interface/EEDataFrame.h" #include "DataFormats/EcalDigi/interface/ESDataFrame.h" #include "DataFormats/EcalDigi/interface/EcalTimeDigi.h" #include "DataFormats/EcalDigi/interface/EcalTriggerPrimitiveDigi.h" #include "DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveDigi.h" +#include "DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveDigi.h" #include "DataFormats/EcalDigi/interface/EcalTrigPrimCompactColl.h" #include "DataFormats/EcalDigi/interface/EcalPseudoStripInputDigi.h" #include "DataFormats/EcalDigi/interface/EBSrFlag.h" @@ -46,10 +48,13 @@ class EBDigiCollectionPh2 : public EcalDigiCollectionPh2 { class EcalDigiCollection : public edm::DataFrameContainer { public: typedef edm::DataFrameContainer::size_type size_type; - static const size_type MAXSAMPLES = 10; + static const size_type MAXSAMPLES = ecalPh1::sampleSize; explicit EcalDigiCollection(size_type istride = MAXSAMPLES, int isubdet = 0) : edm::DataFrameContainer(istride, isubdet) {} void swap(DataFrameContainer& other) { this->DataFrameContainer::swap(other); } + void swap(DataFrameContainer::IdContainer& otherIds, 
DataFrameContainer::DataContainer& otherData) { + this->DataFrameContainer::swap(otherIds, otherData); + } }; // make edm (and ecal client) happy @@ -61,6 +66,9 @@ class EBDigiCollection : public EcalDigiCollection { EBDigiCollection(size_type istride = MAXSAMPLES) : EcalDigiCollection(istride, EcalBarrel) {} void swap(EBDigiCollection& other) { this->EcalDigiCollection::swap(other); } + void swap(EBDigiCollection::IdContainer& otherIds, EBDigiCollection::DataContainer& otherData) { + this->EcalDigiCollection::swap(otherIds, otherData); + } void push_back(const Digi& digi) { DataFrameContainer::push_back(digi.id(), digi.frame().begin()); } void push_back(id_type iid) { DataFrameContainer::push_back(iid); } void push_back(id_type iid, data_type const* idata) { DataFrameContainer::push_back(iid, idata); } @@ -74,6 +82,9 @@ class EEDigiCollection : public EcalDigiCollection { EEDigiCollection(size_type istride = MAXSAMPLES) : EcalDigiCollection(istride, EcalEndcap) {} void swap(EEDigiCollection& other) { this->EcalDigiCollection::swap(other); } + void swap(EEDigiCollection::IdContainer& otherIds, EEDigiCollection::DataContainer& otherData) { + this->EcalDigiCollection::swap(otherIds, otherData); + } void push_back(const Digi& digi) { edm::DataFrameContainer::push_back(digi.id(), digi.frame().begin()); } void push_back(id_type iid) { DataFrameContainer::push_back(iid); } void push_back(id_type iid, data_type const* idata) { DataFrameContainer::push_back(iid, idata); } @@ -120,6 +131,7 @@ inline void swap(EBDigiCollectionPh2& lhs, EBDigiCollectionPh2& rhs) { lhs.swap( typedef edm::SortedCollection EcalTimeDigiCollection; typedef edm::SortedCollection EcalTrigPrimDigiCollection; typedef edm::SortedCollection EcalEBTrigPrimDigiCollection; +typedef edm::SortedCollection EcalEBPhase2TrigPrimDigiCollection; typedef edm::SortedCollection EcalPSInputDigiCollection; typedef edm::SortedCollection EBSrFlagCollection; diff --git 
a/DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveDigi.h b/DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveDigi.h new file mode 100644 index 0000000000000..82293612f16d7 --- /dev/null +++ b/DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveDigi.h @@ -0,0 +1,63 @@ +#ifndef DataFormats_EcalDigi_EcalEBPhase2TriggerPrimitiveDigi_h +#define DataFormats_EcalDigi_EcalEBPhase2TriggerPrimitiveDigi_h + +#include +#include +#include "DataFormats/EcalDetId/interface/EBDetId.h" +#include "DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveSample.h" + +/** \class EcalEBPhase2TriggerPrimitiveDigi +\author N. Marinelli - Univ. of Notre Dame + + +*/ + +class EcalEBPhase2TriggerPrimitiveDigi { +public: + typedef EBDetId key_type; ///< For the sorted collection + + EcalEBPhase2TriggerPrimitiveDigi(); // for persistence + EcalEBPhase2TriggerPrimitiveDigi(const EBDetId& id); + + void swap(EcalEBPhase2TriggerPrimitiveDigi& rh) { + std::swap(id_, rh.id_); + std::swap(size_, rh.size_); + std::swap(data_, rh.data_); + } + + const EBDetId& id() const { return id_; } + int size() const { return size_; } + + const EcalEBPhase2TriggerPrimitiveSample& operator[](int i) const { return data_[i]; } + const EcalEBPhase2TriggerPrimitiveSample& sample(int i) const { return data_[i]; } + + void setSize(int size); + void setSample(int i, const EcalEBPhase2TriggerPrimitiveSample& sam); + void setSampleValue(int i, uint16_t value) { data_[i].setValue(value); } + + /// get the 12 bits Et of interesting sample + int encodedEt() const; + + /// Spike flag + bool l1aSpike() const; + + /// Time info + int time() const; + + /// True if debug mode (# of samples > 1) + bool isDebug() const; + + /// Gets the interesting sample + int sampleOfInterest() const; + +private: + EBDetId id_; + int size_; + std::vector data_; +}; + +inline void swap(EcalEBPhase2TriggerPrimitiveDigi& lh, EcalEBPhase2TriggerPrimitiveDigi& rh) { lh.swap(rh); } + +std::ostream& operator<<(std::ostream& 
s, const EcalEBPhase2TriggerPrimitiveDigi& digi); + +#endif diff --git a/DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveSample.h b/DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveSample.h new file mode 100644 index 0000000000000..3cc7806b1b275 --- /dev/null +++ b/DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveSample.h @@ -0,0 +1,49 @@ +#ifndef DataFormats_EcalDig_EcalEBPhase2TriggerPrimitiveSample_h +#define DataFormats_EcalDig_EcalEBPhase2TriggerPrimitiveSample_h + +#include +#include + +/** \class EcalEBPhase2TriggerPrimitiveSample +\author N. Marinelli - Univ of Notre Dame + +*/ + +class EcalEBPhase2TriggerPrimitiveSample { +public: + EcalEBPhase2TriggerPrimitiveSample(); + EcalEBPhase2TriggerPrimitiveSample(uint32_t data); + EcalEBPhase2TriggerPrimitiveSample(int encodedEt); + EcalEBPhase2TriggerPrimitiveSample(int encodedEt, bool isASpike); + EcalEBPhase2TriggerPrimitiveSample(int encodedEt, bool isASpike, int timing); + + ///Set data + void setValue(uint32_t data) { theSample_ = data; } + // The sample is a 18 bit word defined as: + // + // o o o o o o o o o o o o o o o o o o + // |________| |_______________________| + // ~60ps res spike Et + // time info flag + // + + /// get the raw word + uint32_t raw() const { return theSample_ & 0x3ffff; } + + /// get the encoded Et (12 bits) + int encodedEt() const { return (theSample_ & 0x3ffff) & 0xFFF; } + + bool l1aSpike() const { return (theSample_ & 0x3ffff & 0x1000) != 0; } + + int time() const { return (theSample_ & 0x3ffff) >> 13; } + + /// for streaming + uint32_t operator()() { return theSample_ & 0x3ffff; } + +private: + uint32_t theSample_; +}; + +std::ostream& operator<<(std::ostream& s, const EcalEBPhase2TriggerPrimitiveSample& samp); + +#endif diff --git a/DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveSample.h b/DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveSample.h index 105eac61088db..271f76c329d9d 100644 --- 
a/DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveSample.h +++ b/DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveSample.h @@ -21,8 +21,8 @@ class EcalEBTriggerPrimitiveSample { void setValue(uint16_t data) { theSample = data; } // The sample is a 16 bit word defined as: // - // o o o o o o o o o o o o o o o o - // |________| |____________________| + // o o o o o o o o o o o o o o o o + // |________| |_______________________| // ~60ps res spike Et // time info flag // @@ -31,7 +31,7 @@ class EcalEBTriggerPrimitiveSample { uint16_t raw() const { return theSample; } /// get the encoded Et (10 bits) - int encodedEt() const { return theSample & 0x3FF; } + int encodedEt() const { return theSample & 0x3ff; } bool l1aSpike() const { return (theSample & 0x400) != 0; } diff --git a/DataFormats/EcalDigi/src/EBDataFrame_Ph2.cc b/DataFormats/EcalDigi/src/EBDataFrame_Ph2.cc new file mode 100644 index 0000000000000..f48a09cf0eb88 --- /dev/null +++ b/DataFormats/EcalDigi/src/EBDataFrame_Ph2.cc @@ -0,0 +1,10 @@ +#include "DataFormats/EcalDigi/interface/EBDataFrame_Ph2.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include + +std::ostream& operator<<(std::ostream& s, const EBDataFrame_Ph2& digi) { + s << digi.id() << " " << digi.size() << " samples " << std::endl; + for (int i = 0; i < digi.size(); i++) + s << " " << digi[i] << std::endl; + return s; +} diff --git a/DataFormats/EcalDigi/src/EcalEBPhase2TriggerPrimitiveDigi.cc b/DataFormats/EcalDigi/src/EcalEBPhase2TriggerPrimitiveDigi.cc new file mode 100644 index 0000000000000..08be20ff807e3 --- /dev/null +++ b/DataFormats/EcalDigi/src/EcalEBPhase2TriggerPrimitiveDigi.cc @@ -0,0 +1,61 @@ +#include "DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveDigi.h" +#include +#include + +static constexpr int MAXSAMPLES = 20; +// This is the number of digi samples one wants to put in the TP digis. In Phase I was 5, but this setting was used. 
In the end in Phase I one one sample, corresponding to BC0 was filled. For Phase2 we have not decided yet. We leave this setting as it is while we decide what to do + +EcalEBPhase2TriggerPrimitiveDigi::EcalEBPhase2TriggerPrimitiveDigi() : size_(0), data_(MAXSAMPLES) {} + +EcalEBPhase2TriggerPrimitiveDigi::EcalEBPhase2TriggerPrimitiveDigi(const EBDetId& id) + : id_(id), size_(0), data_(MAXSAMPLES) {} + +void EcalEBPhase2TriggerPrimitiveDigi::setSample(int i, const EcalEBPhase2TriggerPrimitiveSample& sam) { + data_[i] = sam; +} + +int EcalEBPhase2TriggerPrimitiveDigi::sampleOfInterest() const { + // sample of interest to be save in the TP digis + if (size_ == 1) + return 0; + else if (size_ == 5) + return 2; + else + return -1; +} + +/// get the encoded/compressed Et of interesting sample +int EcalEBPhase2TriggerPrimitiveDigi::encodedEt() const { + int sample = sampleOfInterest(); + if (sample != -1) + return data_[sample].encodedEt(); + else + return -1; +} + +bool EcalEBPhase2TriggerPrimitiveDigi::l1aSpike() const { + int sample = sampleOfInterest(); + if (sample != -1) + return data_[sample].l1aSpike(); + else + return -1; +} + +int EcalEBPhase2TriggerPrimitiveDigi::time() const { + int sample = sampleOfInterest(); + if (sample != -1) + return data_[sample].time(); + else + return -1; +} + +bool EcalEBPhase2TriggerPrimitiveDigi::isDebug() const { return (size_ > 1); } + +void EcalEBPhase2TriggerPrimitiveDigi::setSize(int size) { size_ = std::clamp(size_, 0, MAXSAMPLES); } + +std::ostream& operator<<(std::ostream& s, const EcalEBPhase2TriggerPrimitiveDigi& digi) { + s << digi.id() << " " << digi.size() << " samples " << std::endl; + for (int i = 0; i < digi.size(); i++) + s << " " << digi.sample(i) << std::endl; + return s; +} diff --git a/DataFormats/EcalDigi/src/EcalEBPhase2TriggerPrimitiveSample.cc b/DataFormats/EcalDigi/src/EcalEBPhase2TriggerPrimitiveSample.cc new file mode 100644 index 0000000000000..c52762709801b --- /dev/null +++ 
b/DataFormats/EcalDigi/src/EcalEBPhase2TriggerPrimitiveSample.cc @@ -0,0 +1,26 @@ +#include "DataFormats/EcalDigi/interface/EcalEBPhase2TriggerPrimitiveSample.h" +#include + +EcalEBPhase2TriggerPrimitiveSample::EcalEBPhase2TriggerPrimitiveSample() : theSample_(0) {} +EcalEBPhase2TriggerPrimitiveSample::EcalEBPhase2TriggerPrimitiveSample(uint32_t data) : theSample_(data) { + theSample_ = theSample_ & 0x3ffff; +} + +EcalEBPhase2TriggerPrimitiveSample::EcalEBPhase2TriggerPrimitiveSample(int encodedEt, bool isASpike) { + theSample_ = (encodedEt & 0xFFF) | ((isASpike) ? (0x1000) : (0)); + theSample_ = theSample_ & 0x3ffff; +} + +EcalEBPhase2TriggerPrimitiveSample::EcalEBPhase2TriggerPrimitiveSample(int encodedEt, bool isASpike, int timing) { + theSample_ = (encodedEt & 0xFFF) | ((isASpike) ? (0x1000) : (0)) | timing << 13; + theSample_ = theSample_ & 0x3ffff; +} + +EcalEBPhase2TriggerPrimitiveSample::EcalEBPhase2TriggerPrimitiveSample(int encodedEt) { + theSample_ = encodedEt & 0xFFF; + theSample_ = theSample_ & 0x3ffff; +} + +std::ostream& operator<<(std::ostream& s, const EcalEBPhase2TriggerPrimitiveSample& samp) { + return s << "ET=" << samp.encodedEt() << ", isASpike=" << samp.l1aSpike() << " timing= " << samp.time(); +} diff --git a/DataFormats/EcalDigi/src/EcalEBTriggerPrimitiveSample.cc b/DataFormats/EcalDigi/src/EcalEBTriggerPrimitiveSample.cc index 2ff8b7b7f271c..d5b6b4010a91c 100644 --- a/DataFormats/EcalDigi/src/EcalEBTriggerPrimitiveSample.cc +++ b/DataFormats/EcalDigi/src/EcalEBTriggerPrimitiveSample.cc @@ -1,4 +1,5 @@ #include "DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveSample.h" +#include EcalEBTriggerPrimitiveSample::EcalEBTriggerPrimitiveSample() : theSample(0) {} EcalEBTriggerPrimitiveSample::EcalEBTriggerPrimitiveSample(uint16_t data) : theSample(data) {} diff --git a/DataFormats/EcalDigi/src/classes_def.xml b/DataFormats/EcalDigi/src/classes_def.xml index f3dd954167148..539e51c5acc49 100644 --- a/DataFormats/EcalDigi/src/classes_def.xml +++ 
b/DataFormats/EcalDigi/src/classes_def.xml @@ -11,6 +11,9 @@ + + + @@ -20,12 +23,18 @@ + + + + + + @@ -69,6 +78,7 @@ + @@ -114,6 +124,8 @@ + + @@ -126,6 +138,7 @@ + diff --git a/DataFormats/EcalRecHit/interface/RecoTypes.h b/DataFormats/EcalRecHit/interface/RecoTypes.h new file mode 100644 index 0000000000000..a7b1469fa57d3 --- /dev/null +++ b/DataFormats/EcalRecHit/interface/RecoTypes.h @@ -0,0 +1,13 @@ +#ifndef DataFormats_EcalRecHit_interface_RecoTypes_h +#define DataFormats_EcalRecHit_interface_RecoTypes_h + +namespace ecal { + namespace reco { + + using ComputationScalarType = float; + using StorageScalarType = float; + + } // namespace reco +} // namespace ecal + +#endif // DataFormats_EcalRecHit_interface_RecoTypes_h diff --git a/DataFormats/EgammaCandidates/interface/Photon.h b/DataFormats/EgammaCandidates/interface/Photon.h index 25522830a816b..d6e6e1da91fc2 100644 --- a/DataFormats/EgammaCandidates/interface/Photon.h +++ b/DataFormats/EgammaCandidates/interface/Photon.h @@ -40,6 +40,9 @@ namespace reco { /// constructor from values Photon(const LorentzVector& p4, const Point& caloPos, const PhotonCoreRef& core, const Point& vtx = Point(0, 0, 0)); + /// assignment operator + Photon& operator=(const Photon&) = default; + /// destructor ~Photon() override; diff --git a/DataFormats/FEDRawData/interface/FEDRawData.h b/DataFormats/FEDRawData/interface/FEDRawData.h index 8def41c8e270b..3fecaaa304a2a 100644 --- a/DataFormats/FEDRawData/interface/FEDRawData.h +++ b/DataFormats/FEDRawData/interface/FEDRawData.h @@ -26,12 +26,15 @@ class FEDRawData { /// Ctor specifying the size to be preallocated, in bytes. 
/// It is required that the size is a multiple of the size of a FED - /// word (8 bytes) - FEDRawData(size_t newsize); + /// word (8 bytes default) + FEDRawData(size_t newsize, size_t wordsize = 8); /// Copy constructor FEDRawData(const FEDRawData &); + /// Assignment operator + FEDRawData &operator=(const FEDRawData &) = default; + /// Dtor ~FEDRawData(); @@ -45,8 +48,8 @@ class FEDRawData { size_t size() const { return data_.size(); } /// Resize to the specified size in bytes. It is required that - /// the size is a multiple of the size of a FED word (8 bytes) - void resize(size_t newsize); + /// the size is a multiple of the size of a FED word (8 bytes default) + void resize(size_t newsize, size_t wordsize = 8); private: Data data_; diff --git a/DataFormats/FEDRawData/interface/FEDRawDataCollection.h b/DataFormats/FEDRawData/interface/FEDRawDataCollection.h index f7f52c09a51bf..a05577b3a9d20 100644 --- a/DataFormats/FEDRawData/interface/FEDRawDataCollection.h +++ b/DataFormats/FEDRawData/interface/FEDRawDataCollection.h @@ -29,6 +29,8 @@ class FEDRawDataCollection : public edm::DoNotRecordParents { FEDRawDataCollection(const FEDRawDataCollection&); + FEDRawDataCollection& operator=(const FEDRawDataCollection&) = default; + void swap(FEDRawDataCollection& other) { data_.swap(other.data_); } private: diff --git a/DataFormats/FEDRawData/src/FEDRawData.cc b/DataFormats/FEDRawData/src/FEDRawData.cc index 28bbaa55bd19f..4f617d1e2a46a 100644 --- a/DataFormats/FEDRawData/src/FEDRawData.cc +++ b/DataFormats/FEDRawData/src/FEDRawData.cc @@ -13,10 +13,10 @@ using namespace std; FEDRawData::FEDRawData() {} -FEDRawData::FEDRawData(size_t newsize) : data_(newsize) { - if (newsize % 8 != 0) - throw cms::Exception("DataCorrupt") << "FEDRawData::resize: " << newsize << " is not a multiple of 8 bytes." 
- << endl; +FEDRawData::FEDRawData(size_t newsize, size_t wordsize) : data_(newsize) { + if (newsize % wordsize != 0) + throw cms::Exception("DataCorrupt") << "FEDRawData::resize: " << newsize << " is not a multiple of " << wordsize + << " bytes." << endl; } FEDRawData::FEDRawData(const FEDRawData &in) : data_(in.data_) {} @@ -25,13 +25,13 @@ const unsigned char *FEDRawData::data() const { return data_.data(); } unsigned char *FEDRawData::data() { return data_.data(); } -void FEDRawData::resize(size_t newsize) { +void FEDRawData::resize(size_t newsize, size_t wordsize) { if (size() == newsize) return; data_.resize(newsize); - if (newsize % 8 != 0) - throw cms::Exception("DataCorrupt") << "FEDRawData::resize: " << newsize << " is not a multiple of 8 bytes." - << endl; + if (newsize % wordsize != 0) + throw cms::Exception("DataCorrupt") << "FEDRawData::resize: " << newsize << " is not a multiple of " << wordsize + << " bytes." << endl; } diff --git a/DataFormats/FTLDigi/interface/BTLSample.h b/DataFormats/FTLDigi/interface/BTLSample.h index b6ea9d1ee8d55..0f97075ae64e6 100644 --- a/DataFormats/FTLDigi/interface/BTLSample.h +++ b/DataFormats/FTLDigi/interface/BTLSample.h @@ -25,6 +25,7 @@ class BTLSample { BTLSample(uint32_t value, uint16_t flag, uint8_t row, uint8_t col) : value_(value), flag_(flag), row_(row), col_(col) {} BTLSample(const BTLSample& o) : value_(o.value_), flag_(o.flag_), row_(o.row_), col_(o.col_) {} + BTLSample& operator=(const BTLSample&) = default; /** @short setters diff --git a/DataFormats/FTLDigi/interface/ETLSample.h b/DataFormats/FTLDigi/interface/ETLSample.h index 08583091873c1..bc439f4061d14 100644 --- a/DataFormats/FTLDigi/interface/ETLSample.h +++ b/DataFormats/FTLDigi/interface/ETLSample.h @@ -35,6 +35,7 @@ class ETLSample { ETLSample() : value_(0) {} ETLSample(uint32_t value) : value_(value) {} ETLSample(const ETLSample& o) : value_(o.value_) {} + ETLSample& operator=(const ETLSample&) = default; /** @short setters diff --git 
a/DataFormats/ForwardDetId/interface/BTLDetId.h b/DataFormats/ForwardDetId/interface/BTLDetId.h index 1867ae7b80c82..0a1bd7eab1699 100644 --- a/DataFormats/ForwardDetId/interface/BTLDetId.h +++ b/DataFormats/ForwardDetId/interface/BTLDetId.h @@ -43,7 +43,7 @@ class BTLDetId : public MTDDetId { static constexpr uint32_t kCrystalsBTL = kCrystalsPerModuleV2 * kModulesPerRUV2 * kRUPerTypeV2 * kCrystalTypes * HALF_ROD * 2; - enum class CrysLayout { tile = 1, bar = 2, barzflat = 3, barphiflat = 4, v2 = 5 }; + enum class CrysLayout { tile = 1, bar = 2, barzflat = 3, barphiflat = 4, v2 = 5, v3 = 6 }; // ---------- Constructors, enumerated types ---------- @@ -89,6 +89,18 @@ class BTLDetId : public MTDDetId { /** Returns BTL readout unit number per type. */ inline int runit() const { return (id_ >> kBTLRUOffset) & kBTLRUMask; } + /** Returns BTL global readout unit number. */ + inline int globalRunit() const { + if (runit() == 0) { + // pre-V2: build a RU identifier from available information + return (module() - 1) / kModulePerTypeBarPhiFlat / kRUPerTypeV2 + 1; + } else if (runit() > 0 && modType() > 0) { + // V2/V3: build global RU identifier from RU per type and type + return (modType() - 1) * kRUPerTypeV2 + runit(); + } + return 0; + } + /** return the row in GeomDet language **/ inline int row(unsigned nrows = kCrystalsPerModuleV2) const { return (crystal() - 1) % nrows; // anything else for now diff --git a/DataFormats/ForwardDetId/interface/ETLDetId.h b/DataFormats/ForwardDetId/interface/ETLDetId.h index 4c7ff33e2a505..5e1f508e0bdcd 100644 --- a/DataFormats/ForwardDetId/interface/ETLDetId.h +++ b/DataFormats/ForwardDetId/interface/ETLDetId.h @@ -10,6 +10,7 @@ bit 15-5 : module sequential number bit 4-3 : module type (unused so far) + bit 2-1 : sensor */ class ETLDetId : public MTDDetId { @@ -24,6 +25,8 @@ class ETLDetId : public MTDDetId { static constexpr uint32_t kETLmoduleMask = 0x7FF; static constexpr uint32_t kETLmodTypeOffset = 3; static constexpr uint32_t 
kETLmodTypeMask = 0x3; + static constexpr uint32_t kETLsensorOffset = 1; + static constexpr uint32_t kETLsensorMask = 0x3; static constexpr int kETLv1maxRing = 11; static constexpr int kETLv1maxModule = 176; @@ -48,7 +51,7 @@ class ETLDetId : public MTDDetId { static constexpr uint32_t kSoff = 4; - enum class EtlLayout { tp = 1, v4 = 2, v5 = 3 }; + enum class EtlLayout { tp = 1, v4 = 2, v5 = 3, v8 = 4 }; // ---------- Constructors, enumerated types ---------- @@ -77,6 +80,7 @@ class ETLDetId : public MTDDetId { } /** Construct and fill only the det and sub-det fields. */ + // pre v8 ETLDetId(uint32_t zside, uint32_t ring, uint32_t module, uint32_t modtyp) : MTDDetId(DetId::Forward, ForwardSubdetector::FastTime) { id_ |= (MTDType::ETL & kMTDsubdMask) << kMTDsubdOffset | (zside & kZsideMask) << kZsideOffset | @@ -84,6 +88,14 @@ class ETLDetId : public MTDDetId { (modtyp & kETLmodTypeMask) << kETLmodTypeOffset; id_ |= kETLformatV2; } + // v8 + ETLDetId(uint32_t zside, uint32_t ring, uint32_t module, uint32_t modtyp, uint32_t sensor) + : MTDDetId(DetId::Forward, ForwardSubdetector::FastTime) { + id_ |= (MTDType::ETL & kMTDsubdMask) << kMTDsubdOffset | (zside & kZsideMask) << kZsideOffset | + (ring & kRodRingMask) << kRodRingOffset | (module & kETLmoduleMask) << kETLmoduleOffset | + (modtyp & kETLmodTypeMask) << kETLmodTypeOffset | (sensor & kETLsensorMask) << kETLsensorOffset; + id_ |= kETLformatV2; + } /** ETL TDR Construct and fill only the det and sub-det fields. 
*/ @@ -91,6 +103,7 @@ class ETLDetId : public MTDDetId { return (sector + discside * kSoff + 2 * kSoff * disc); } + // pre v8 ETLDetId(uint32_t zside, uint32_t disc, uint32_t discside, uint32_t sector, uint32_t module, uint32_t modtyp) : MTDDetId(DetId::Forward, ForwardSubdetector::FastTime) { id_ |= (MTDType::ETL & kMTDsubdMask) << kMTDsubdOffset | (zside & kZsideMask) << kZsideOffset | @@ -98,6 +111,21 @@ class ETLDetId : public MTDDetId { (module & kETLmoduleMask) << kETLmoduleOffset | (modtyp & kETLmodTypeMask) << kETLmodTypeOffset; id_ |= kETLformatV2; } + // v8 + ETLDetId(uint32_t zside, + uint32_t disc, + uint32_t discside, + uint32_t sector, + uint32_t module, + uint32_t modtyp, + uint32_t sensor) + : MTDDetId(DetId::Forward, ForwardSubdetector::FastTime) { + id_ |= (MTDType::ETL & kMTDsubdMask) << kMTDsubdOffset | (zside & kZsideMask) << kZsideOffset | + (encodeSector(disc, discside, sector) & kRodRingMask) << kRodRingOffset | + (module & kETLmoduleMask) << kETLmoduleOffset | (modtyp & kETLmodTypeMask) << kETLmodTypeOffset | + (sensor & kETLsensorMask) << kETLsensorOffset; + id_ |= kETLformatV2; + } // ---------- Common methods ---------- @@ -107,6 +135,9 @@ class ETLDetId : public MTDDetId { /** Returns ETL module type number. */ inline int modType() const { return (id_ >> kETLmodTypeOffset) & kETLmodTypeMask; } + /** Returns ETL module sensor number. 
*/ + inline int sensor() const { return (id_ >> kETLsensorOffset) & kETLsensorMask; } + ETLDetId geographicalId() const { return id_; } // --------- Methods for the TDR ETL model only ----------- diff --git a/DataFormats/ForwardDetId/src/BTLDetId.cc b/DataFormats/ForwardDetId/src/BTLDetId.cc index 53f16954b34df..7a3ab9461fbdc 100644 --- a/DataFormats/ForwardDetId/src/BTLDetId.cc +++ b/DataFormats/ForwardDetId/src/BTLDetId.cc @@ -6,8 +6,9 @@ BTLDetId BTLDetId::geographicalId(CrysLayout lay) const { if (lay == CrysLayout::barphiflat) { // barphiflat: count modules in a rod, combining all types return BTLDetId(mtdSide(), mtdRR(), module() + kModulePerTypeBarPhiFlat * (modType() - 1), 0, 1); - } else if (lay == CrysLayout::v2) { + } else if (lay == CrysLayout::v2 || lay == CrysLayout::v3) { // v2: set number of crystals to 17 to distinguish from crystal BTLDetId + // v3: set number of crystals to 17 to distinguish from crystal BTLDetId, build V2-like type and RU number as in BTLNumberingScheme return BTLDetId(mtdSide(), mtdRR(), runit(), module(), modType(), kCrystalsPerModuleV2 + 1); } @@ -23,6 +24,7 @@ std::ostream& operator<<(std::ostream& os, const BTLDetId& id) { << " Rod : " << id.mtdRR() << std::endl << " Crystal type: " << id.modType() << std::endl << " Readout unit: " << id.runit() << std::endl + << " Global RU : " << id.globalRunit() << std::endl << " Module : " << id.module() << std::endl << " Crystal : " << id.crystal() << std::endl; return os; diff --git a/DataFormats/ForwardDetId/src/ETLDetId.cc b/DataFormats/ForwardDetId/src/ETLDetId.cc index 18030360a8b73..e0ce3d86b0ecb 100644 --- a/DataFormats/ForwardDetId/src/ETLDetId.cc +++ b/DataFormats/ForwardDetId/src/ETLDetId.cc @@ -8,6 +8,7 @@ std::ostream& operator<<(std::ostream& os, const ETLDetId& id) { << " Ring : " << id.mtdRR() << " " << " Disc/Side/Sector = " << id.nDisc() << " " << id.discSide() << " " << id.sector() << std::endl << " Module : " << id.module() << std::endl - << " Module type : " << 
id.modType() << std::endl; + << " Module type : " << id.modType() << std::endl + << " Sensor : " << id.sensor() << std::endl; return os; } diff --git a/DataFormats/GeometrySurface/test/gpuFrameTransformTest.cpp b/DataFormats/GeometrySurface/test/gpuFrameTransformTest.cpp index ad62b7a1d131c..54282aa77f6f1 100644 --- a/DataFormats/GeometrySurface/test/gpuFrameTransformTest.cpp +++ b/DataFormats/GeometrySurface/test/gpuFrameTransformTest.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" @@ -42,6 +43,9 @@ int main(void) { constexpr uint32_t size = 10000; constexpr uint32_t size32 = size * sizeof(float); + std::random_device rd; + std::mt19937 g(rd()); + float xl[size], yl[size]; float x[size], y[size], z[size]; @@ -79,8 +83,8 @@ int main(void) { le[3 * i + 2] = (i > size / 2) ? 1.f : 0.04f; le[2 * i + 1] = 0.; } - std::random_shuffle(xl, xl + size); - std::random_shuffle(yl, yl + size); + std::shuffle(xl, xl + size, g); + std::shuffle(yl, yl + size, g); cudaCheck(cudaMemcpy(d_xl.get(), xl, size32, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_yl.get(), yl, size32, cudaMemcpyHostToDevice)); diff --git a/DataFormats/GeometryVector/interface/private/Basic3DVectorLD.h b/DataFormats/GeometryVector/interface/private/Basic3DVectorLD.h index 8a5e44080d560..c639985156bd7 100644 --- a/DataFormats/GeometryVector/interface/private/Basic3DVectorLD.h +++ b/DataFormats/GeometryVector/interface/private/Basic3DVectorLD.h @@ -35,6 +35,9 @@ class Basic3DVector { /// constructor from 2D vector (X and Y from 2D vector, z set to zero) Basic3DVector(const Basic2DVector& p) : theX(p.x()), theY(p.y()), theZ(0), theW(0) {} + /// Assignment operator + Basic3DVector& operator=(const Basic3DVector&) = default; + /** Explicit constructor from other (possibly unrelated) vector classes * The only constraint on the argument type is that it has methods * 
x(), y() and z(), and that these methods return a type convertible to T. diff --git a/DataFormats/GeometryVector/interface/private/sseBasic2DVector.h b/DataFormats/GeometryVector/interface/private/sseBasic2DVector.h index 8f47d3603445b..068570243c828 100644 --- a/DataFormats/GeometryVector/interface/private/sseBasic2DVector.h +++ b/DataFormats/GeometryVector/interface/private/sseBasic2DVector.h @@ -26,6 +26,9 @@ class Basic2DVector { /// Copy constructor from same type. Should not be needed but for gcc bug 12685 Basic2DVector(const Basic2DVector& p) : v(p.v) {} + /// Assignment operator + Basic2DVector& operator=(const Basic2DVector&) = default; + template Basic2DVector(const Basic2DVector& p) : v(p.v) {} diff --git a/DataFormats/GeometryVector/interface/private/sseBasic3DVector.h b/DataFormats/GeometryVector/interface/private/sseBasic3DVector.h index 817d746dc46cd..212d4d91fd286 100644 --- a/DataFormats/GeometryVector/interface/private/sseBasic3DVector.h +++ b/DataFormats/GeometryVector/interface/private/sseBasic3DVector.h @@ -45,6 +45,9 @@ class Basic3DVector { /// Copy constructor from same type. 
Should not be needed but for gcc bug 12685 Basic3DVector(const Basic3DVector& p) : v(p.v) {} + /// Assignment operator + Basic3DVector& operator=(const Basic3DVector&) = default; + /// Copy constructor and implicit conversion from Basic3DVector of different precision template Basic3DVector(const Basic3DVector& p) : v(p.v) {} diff --git a/DataFormats/HGCDigi/interface/HGCSample.h b/DataFormats/HGCDigi/interface/HGCSample.h index 084ea5287559e..425fb39ad7339 100644 --- a/DataFormats/HGCDigi/interface/HGCSample.h +++ b/DataFormats/HGCDigi/interface/HGCSample.h @@ -35,6 +35,7 @@ class HGCSample { HGCSample() : value_(0) {} HGCSample(uint32_t value) : value_(value) {} HGCSample(const HGCSample& o) : value_(o.value_) {} + HGCSample& operator=(const HGCSample&) = default; /** @short setters diff --git a/DataFormats/HGCalDigi/interface/HGCROCChannelDataFrame.h b/DataFormats/HGCalDigi/interface/HGCROCChannelDataFrame.h index 224243befe508..f9f9925884a0f 100644 --- a/DataFormats/HGCalDigi/interface/HGCROCChannelDataFrame.h +++ b/DataFormats/HGCalDigi/interface/HGCROCChannelDataFrame.h @@ -38,6 +38,7 @@ class HGCROCChannelDataFrame { HGCROCChannelDataFrame(uint32_t value) : id_(0), value_(value) {} HGCROCChannelDataFrame(const D& id, uint32_t value) : id_(id), value_(value) {} HGCROCChannelDataFrame(const HGCROCChannelDataFrame& o) : id_(o.id_), value_(o.value_) {} + HGCROCChannelDataFrame& operator=(const HGCROCChannelDataFrame&) = default; /** @short det id diff --git a/DataFormats/HLTReco/interface/TriggerEventWithRefs.h b/DataFormats/HLTReco/interface/TriggerEventWithRefs.h index a99105a311d6f..f9e0c6877438b 100644 --- a/DataFormats/HLTReco/interface/TriggerEventWithRefs.h +++ b/DataFormats/HLTReco/interface/TriggerEventWithRefs.h @@ -63,6 +63,7 @@ namespace trigger { size_type l1tp2etsum_; size_type l1ttau_; size_type l1tetsum_; + size_type l1tp2gtcand_; /// constructor TriggerFilterObject() @@ -96,7 +97,8 @@ namespace trigger { l1tpftrack_(0), l1tp2etsum_(0), 
l1ttau_(0), - l1tetsum_(0) { + l1tetsum_(0), + l1tp2gtcand_(0) { filterTag_ = edm::InputTag().encode(); } TriggerFilterObject(const edm::InputTag& filterTag, @@ -129,7 +131,8 @@ namespace trigger { size_type l1tpftrack, size_type l1tp2etsum, size_type l1ttau, - size_type l1tetsum) + size_type l1tetsum, + size_type l1tp2gtcand) : filterTag_(filterTag.encode()), photons_(np), electrons_(ne), @@ -160,7 +163,8 @@ namespace trigger { l1tpftrack_(l1tpftrack), l1tp2etsum_(l1tp2etsum), l1ttau_(l1ttau), - l1tetsum_(l1tetsum) {} + l1tetsum_(l1tetsum), + l1tp2gtcand_(l1tp2gtcand) {} }; /// data members @@ -213,7 +217,8 @@ namespace trigger { addObjects(tfowr.l1tpftrackIds(), tfowr.l1tpftrackRefs()), addObjects(tfowr.l1tp2etsumIds(), tfowr.l1tp2etsumRefs()), addObjects(tfowr.l1ttauIds(), tfowr.l1ttauRefs()), - addObjects(tfowr.l1tetsumIds(), tfowr.l1tetsumRefs())) + addObjects(tfowr.l1tetsumIds(), tfowr.l1tetsumRefs()), + addObjects(tfowr.l1tp2gtcandIds(), tfowr.l1tp2gtcandRefs())) ); } @@ -415,6 +420,12 @@ namespace trigger { return std::pair(begin, end); } + std::pair l1tp2gtcandSlice(size_type filter) const { + const size_type begin(filter == 0 ? 
0 : filterObjects_.at(filter - 1).l1tp2gtcand_); + const size_type end(filterObjects_.at(filter).l1tp2gtcand_); + return std::pair(begin, end); + } + /// extract Refs for a specific filter and of specific physics type void getObjects(size_type filter, Vids& ids, VRphoton& photons) const { @@ -747,6 +758,17 @@ namespace trigger { const size_type end(l1tetsumSlice(filter).second); TriggerRefsCollections::getObjects(id, l1tetsum, begin, end); } + + void getObjects(size_type filter, Vids& ids, VRl1tp2gtcand& l1tp2gtcand) const { + const size_type begin(l1tp2gtcandSlice(filter).first); + const size_type end(l1tp2gtcandSlice(filter).second); + TriggerRefsCollections::getObjects(ids, l1tp2gtcand, begin, end); + } + void getObjects(size_type filter, int id, VRl1tp2gtcand& l1tp2gtcand) const { + const size_type begin(l1tp2gtcandSlice(filter).first); + const size_type end(l1tp2gtcandSlice(filter).second); + TriggerRefsCollections::getObjects(id, l1tp2gtcand, begin, end); + } }; } // namespace trigger diff --git a/DataFormats/HLTReco/interface/TriggerRefsCollections.h b/DataFormats/HLTReco/interface/TriggerRefsCollections.h index 73ba440151d8b..0fced2acffa85 100644 --- a/DataFormats/HLTReco/interface/TriggerRefsCollections.h +++ b/DataFormats/HLTReco/interface/TriggerRefsCollections.h @@ -44,6 +44,7 @@ #include "DataFormats/L1Trigger/interface/Jet.h" #include "DataFormats/L1Trigger/interface/Tau.h" #include "DataFormats/L1Trigger/interface/EtSum.h" +#include "DataFormats/L1Trigger/interface/P2GTCandidate.h" #include "DataFormats/L1TMuonPhase2/interface/TrackerMuon.h" #include "DataFormats/L1TCorrelator/interface/TkElectron.h" #include "DataFormats/L1TCorrelator/interface/TkElectronFwd.h" @@ -103,6 +104,8 @@ namespace trigger { typedef std::vector VRpftau; typedef std::vector VRpfmet; + typedef l1t::P2GTCandidateVectorRef VRl1tp2gtcand; + class TriggerRefsCollections { /// data members private: @@ -173,6 +176,9 @@ namespace trigger { Vids pfmetIds_; VRpfmet pfmetRefs_; + Vids 
l1tp2gtcandIds_; + VRl1tp2gtcand l1tp2gtcandRefs_; + /// methods public: /// constructors @@ -241,7 +247,10 @@ namespace trigger { pftauIds_(), pftauRefs_(), pfmetIds_(), - pfmetRefs_() {} + pfmetRefs_(), + + l1tp2gtcandIds_(), + l1tp2gtcandRefs_() {} /// utility void swap(TriggerRefsCollections& other) { @@ -310,6 +319,9 @@ namespace trigger { std::swap(pftauRefs_, other.pftauRefs_); std::swap(pfmetIds_, other.pfmetIds_); std::swap(pfmetRefs_, other.pfmetRefs_); + + std::swap(l1tp2gtcandIds_, other.l1tp2gtcandIds_); + std::swap(l1tp2gtcandRefs_, other.l1tp2gtcandRefs_); } /// setters for L3 collections: (id=physics type, and Ref) @@ -436,6 +448,10 @@ namespace trigger { pfmetIds_.push_back(id); pfmetRefs_.push_back(ref); } + void addObject(int id, const l1t::P2GTCandidateRef& ref) { + l1tp2gtcandIds_.push_back(id); + l1tp2gtcandRefs_.push_back(ref); + } /// size_type addObjects(const Vids& ids, const VRphoton& refs) { @@ -622,6 +638,12 @@ namespace trigger { pfmetRefs_.insert(pfmetRefs_.end(), refs.begin(), refs.end()); return pfmetIds_.size(); } + size_type addObjects(const Vids& ids, const VRl1tp2gtcand& refs) { + assert(ids.size() == refs.size()); + l1tp2gtcandIds_.insert(l1tp2gtcandIds_.end(), ids.begin(), ids.end()); + l1tp2gtcandRefs_.insert(l1tp2gtcandRefs_.end(), refs.begin(), refs.end()); + return l1tp2gtcandIds_.size(); + } /// various physics-level getters: void getObjects(Vids& ids, VRphoton& refs) const { getObjects(ids, refs, 0, photonIds_.size()); } @@ -1675,6 +1697,41 @@ namespace trigger { return; } + void getObjects(Vids& ids, VRl1tp2gtcand& refs) const { getObjects(ids, refs, 0, l1tp2gtcandIds_.size()); } + void getObjects(Vids& ids, VRl1tp2gtcand& refs, size_type begin, size_type end) const { + assert(begin <= end); + assert(end <= l1tp2gtcandIds_.size()); + const size_type n(end - begin); + ids.resize(n); + refs.resize(n); + size_type j(0); + for (size_type i = begin; i != end; ++i) { + ids[j] = l1tp2gtcandIds_[i]; + refs[j] = 
l1tp2gtcandRefs_[i]; + ++j; + } + } + void getObjects(int id, VRl1tp2gtcand& refs) const { getObjects(id, refs, 0, l1tp2gtcandIds_.size()); } + void getObjects(int id, VRl1tp2gtcand& refs, size_type begin, size_type end) const { + assert(begin <= end); + assert(end <= l1tp2gtcandIds_.size()); + size_type n(0); + for (size_type i = begin; i != end; ++i) { + if (id == l1tp2gtcandIds_[i]) { + ++n; + } + } + refs.resize(n); + size_type j(0); + for (size_type i = begin; i != end; ++i) { + if (id == l1tp2gtcandIds_[i]) { + refs[j] = l1tp2gtcandRefs_[i]; + ++j; + } + } + return; + } + /// low-level getters for data members size_type photonSize() const { return photonIds_.size(); } const Vids& photonIds() const { return photonIds_; } @@ -1797,6 +1854,10 @@ namespace trigger { size_type l1tetsumSize() const { return l1tetsumIds_.size(); } const Vids& l1tetsumIds() const { return l1tetsumIds_; } const VRl1tetsum& l1tetsumRefs() const { return l1tetsumRefs_; } + + size_type l1tp2gtcandSize() const { return l1tp2gtcandIds_.size(); } + const Vids& l1tp2gtcandIds() const { return l1tp2gtcandIds_; } + const VRl1tp2gtcand& l1tp2gtcandRefs() const { return l1tp2gtcandRefs_; } }; // picked up via argument dependent lookup, e-g- by boost::swap() diff --git a/DataFormats/HLTReco/src/classes_def.xml b/DataFormats/HLTReco/src/classes_def.xml index c0da653aa6527..a104854bce32d 100644 --- a/DataFormats/HLTReco/src/classes_def.xml +++ b/DataFormats/HLTReco/src/classes_def.xml @@ -43,7 +43,8 @@ - + + @@ -51,7 +52,8 @@ - + + @@ -69,7 +71,8 @@ - + + @@ -78,7 +81,8 @@ - + + diff --git a/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTCand.h b/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTCand.h index 4631eb48e14dd..16bafbb39e04d 100644 --- a/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTCand.h +++ b/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTCand.h @@ -47,6 +47,9 @@ class L1MuGMTCand { /// copy constructor L1MuGMTCand(const L1MuGMTCand&); + /// assignment operator + L1MuGMTCand& 
operator=(const L1MuGMTCand&) = default; + /// destructor virtual ~L1MuGMTCand(); diff --git a/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTExtendedCand.h b/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTExtendedCand.h index a2287a0c51cfe..443285a5993b3 100644 --- a/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTExtendedCand.h +++ b/DataFormats/L1GlobalMuonTrigger/interface/L1MuGMTExtendedCand.h @@ -54,6 +54,9 @@ class L1MuGMTExtendedCand : public L1MuGMTCand { /// copy constructor L1MuGMTExtendedCand(const L1MuGMTExtendedCand&); + /// assignment operator + L1MuGMTExtendedCand& operator=(const L1MuGMTExtendedCand&) = default; + /// destructor ~L1MuGMTExtendedCand() override; diff --git a/DataFormats/L1Scouting/BuildFile.xml b/DataFormats/L1Scouting/BuildFile.xml new file mode 100644 index 0000000000000..0f37f92979ed5 --- /dev/null +++ b/DataFormats/L1Scouting/BuildFile.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/DataFormats/L1Scouting/README.md b/DataFormats/L1Scouting/README.md new file mode 100644 index 0000000000000..c25fbf3387ee8 --- /dev/null +++ b/DataFormats/L1Scouting/README.md @@ -0,0 +1,8 @@ +# DataFormats/L1Scouting + +## L1 Trigger Scouting data formats + +Any changes to the L1 scouting data formats must be backwards compatible. +In order to ensure the L1 Scouting formats can be read by future CMSSW releases, +there is a `TestWriteL1ScoutingDataFormats` unit test, which makes use of the `TestReadL1Scouting` analyzer and the `TestWriteL1Scouting` producer. +The unit test checks that objects can be written and read properly. 
\ No newline at end of file diff --git a/DataFormats/L1Scouting/interface/L1ScoutingCalo.h b/DataFormats/L1Scouting/interface/L1ScoutingCalo.h new file mode 100644 index 0000000000000..53913fe840b0b --- /dev/null +++ b/DataFormats/L1Scouting/interface/L1ScoutingCalo.h @@ -0,0 +1,198 @@ +#ifndef DataFormats_L1Scouting_L1ScoutingCalo_h +#define DataFormats_L1Scouting_L1ScoutingCalo_h + +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" + +namespace l1ScoutingRun3 { + + class CaloObject { + public: + CaloObject() : hwEt_(0), hwEta_(0), hwPhi_(0), hwIso_(0) {} + + CaloObject(int hwEt, int hwEta, int hwPhi, int iso) : hwEt_(hwEt), hwEta_(hwEta), hwPhi_(hwPhi), hwIso_(iso) {} + + void setHwEt(int hwEt) { hwEt_ = hwEt; } + void setHwEta(int hwEta) { hwEta_ = hwEta; } + void setHwPhi(int hwPhi) { hwPhi_ = hwPhi; } + void setHwIso(int hwIso) { hwIso_ = hwIso; } + + int hwEt() const { return hwEt_; } + int hwEta() const { return hwEta_; } + int hwPhi() const { return hwPhi_; } + int hwIso() const { return hwIso_; } + + private: + int hwEt_; + int hwEta_; + int hwPhi_; + int hwIso_; + }; + + class Jet : public CaloObject { + public: + Jet() : CaloObject(0, 0, 0, 0) {} + + Jet(int hwEt, int hwEta, int hwPhi, int hwQual) : CaloObject(hwEt, hwEta, hwPhi, hwQual) {} + + // store quality instead of iso + void setHwQual(int hwQual) { setHwIso(hwQual); } + int hwQual() const { return hwIso(); } + }; + + class EGamma : public CaloObject { + public: + EGamma() : CaloObject(0, 0, 0, 0) {} + + EGamma(int hwEt, int hwEta, int hwPhi, int iso) : CaloObject(hwEt, hwEta, hwPhi, iso) {} + }; + + class Tau : public CaloObject { + public: + Tau() : CaloObject(0, 0, 0, 0) {} + + Tau(int hwEt, int hwEta, int hwPhi, int iso) : CaloObject(hwEt, hwEta, hwPhi, iso) {} + }; + + class BxSums { + public: + BxSums() + : hwTotalEt_(0), + hwTotalEtEm_(0), + hwTotalHt_(0), + hwMissEt_(0), + hwMissEtPhi_(0), + hwMissHt_(0), + hwMissHtPhi_(0), + hwMissEtHF_(0), + hwMissEtHFPhi_(0), + 
hwMissHtHF_(0), + hwMissHtHFPhi_(0), + hwAsymEt_(0), + hwAsymHt_(0), + hwAsymEtHF_(0), + hwAsymHtHF_(0), + minBiasHFP0_(0), + minBiasHFM0_(0), + minBiasHFP1_(0), + minBiasHFM1_(0), + towerCount_(0), + centrality_(0) {} + + BxSums(int hwTotalEt, + int hwTotalEtEm, + int hwTotalHt, + int hwMissEt, + int hwMissEtPhi, + int hwMissHt, + int hwMissHtPhi, + int hwMissEtHF, + int hwMissEtHFPhi, + int hwMissHtHF, + int hwMissHtHFPhi, + int hwAsymEt, + int hwAsymHt, + int hwAsymEtHF, + int hwAsymHtHF, + int minBiasHFP0, + int minBiasHFM0, + int minBiasHFP1, + int minBiasHFM1, + int towerCount, + int centrality) + : hwTotalEt_(hwTotalEt), + hwTotalEtEm_(hwTotalEtEm), + hwTotalHt_(hwTotalHt), + hwMissEt_(hwMissEt), + hwMissEtPhi_(hwMissEtPhi), + hwMissHt_(hwMissHt), + hwMissHtPhi_(hwMissHtPhi), + hwMissEtHF_(hwMissEtHF), + hwMissEtHFPhi_(hwMissEtHFPhi), + hwMissHtHF_(hwMissHtHF), + hwMissHtHFPhi_(hwMissHtHFPhi), + hwAsymEt_(hwAsymEt), + hwAsymHt_(hwAsymHt), + hwAsymEtHF_(hwAsymEtHF), + hwAsymHtHF_(hwAsymHtHF), + minBiasHFP0_(minBiasHFP0), + minBiasHFM0_(minBiasHFM0), + minBiasHFP1_(minBiasHFP1), + minBiasHFM1_(minBiasHFM1), + towerCount_(towerCount), + centrality_(centrality) {} + + void setHwTotalEt(int hwTotalEt) { hwTotalEt_ = hwTotalEt; } + void setHwTotalEtEm(int hwTotalEtEm) { hwTotalEtEm_ = hwTotalEtEm; } + void setMinBiasHFP0(int minBiasHFP0) { minBiasHFP0_ = minBiasHFP0; } + void setHwTotalHt(int hwTotalHt) { hwTotalHt_ = hwTotalHt; } + void setTowerCount(int towerCount) { towerCount_ = towerCount; } + void setMinBiasHFM0(int minBiasHFM0) { minBiasHFM0_ = minBiasHFM0; } + void setHwMissEt(int hwMissEt) { hwMissEt_ = hwMissEt; } + void setHwMissEtPhi(int hwMissEtPhi) { hwMissEtPhi_ = hwMissEtPhi; } + void setHwAsymEt(int hwAsymEt) { hwAsymEt_ = hwAsymEt; } + void setMinBiasHFP1(int minBiasHFP1) { minBiasHFP1_ = minBiasHFP1; } + void setHwMissHt(int hwMissHt) { hwMissHt_ = hwMissHt; } + void setHwMissHtPhi(int hwMissHtPhi) { hwMissHtPhi_ = hwMissHtPhi; } + void 
setHwAsymHt(int hwAsymHt) { hwAsymHt_ = hwAsymHt; } + void setMinBiasHFM1(int minBiasHFM1) { minBiasHFM1_ = minBiasHFM1; } + void setHwMissEtHF(int hwMissEtHF) { hwMissEtHF_ = hwMissEtHF; } + void setHwMissEtHFPhi(int hwMissEtHFPhi) { hwMissEtHFPhi_ = hwMissEtHFPhi; } + void setHwAsymEtHF(int hwAsymEtHF) { hwAsymEtHF_ = hwAsymEtHF; } + void setHwMissHtHF(int hwMissHtHF) { hwMissHtHF_ = hwMissHtHF; } + void setHwMissHtHFPhi(int hwMissHtHFPhi) { hwMissHtHFPhi_ = hwMissHtHFPhi; } + void setHwAsymHtHF(int hwAsymHtHF) { hwAsymHtHF_ = hwAsymHtHF; } + void setCentrality(int centrality) { centrality_ = centrality; } + + const int hwTotalEt() const { return hwTotalEt_; } + const int hwTotalEtEm() const { return hwTotalEtEm_; } + const int minBiasHFP0() const { return minBiasHFP0_; } + const int hwTotalHt() const { return hwTotalHt_; } + const int towerCount() const { return towerCount_; } + const int minBiasHFM0() const { return minBiasHFM0_; } + const int hwMissEt() const { return hwMissEt_; } + const int hwMissEtPhi() const { return hwMissEtPhi_; } + const int hwAsymEt() const { return hwAsymEt_; } + const int minBiasHFP1() const { return minBiasHFP1_; } + const int hwMissHt() const { return hwMissHt_; } + const int hwMissHtPhi() const { return hwMissHtPhi_; } + const int hwAsymHt() const { return hwAsymHt_; } + const int minBiasHFM1() const { return minBiasHFM1_; } + const int hwMissEtHF() const { return hwMissEtHF_; } + const int hwMissEtHFPhi() const { return hwMissEtHFPhi_; } + const int hwAsymEtHF() const { return hwAsymEtHF_; } + const int hwMissHtHF() const { return hwMissHtHF_; } + const int hwMissHtHFPhi() const { return hwMissHtHFPhi_; } + const int hwAsymHtHF() const { return hwAsymHtHF_; } + const int centrality() const { return centrality_; } + + private: + int hwTotalEt_; + int hwTotalEtEm_; + int hwTotalHt_; + int hwMissEt_; + int hwMissEtPhi_; + int hwMissHt_; + int hwMissHtPhi_; + int hwMissEtHF_; + int hwMissEtHFPhi_; + int hwMissHtHF_; + int 
hwMissHtHFPhi_; + int hwAsymEt_; + int hwAsymHt_; + int hwAsymEtHF_; + int hwAsymHtHF_; + int minBiasHFP0_; + int minBiasHFM0_; + int minBiasHFP1_; + int minBiasHFM1_; + int towerCount_; + int centrality_; + }; + + typedef OrbitCollection JetOrbitCollection; + typedef OrbitCollection EGammaOrbitCollection; + typedef OrbitCollection TauOrbitCollection; + typedef OrbitCollection BxSumsOrbitCollection; + +} // namespace l1ScoutingRun3 +#endif // DataFormats_L1Scouting_L1ScoutingCalo_h \ No newline at end of file diff --git a/DataFormats/L1Scouting/interface/L1ScoutingMuon.h b/DataFormats/L1Scouting/interface/L1ScoutingMuon.h new file mode 100644 index 0000000000000..59882addb1b6a --- /dev/null +++ b/DataFormats/L1Scouting/interface/L1ScoutingMuon.h @@ -0,0 +1,95 @@ +#ifndef DataFormats_L1Scouting_L1ScoutingMuon_h +#define DataFormats_L1Scouting_L1ScoutingMuon_h + +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" + +namespace l1ScoutingRun3 { + + class Muon { + public: + Muon() + : hwPt_(0), + hwEta_(0), + hwPhi_(0), + hwQual_(0), + hwChrg_(0), + hwChrgv_(0), + hwIso_(0), + tfIndex_(0), + hwEtaAtVtx_(0), + hwPhiAtVtx_(0), + hwPtUnconstrained_(0), + hwDXY_(0) {} + + Muon(int hwPt, + int hwEta, + int hwPhi, + int hwQual, + int hwChrg, + int hwChrgv, + int hwIso, + int tfIndex, + int hwEtaAtVtx, + int hwPhiAtVtx, + int hwPtUnconstrained, + int hwDXY) + : hwPt_(hwPt), + hwEta_(hwEta), + hwPhi_(hwPhi), + hwQual_(hwQual), + hwChrg_(hwChrg), + hwChrgv_(hwChrgv), + hwIso_(hwIso), + tfIndex_(tfIndex), + hwEtaAtVtx_(hwEtaAtVtx), + hwPhiAtVtx_(hwPhiAtVtx), + hwPtUnconstrained_(hwPtUnconstrained), + hwDXY_(hwDXY) {} + + void setHwPt(int hwPt) { hwPt_ = hwPt; } + void setHwEta(int hwEta) { hwEta_ = hwEta; } + void setHwPhi(int hwPhi) { hwPhi_ = hwPhi; } + void setHwQual(int hwQual) { hwQual_ = hwQual; } + void setHwChrg(int hwChrg) { hwChrg_ = hwChrg; } + void setHwChrgv(int hwChrgv) { hwChrgv_ = hwChrgv; } + void setHwIso(int hwIso) { hwIso_ = hwIso; } + void 
setTfIndex(int tfIndex) { tfIndex_ = tfIndex; } + void setHwEtaAtVtx(int hwEtaAtVtx) { hwEtaAtVtx_ = hwEtaAtVtx; } + void setHwPhiAtVtx(int hwPhiAtVtx) { hwPhiAtVtx_ = hwPhiAtVtx; } + void setHwPtUnconstrained(int hwPtUnconstrained) { hwPtUnconstrained_ = hwPtUnconstrained; } + void setHwDXY(int hwDXY) { hwDXY_ = hwDXY; } + + int hwPt() const { return hwPt_; } + int hwEta() const { return hwEta_; } + int hwPhi() const { return hwPhi_; } + int hwQual() const { return hwQual_; } + int hwCharge() const { return hwChrg_; } + int hwChargeValid() const { return hwChrgv_; } + int hwIso() const { return hwIso_; } + int hwIndex() const { return tfIndex_; } + int hwEtaAtVtx() const { return hwEtaAtVtx_; } + int hwPhiAtVtx() const { return hwPhiAtVtx_; } + int hwPtUnconstrained() const { return hwPtUnconstrained_; } + int hwDXY() const { return hwDXY_; } + int tfMuonIndex() const { return tfIndex_; } + + private: + int hwPt_; + int hwEta_; + int hwPhi_; + int hwQual_; + int hwChrg_; + int hwChrgv_; + int hwIso_; + int tfIndex_; + int hwEtaAtVtx_; + int hwPhiAtVtx_; + int hwPtUnconstrained_; + int hwDXY_; + }; + + typedef OrbitCollection MuonOrbitCollection; + +} // namespace l1ScoutingRun3 + +#endif // DataFormats_L1Scouting_L1ScoutingMuon_h \ No newline at end of file diff --git a/DataFormats/L1Scouting/interface/OrbitCollection.h b/DataFormats/L1Scouting/interface/OrbitCollection.h new file mode 100644 index 0000000000000..d9d2c34ca75a9 --- /dev/null +++ b/DataFormats/L1Scouting/interface/OrbitCollection.h @@ -0,0 +1,135 @@ +#ifndef DataFormats_L1Scouting_OrbitCollection_h +#define DataFormats_L1Scouting_OrbitCollection_h + +#include "DataFormats/Common/interface/CMS_CLASS_VERSION.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/Utilities/interface/Span.h" + +#include +#include + +template +class OrbitCollection { +public: + typedef typename std::vector::iterator iterator; + typedef typename std::vector::const_iterator const_iterator; + 
typedef T value_type; + typedef typename std::vector::size_type size_type; + + // Initialize the offset vector with 0s from 0 to 3565. + // BX range is [1,3564], an extra entry is needed for the offsets of the last BX + OrbitCollection() : bxOffsets_(orbitBufferSize_ + 1, 0), data_(0) {} + // Construct the flat orbit collection starting from an OrbitBuffer. + // The method fillAndClear will be used, meaning that, after copying the objects, + // orbitBuffer's vectors will be cleared. + OrbitCollection(std::vector>& orbitBuffer, unsigned nObjects = 0) + : bxOffsets_(orbitBufferSize_ + 1, 0), data_(nObjects) { + fillAndClear(orbitBuffer, nObjects); + } + + OrbitCollection(const OrbitCollection& other) = default; + OrbitCollection(OrbitCollection&& other) = default; + OrbitCollection& operator=(const OrbitCollection& other) = default; + OrbitCollection& operator=(OrbitCollection&& other) = default; + + // Fill the orbit collection starting from a vector of vectors, one per BX. + // Objects are copied into a flat data vector, and a second vector is used to keep track + // of the starting index in the data vector for every BX. + // After the copy, the original input buffer is cleared. + // Input vector must be sorted with increasing BX and contain 3565 elements (BX in [1,3564]) + void fillAndClear(std::vector>& orbitBuffer, unsigned nObjects = 0) { + if (orbitBuffer.size() != orbitBufferSize_) + throw cms::Exception("OrbitCollection::fillAndClear") + << "Trying to fill the collection by passing an orbit buffer with incorrect size. 
" + << "Passed " << orbitBuffer.size() << ", expected 3565"; + data_.reserve(nObjects); + bxOffsets_[0] = 0; + unsigned bxIdx = 1; + for (auto& bxVec : orbitBuffer) { + // increase offset by the currect vec size + bxOffsets_[bxIdx] = bxOffsets_[bxIdx - 1] + bxVec.size(); + + // if bxVec contains something, copy it into the data_ vector + // and clear original bxVec objects + if (bxVec.size() > 0) { + data_.insert(data_.end(), bxVec.begin(), bxVec.end()); + bxVec.clear(); + } + + // increment bx index + bxIdx++; + } + } + + // iterate over all elements contained in data + const_iterator begin() const { return data_.begin(); } + const_iterator end() const { return data_.end(); } + + // iterate over elements of a bx + edm::Span bxIterator(unsigned bx) const { + if (bx >= orbitBufferSize_) + throw cms::Exception("OrbitCollection::bxIterator") << "Trying to access and object outside the orbit range. " + << " BX = " << bx; + if (getBxSize(bx) > 0) { + return edm::Span(data_.begin() + bxOffsets_[bx], data_.begin() + bxOffsets_[bx + 1]); + } else { + return edm::Span(end(), end()); + } + } + + // get number of objects stored in a BX + int getBxSize(unsigned bx) const { + if (bx >= orbitBufferSize_) { + cms::Exception("OrbitCollection") << "Called getBxSize() of a bx out of the orbit range." + << " BX = " << bx; + return 0; + } + return bxOffsets_[bx + 1] - bxOffsets_[bx]; + } + + // get i-th object from BX + const T& getBxObject(unsigned bx, unsigned i) const { + if (bx >= orbitBufferSize_) + throw cms::Exception("OrbitCollection::getBxObject") << "Trying to access and object outside the orbit range. 
" + << " BX = " << bx; + if (i >= getBxSize(bx)) + throw cms::Exception("OrbitCollection::getBxObject") + << "Trying to get element " << i << " but for" + << " BX = " << bx << " there are " << getBxSize(bx) << " elements."; + + return data_[bxOffsets_[bx] + i]; + } + + // get the list of non empty BXs + std::vector getFilledBxs() const { + std::vector filledBxVec; + if (!data_.empty()) { + for (unsigned bx = 0; bx < orbitBufferSize_; bx++) { + if ((bxOffsets_[bx + 1] - bxOffsets_[bx]) > 0) + filledBxVec.push_back(bx); + } + } + return filledBxVec; + } + + int size() const { return data_.size(); } + + T& operator[](std::size_t i) { return data_[i]; } + const T& operator[](std::size_t i) const { return data_[i]; } + + // used by ROOT storage + CMS_CLASS_VERSION(3) + +private: + // store data vector and BX offsets as flat vectors. + // offset contains one entry per BX, indicating the starting index + // of the objects for that BX. + std::vector bxOffsets_; + std::vector data_; + + // there are 3564 BX in one orbtit [1,3564], one extra + // count added to keep first entry of the vector + static constexpr int orbitBufferSize_ = 3565; +}; + +#endif // DataFormats_L1Scouting_OrbitCollection_h diff --git a/DataFormats/L1Scouting/src/classes.h b/DataFormats/L1Scouting/src/classes.h new file mode 100644 index 0000000000000..e9c045dd13f21 --- /dev/null +++ b/DataFormats/L1Scouting/src/classes.h @@ -0,0 +1,6 @@ +#include "DataFormats/Common/interface/RefProd.h" +#include "DataFormats/Common/interface/Wrapper.h" + +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingCalo.h" \ No newline at end of file diff --git a/DataFormats/L1Scouting/src/classes_def.xml b/DataFormats/L1Scouting/src/classes_def.xml new file mode 100644 index 0000000000000..994fe65b0d442 --- /dev/null +++ b/DataFormats/L1Scouting/src/classes_def.xml @@ -0,0 +1,40 @@ + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/DataFormats/L1Scouting/test/BuildFile.xml b/DataFormats/L1Scouting/test/BuildFile.xml new file mode 100644 index 0000000000000..2448764be48d5 --- /dev/null +++ b/DataFormats/L1Scouting/test/BuildFile.xml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/DataFormats/L1Scouting/test/TestL1ScoutingFormat.sh b/DataFormats/L1Scouting/test/TestL1ScoutingFormat.sh new file mode 100755 index 0000000000000..3ef6057274e9a --- /dev/null +++ b/DataFormats/L1Scouting/test/TestL1ScoutingFormat.sh @@ -0,0 +1,11 @@ +#!/bin/sh -ex + +function die { echo $1: status $2 ; exit $2; } + +LOCAL_TEST_DIR=${SCRAM_TEST_PATH} + +cmsRun ${LOCAL_TEST_DIR}/create_L1Scouting_test_file_cfg.py || die 'Failure using create_L1Scouting_test_file_cfg.py' $? + +file=testL1Scouting.root + +cmsRun ${LOCAL_TEST_DIR}/read_L1Scouting_cfg.py "$file" || die "Failure using read_L1Scouting_cfg.py $file" $? 
\ No newline at end of file diff --git a/DataFormats/L1Scouting/test/TestReadL1Scouting.cc b/DataFormats/L1Scouting/test/TestReadL1Scouting.cc new file mode 100644 index 0000000000000..b39f5f8b7547c --- /dev/null +++ b/DataFormats/L1Scouting/test/TestReadL1Scouting.cc @@ -0,0 +1,333 @@ +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingCalo.h" +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/global/EDAnalyzer.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/Exception.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include +#include +#include + +namespace edmtest { + using namespace l1ScoutingRun3; + class TestReadL1Scouting : public edm::global::EDAnalyzer<> { + public: + TestReadL1Scouting(edm::ParameterSet const&); + void analyze(edm::StreamID, edm::Event const&, edm::EventSetup const&) const override; + static void fillDescriptions(edm::ConfigurationDescriptions&); + + private: + void analyzeMuons(edm::Event const& iEvent) const; + void analyzeJets(edm::Event const& iEvent) const; + void analyzeEGammas(edm::Event const& iEvent) const; + void analyzeTaus(edm::Event const& iEvent) const; + void analyzeBxSums(edm::Event const& iEvent) const; + + void throwWithMessageFromConstructor(const char*) const; + void throwWithMessage(const char*) const; + + const std::vector bxValues_; + + const std::vector expectedMuonValues_; + const edm::EDGetTokenT> muonsToken_; + + const std::vector expectedJetValues_; + const 
edm::EDGetTokenT> jetsToken_; + + const std::vector expectedEGammaValues_; + const edm::EDGetTokenT> eGammasToken_; + + const std::vector expectedTauValues_; + const edm::EDGetTokenT> tausToken_; + + const std::vector expectedBxSumsValues_; + const edm::EDGetTokenT> bxSumsToken_; + }; + + TestReadL1Scouting::TestReadL1Scouting(edm::ParameterSet const& iPSet) + : bxValues_(iPSet.getParameter>("bxValues")), + expectedMuonValues_(iPSet.getParameter>("expectedMuonValues")), + muonsToken_(consumes(iPSet.getParameter("muonsTag"))), + expectedJetValues_(iPSet.getParameter>("expectedJetValues")), + jetsToken_(consumes(iPSet.getParameter("jetsTag"))), + expectedEGammaValues_(iPSet.getParameter>("expectedEGammaValues")), + eGammasToken_(consumes(iPSet.getParameter("eGammasTag"))), + expectedTauValues_(iPSet.getParameter>("expectedTauValues")), + tausToken_(consumes(iPSet.getParameter("tausTag"))), + expectedBxSumsValues_(iPSet.getParameter>("expectedBxSumsValues")), + bxSumsToken_(consumes(iPSet.getParameter("bxSumsTag"))) { + if (bxValues_.size() != 2) { + throwWithMessageFromConstructor("bxValues must have 2 elements and it does not"); + } + if (expectedMuonValues_.size() != 3) { + throwWithMessageFromConstructor("muonValues must have 3 elements and it does not"); + } + if (expectedJetValues_.size() != 4) { + throwWithMessageFromConstructor("jetValues must have 4 elements and it does not"); + } + if (expectedEGammaValues_.size() != 3) { + throwWithMessageFromConstructor("eGammaValues must have 3 elements and it does not"); + } + if (expectedTauValues_.size() != 2) { + throwWithMessageFromConstructor("tauValues must have 2 elements and it does not"); + } + if (expectedBxSumsValues_.size() != 1) { + throwWithMessageFromConstructor("bxSumsValues_ must have 1 elements and it does not"); + } + } + + void TestReadL1Scouting::analyze(edm::StreamID, edm::Event const& iEvent, edm::EventSetup const&) const { + analyzeMuons(iEvent); + analyzeJets(iEvent); + analyzeEGammas(iEvent); + 
analyzeTaus(iEvent); + analyzeBxSums(iEvent); + } + + void TestReadL1Scouting::analyzeMuons(edm::Event const& iEvent) const { + auto const& muonsCollection = iEvent.get(muonsToken_); + + for (const unsigned& bx : bxValues_) { + unsigned nMuons = muonsCollection.getBxSize(bx); + if (nMuons != expectedMuonValues_.size()) { + throwWithMessage("analyzeMuons, muons do not have the expected bx size"); + } + + const auto& muons = muonsCollection.bxIterator(bx); + for (unsigned i = 0; i < nMuons; i++) { + if (muons[i].hwPt() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwPt does not match the expected value"); + } + if (muons[i].hwEta() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwEta does not match the expected value"); + } + if (muons[i].hwPhi() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwPhi does not match the expected value"); + } + if (muons[i].hwQual() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwQual does not match the expected value"); + } + if (muons[i].hwCharge() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwCharge does not match the expected value"); + } + if (muons[i].hwChargeValid() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwChargeValid does not match the expected value"); + } + if (muons[i].hwIso() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwIso does not match the expected value"); + } + if (muons[i].hwIndex() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwIndex does not match the expected value"); + } + if (muons[i].hwEtaAtVtx() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwEtaAtVtx does not match the expected value"); + } + if (muons[i].hwPhiAtVtx() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwPhiAtVtx does not match the expected value"); + } + if (muons[i].hwPtUnconstrained() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwPtUnconstrained does 
not match the expected value"); + } + if (muons[i].hwDXY() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, hwDXY does not match the expected value"); + } + if (muons[i].tfMuonIndex() != expectedMuonValues_[i]) { + throwWithMessage("analyzeMuons, tfMuonIndex does not match the expected value"); + } + } + } + } + + void TestReadL1Scouting::analyzeJets(edm::Event const& iEvent) const { + auto const& jetsCollection = iEvent.get(jetsToken_); + + for (const unsigned& bx : bxValues_) { + unsigned nJets = jetsCollection.getBxSize(bx); + if (nJets != expectedJetValues_.size()) { + throwWithMessage("analyzeJets, jets do not have the expected bx size"); + } + + const auto& jets = jetsCollection.bxIterator(bx); + for (unsigned i = 0; i < nJets; i++) { + if (jets[i].hwEt() != expectedJetValues_[i]) { + throwWithMessage("analyzeJets, hwEt does not match the expected value"); + } + if (jets[i].hwEta() != expectedJetValues_[i]) { + throwWithMessage("analyzeJets, hwEta does not match the expected value"); + } + if (jets[i].hwPhi() != expectedJetValues_[i]) { + throwWithMessage("analyzeJets, hwPhi does not match the expected value"); + } + if (jets[i].hwIso() != expectedJetValues_[i]) { + throwWithMessage("analyzeJets, hwIso does not match the expected value"); + } + } + } + } + + void TestReadL1Scouting::analyzeEGammas(edm::Event const& iEvent) const { + auto const& eGammasCollection = iEvent.get(eGammasToken_); + + for (const unsigned& bx : bxValues_) { + unsigned nEGammas = eGammasCollection.getBxSize(bx); + if (nEGammas != expectedEGammaValues_.size()) { + throwWithMessage("analyzeEGammas, egammas do not have the expected bx size"); + } + + const auto& eGammas = eGammasCollection.bxIterator(bx); + for (unsigned i = 0; i < nEGammas; i++) { + if (eGammas[i].hwEt() != expectedEGammaValues_[i]) { + throwWithMessage("analyzeEGammas, hwEt does not match the expected value"); + } + if (eGammas[i].hwEta() != expectedEGammaValues_[i]) { + throwWithMessage("analyzeEGammas, 
hwEta does not match the expected value"); + } + if (eGammas[i].hwPhi() != expectedEGammaValues_[i]) { + throwWithMessage("analyzeEGammas, hwPhi does not match the expected value"); + } + if (eGammas[i].hwIso() != expectedEGammaValues_[i]) { + throwWithMessage("analyzeEGammas, hwIso does not match the expected value"); + } + } + } + } + + void TestReadL1Scouting::analyzeTaus(edm::Event const& iEvent) const { + auto const& tausCollection = iEvent.get(tausToken_); + + for (const unsigned& bx : bxValues_) { + unsigned nTaus = tausCollection.getBxSize(bx); + if (nTaus != expectedTauValues_.size()) { + throwWithMessage("analyzeTaus, taus do not have the expected bx size"); + } + + const auto& taus = tausCollection.bxIterator(bx); + for (unsigned i = 0; i < nTaus; i++) { + if (taus[i].hwEt() != expectedTauValues_[i]) { + throwWithMessage("analyzeTaus, hwEt does not match the expected value"); + } + if (taus[i].hwEta() != expectedTauValues_[i]) { + throwWithMessage("analyzeTaus, hwEta does not match the expected value"); + } + if (taus[i].hwPhi() != expectedTauValues_[i]) { + throwWithMessage("analyzeTaus, hwPhi does not match the expected value"); + } + if (taus[i].hwIso() != expectedTauValues_[i]) { + throwWithMessage("analyzeTaus, hwIso does not match the expected value"); + } + } + } + } + + void TestReadL1Scouting::analyzeBxSums(edm::Event const& iEvent) const { + auto const& bxSumsCollection = iEvent.get(bxSumsToken_); + + for (const unsigned& bx : bxValues_) { + unsigned nSums = bxSumsCollection.getBxSize(bx); + if (nSums != expectedBxSumsValues_.size()) { + throwWithMessage("analyzeBxSums, sums do not have the expected bx size"); + } + + const auto& sums = bxSumsCollection.bxIterator(bx); + for (unsigned i = 0; i < nSums; i++) { + if (sums[i].hwTotalEt() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwTotalEt does not match the expected value"); + } + if (sums[i].hwTotalEtEm() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, 
hwTotalEtEm does not match the expected value"); + } + if (sums[i].hwTotalHt() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwTotalHt does not match the expected value"); + } + if (sums[i].hwMissEt() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissEt does not match the expected value"); + } + if (sums[i].hwMissEtPhi() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissEtPhi does not match the expected value"); + } + if (sums[i].hwMissHt() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissHt does not match the expected value"); + } + if (sums[i].hwMissHtPhi() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissHtPhi does not match the expected value"); + } + if (sums[i].hwMissEtHF() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissEtHF does not match the expected value"); + } + if (sums[i].hwMissEtHFPhi() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissEtHFPhi does not match the expected value"); + } + if (sums[i].hwMissHtHF() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwMissHtHFPhi does not match the expected value"); + } + if (sums[i].hwAsymEt() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwAsymEt does not match the expected value"); + } + if (sums[i].hwAsymHt() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwAsymHt does not match the expected value"); + } + if (sums[i].hwAsymEtHF() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwAsymEtHF does not match the expected value"); + } + if (sums[i].hwAsymHtHF() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, hwAsymHtHF does not match the expected value"); + } + if (sums[i].minBiasHFP0() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, minBiasHFP0 does not match the expected value"); + } + if (sums[i].minBiasHFM0() != expectedBxSumsValues_[i]) { + 
throwWithMessage("analyzeBxSums, minBiasHFM0 does not match the expected value"); + } + if (sums[i].minBiasHFP1() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, minBiasHFP1 does not match the expected value"); + } + if (sums[i].minBiasHFM1() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, minBiasHFM1 does not match the expected value"); + } + if (sums[i].towerCount() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, towerCount does not match the expected value"); + } + if (sums[i].centrality() != expectedBxSumsValues_[i]) { + throwWithMessage("analyzeBxSums, centrality does not match the expected value"); + } + } + } + } + + void TestReadL1Scouting::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add>("bxValues"); + desc.add>("expectedMuonValues"); + desc.add("muonsTag"); + desc.add>("expectedJetValues"); + desc.add("jetsTag"); + desc.add>("expectedEGammaValues"); + desc.add("eGammasTag"); + desc.add>("expectedTauValues"); + desc.add("tausTag"); + desc.add>("expectedBxSumsValues"); + desc.add("bxSumsTag"); + descriptions.addDefault(desc); + } + + void TestReadL1Scouting::throwWithMessageFromConstructor(const char* msg) const { + throw cms::Exception("TestFailure") << "TestReadL1Scouting constructor, test configuration error, " << msg; + } + + void TestReadL1Scouting::throwWithMessage(const char* msg) const { + throw cms::Exception("TestFailure") << "TestReadL1Scouting analyzer, " << msg; + } + +} // namespace edmtest + +using edmtest::TestReadL1Scouting; +DEFINE_FWK_MODULE(TestReadL1Scouting); \ No newline at end of file diff --git a/DataFormats/L1Scouting/test/TestWriteL1Scouting.cc b/DataFormats/L1Scouting/test/TestWriteL1Scouting.cc new file mode 100644 index 0000000000000..62a807b4b5b18 --- /dev/null +++ b/DataFormats/L1Scouting/test/TestWriteL1Scouting.cc @@ -0,0 +1,192 @@ +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" +#include 
"DataFormats/L1Scouting/interface/L1ScoutingCalo.h" +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/EDPutToken.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include +#include +#include + +namespace edmtest { + using namespace l1ScoutingRun3; + class TestWriteL1Scouting : public edm::global::EDProducer<> { + public: + TestWriteL1Scouting(edm::ParameterSet const&); + void produce(edm::StreamID, edm::Event&, edm::EventSetup const&) const override; + static void fillDescriptions(edm::ConfigurationDescriptions&); + + private: + void produceMuons(edm::Event& iEvent) const; + void produceJets(edm::Event& iEvent) const; + void produceEGammas(edm::Event& iEvent) const; + void produceTaus(edm::Event& iEvent) const; + void produceBxSums(edm::Event& iEvent) const; + + void throwWithMessage(const char*) const; + + const std::vector bxValues_; + + const std::vector muonValues_; + const edm::EDPutTokenT> muonsPutToken_; + + const std::vector jetValues_; + const edm::EDPutTokenT> jetsPutToken_; + + const std::vector eGammaValues_; + const edm::EDPutTokenT> eGammasPutToken_; + + const std::vector tauValues_; + const edm::EDPutTokenT> tausPutToken_; + + const std::vector bxSumsValues_; + const edm::EDPutTokenT> bxSumsPutToken_; + }; + + TestWriteL1Scouting::TestWriteL1Scouting(edm::ParameterSet const& iPSet) + : bxValues_(iPSet.getParameter>("bxValues")), + muonValues_(iPSet.getParameter>("muonValues")), + muonsPutToken_(produces()), + jetValues_(iPSet.getParameter>("jetValues")), + jetsPutToken_(produces()), + 
eGammaValues_(iPSet.getParameter>("eGammaValues")), + eGammasPutToken_(produces()), + tauValues_(iPSet.getParameter>("tauValues")), + tausPutToken_(produces()), + bxSumsValues_(iPSet.getParameter>("bxSumsValues")), + bxSumsPutToken_(produces()) { + if (bxValues_.size() != 2) { + throwWithMessage("bxValues must have 2 elements and it does not"); + } + if (muonValues_.size() != 3) { + throwWithMessage("muonValues must have 3 elements and it does not"); + } + if (jetValues_.size() != 4) { + throwWithMessage("jetValues must have 4 elements and it does not"); + } + if (eGammaValues_.size() != 3) { + throwWithMessage("eGammaValues must have 3 elements and it does not"); + } + if (tauValues_.size() != 2) { + throwWithMessage("tauValues must have 2 elements and it does not"); + } + if (bxSumsValues_.size() != 1) { + throwWithMessage("bxSumsValues_ must have 1 elements and it does not"); + } + } + + void TestWriteL1Scouting::produce(edm::StreamID, edm::Event& iEvent, edm::EventSetup const&) const { + produceMuons(iEvent); + produceJets(iEvent); + produceEGammas(iEvent); + produceTaus(iEvent); + produceBxSums(iEvent); + } + + void TestWriteL1Scouting::produceMuons(edm::Event& iEvent) const { + std::unique_ptr muons(new l1ScoutingRun3::MuonOrbitCollection); + + std::vector> orbitBufferMuons(3565); + int nMuons = 0; + for (const unsigned& bx : bxValues_) { + for (const int& val : muonValues_) { + orbitBufferMuons[bx].emplace_back(val, val, val, val, val, val, val, val, val, val, val, val); + nMuons++; + } + } + + muons->fillAndClear(orbitBufferMuons, nMuons); + iEvent.put(muonsPutToken_, std::move(muons)); + } + + void TestWriteL1Scouting::produceJets(edm::Event& iEvent) const { + std::unique_ptr jets(new l1ScoutingRun3::JetOrbitCollection); + + std::vector> orbitBufferJets(3565); + int nJets = 0; + for (const unsigned& bx : bxValues_) { + for (const int& val : jetValues_) { + orbitBufferJets[bx].emplace_back(val, val, val, val); + nJets++; + } + } + + 
jets->fillAndClear(orbitBufferJets, nJets); + iEvent.put(jetsPutToken_, std::move(jets)); + } + + void TestWriteL1Scouting::produceEGammas(edm::Event& iEvent) const { + std::unique_ptr eGammas(new l1ScoutingRun3::EGammaOrbitCollection); + + std::vector> orbitBufferEGammas(3565); + int nEGammas = 0; + for (const unsigned& bx : bxValues_) { + for (const int& val : eGammaValues_) { + orbitBufferEGammas[bx].emplace_back(val, val, val, val); + nEGammas++; + } + } + + eGammas->fillAndClear(orbitBufferEGammas, nEGammas); + iEvent.put(eGammasPutToken_, std::move(eGammas)); + } + + void TestWriteL1Scouting::produceTaus(edm::Event& iEvent) const { + std::unique_ptr taus(new l1ScoutingRun3::TauOrbitCollection); + + std::vector> orbitBufferTaus(3565); + int nTaus = 0; + for (const unsigned& bx : bxValues_) { + for (const int& val : tauValues_) { + orbitBufferTaus[bx].emplace_back(val, val, val, val); + nTaus++; + } + } + + taus->fillAndClear(orbitBufferTaus, nTaus); + iEvent.put(tausPutToken_, std::move(taus)); + } + + void TestWriteL1Scouting::produceBxSums(edm::Event& iEvent) const { + std::unique_ptr bxSums(new l1ScoutingRun3::BxSumsOrbitCollection); + + std::vector> orbitBufferBxSums(3565); + int nBxSums = 0; + for (const unsigned& bx : bxValues_) { + for (const int& val : bxSumsValues_) { + orbitBufferBxSums[bx].emplace_back( + val, val, val, val, val, val, val, val, val, val, val, val, val, val, val, val, val, val, val, val, val); + nBxSums++; + } + } + + bxSums->fillAndClear(orbitBufferBxSums, nBxSums); + iEvent.put(bxSumsPutToken_, std::move(bxSums)); + } + + void TestWriteL1Scouting::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add>("bxValues"); + desc.add>("muonValues"); + desc.add>("jetValues"); + desc.add>("eGammaValues"); + desc.add>("tauValues"); + desc.add>("bxSumsValues"); + descriptions.addDefault(desc); + } + + void TestWriteL1Scouting::throwWithMessage(const char* msg) const { + throw 
cms::Exception("TestFailure") << "TestWriteL1Scouting constructor, test configuration error, " << msg; + } + +} // namespace edmtest + +using edmtest::TestWriteL1Scouting; +DEFINE_FWK_MODULE(TestWriteL1Scouting); \ No newline at end of file diff --git a/DataFormats/L1Scouting/test/create_L1Scouting_test_file_cfg.py b/DataFormats/L1Scouting/test/create_L1Scouting_test_file_cfg.py new file mode 100644 index 0000000000000..1f03c455497ff --- /dev/null +++ b/DataFormats/L1Scouting/test/create_L1Scouting_test_file_cfg.py @@ -0,0 +1,24 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("PROD") + +process.load("FWCore.MessageService.MessageLogger_cfi") + +process.source = cms.Source("EmptySource") +process.maxEvents.input = 1 + +process.l1ScoutingTestProducer = cms.EDProducer("TestWriteL1Scouting", + bxValues = cms.vuint32(42, 512), + muonValues = cms.vint32(1, 2, 3), + jetValues = cms.vint32(4, 5, 6, 7), + eGammaValues = cms.vint32(8, 9, 10), + tauValues = cms.vint32(11, 12), + bxSumsValues = cms.vint32(13) +) + +process.out = cms.OutputModule("PoolOutputModule", + fileName = cms.untracked.string('testL1Scouting.root') +) + +process.path = cms.Path(process.l1ScoutingTestProducer) +process.endPath = cms.EndPath(process.out) \ No newline at end of file diff --git a/DataFormats/L1Scouting/test/read_L1Scouting_cfg.py b/DataFormats/L1Scouting/test/read_L1Scouting_cfg.py new file mode 100644 index 0000000000000..3153934b8106a --- /dev/null +++ b/DataFormats/L1Scouting/test/read_L1Scouting_cfg.py @@ -0,0 +1,30 @@ +import FWCore.ParameterSet.Config as cms +import sys + +process = cms.Process("READ") + +process.load("FWCore.MessageService.MessageLogger_cfi") + +process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring("file:"+sys.argv[1])) +process.maxEvents.input = 1 + +process.l1ScoutingTestAnalyzer = cms.EDAnalyzer("TestReadL1Scouting", + bxValues = cms.vuint32(42, 512), + muonsTag = cms.InputTag("l1ScoutingTestProducer", "", "PROD"), + 
expectedMuonValues = cms.vint32(1, 2, 3), + jetsTag = cms.InputTag("l1ScoutingTestProducer", "", "PROD"), + expectedJetValues = cms.vint32(4, 5, 6, 7), + eGammasTag = cms.InputTag("l1ScoutingTestProducer", "", "PROD"), + expectedEGammaValues = cms.vint32(8, 9, 10), + tausTag = cms.InputTag("l1ScoutingTestProducer", "", "PROD"), + expectedTauValues = cms.vint32(11, 12), + bxSumsTag = cms.InputTag("l1ScoutingTestProducer", "", "PROD"), + expectedBxSumsValues = cms.vint32(13) +) + +process.out = cms.OutputModule("PoolOutputModule", + fileName = cms.untracked.string('testL1Scouting2.root') +) + +process.path = cms.Path(process.l1ScoutingTestAnalyzer) +process.endPath = cms.EndPath(process.out) \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/BuildFile.xml b/DataFormats/L1ScoutingRawData/BuildFile.xml new file mode 100644 index 0000000000000..736e6ca08a199 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/BuildFile.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/README.md b/DataFormats/L1ScoutingRawData/README.md new file mode 100644 index 0000000000000..4c610acbe176e --- /dev/null +++ b/DataFormats/L1ScoutingRawData/README.md @@ -0,0 +1,8 @@ +# DataFormats/L1ScoutingRawData + +## L1 Trigger Scouting raw data formats + +Any changes to the L1 scouting raw data `SDSRawDataCollection` must be backwards compatible. +In order to ensure the L1 Scouting raw data formats can be read by future CMSSW releases, +there is a `TestSDSRawDataCollectionFormat` unit test, which makes use of the `TestReadSDSRawDataCollection` analyzer and the `TestWriteSDSRawDataCollection` producer. +The unit test checks that objects can be written and read properly. 
\ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/interface/SDSNumbering.h b/DataFormats/L1ScoutingRawData/interface/SDSNumbering.h new file mode 100644 index 0000000000000..354131e56e4ed --- /dev/null +++ b/DataFormats/L1ScoutingRawData/interface/SDSNumbering.h @@ -0,0 +1,24 @@ +#ifndef L1ScoutingRawData_SDSNumbering_h +#define L1ScoutingRawData_SDSNumbering_h + +/** + * + * This class holds the Scouting Data Source (SDS) + * numbering scheme for the Level 1 scouting system + * + */ + +class SDSNumbering { +public: + static constexpr int lastSDSId() { return MAXSDSID; } + + static constexpr int NOT_A_SDSID = -1; + static constexpr int MAXSDSID = 32; + static constexpr int GmtSDSID = 1; + static constexpr int CaloSDSID = 2; + static constexpr int GtSDSID = 4; + static constexpr int BmtfMinSDSID = 10; + static constexpr int BmtfMaxSDSID = 21; +}; + +#endif // L1ScoutingRawData_SDSNumbering_h \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h b/DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h new file mode 100644 index 0000000000000..8fef964a7fc3c --- /dev/null +++ b/DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h @@ -0,0 +1,36 @@ +#ifndef L1ScoutingRawData_SDSRawDataCollection_h +#define L1ScoutingRawData_SDSRawDataCollection_h + +#include "DataFormats/FEDRawData/interface/FEDRawData.h" +#include "DataFormats/Common/interface/traits.h" + +/** + * + * This collection holds the raw data for all the + * scouting data sources. 
It is a collection of FEDRawData + * + */ + +class SDSRawDataCollection : public edm::DoNotRecordParents { +public: + SDSRawDataCollection(); + SDSRawDataCollection(const SDSRawDataCollection&) = default; + SDSRawDataCollection(SDSRawDataCollection&&) noexcept = default; + SDSRawDataCollection& operator=(const SDSRawDataCollection&) = default; + SDSRawDataCollection& operator=(SDSRawDataCollection&&) noexcept = default; + + // retrive data for the scouting source at sourceId + const FEDRawData& FEDData(int sourceId) const; + + // retrive data for the scouting source at sourceId + FEDRawData& FEDData(int sourceId); + + void swap(SDSRawDataCollection& other) { data_.swap(other.data_); } + +private: + std::vector data_; // vector of raw data +}; + +inline void swap(SDSRawDataCollection& a, SDSRawDataCollection& b) { a.swap(b); } + +#endif // L1ScoutingRawData_SDSRawDataCollection_h diff --git a/DataFormats/L1ScoutingRawData/src/SDSRawDataCollection.cc b/DataFormats/L1ScoutingRawData/src/SDSRawDataCollection.cc new file mode 100644 index 0000000000000..c59798bee3522 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/src/SDSRawDataCollection.cc @@ -0,0 +1,8 @@ +#include "DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSNumbering.h" + +SDSRawDataCollection::SDSRawDataCollection() : data_(SDSNumbering::lastSDSId() + 1) {} + +const FEDRawData& SDSRawDataCollection::FEDData(int sourceId) const { return data_[sourceId]; } + +FEDRawData& SDSRawDataCollection::FEDData(int sourceId) { return data_[sourceId]; } diff --git a/DataFormats/L1ScoutingRawData/src/classes.h b/DataFormats/L1ScoutingRawData/src/classes.h new file mode 100644 index 0000000000000..98c4a6b676b25 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/src/classes.h @@ -0,0 +1,4 @@ +#include "DataFormats/Common/interface/Wrapper.h" +// #include "DataFormats/Common/interface/RefProd.h" + +#include 
"DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/src/classes_def.xml b/DataFormats/L1ScoutingRawData/src/classes_def.xml new file mode 100644 index 0000000000000..f62cced686169 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/src/classes_def.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/test/BuildFile.xml b/DataFormats/L1ScoutingRawData/test/BuildFile.xml new file mode 100644 index 0000000000000..6529e327073ec --- /dev/null +++ b/DataFormats/L1ScoutingRawData/test/BuildFile.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/test/TestReadSDSRawDataCollection.cc b/DataFormats/L1ScoutingRawData/test/TestReadSDSRawDataCollection.cc new file mode 100644 index 0000000000000..275488fe1842e --- /dev/null +++ b/DataFormats/L1ScoutingRawData/test/TestReadSDSRawDataCollection.cc @@ -0,0 +1,73 @@ +#include "DataFormats/FEDRawData/interface/FEDRawData.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/global/EDAnalyzer.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/Exception.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include + +namespace edmtest { + + class TestReadSDSRawDataCollection : public edm::global::EDAnalyzer<> { + public: + TestReadSDSRawDataCollection(edm::ParameterSet const&); + void analyze(edm::StreamID, edm::Event const&, edm::EventSetup const&) 
const override; + void throwWithMessage(const char*) const; + static void fillDescriptions(edm::ConfigurationDescriptions&); + + private: + std::vector expectedSDSData1_; + std::vector expectedSDSData2_; + edm::EDGetTokenT sdsRawDataCollectionToken_; + }; + + TestReadSDSRawDataCollection::TestReadSDSRawDataCollection(edm::ParameterSet const& iPSet) + : expectedSDSData1_(iPSet.getParameter>("expectedSDSData1")), + expectedSDSData2_(iPSet.getParameter>("expectedSDSData2")), + sdsRawDataCollectionToken_(consumes(iPSet.getParameter("sdsRawDataCollectionTag"))) {} + + void TestReadSDSRawDataCollection::analyze(edm::StreamID, edm::Event const& iEvent, edm::EventSetup const&) const { + auto const& sdsRawDataCollection = iEvent.get(sdsRawDataCollectionToken_); + auto const& sdsData1 = sdsRawDataCollection.FEDData(1); + if (sdsData1.size() != expectedSDSData1_.size()) { + throwWithMessage("sdsData1 does not have expected size"); + } + for (unsigned int i = 0; i < sdsData1.size(); ++i) { + if (sdsData1.data()[i] != expectedSDSData1_[i]) { + throwWithMessage("sdsData1 does not have expected contents"); + } + } + auto const& sdsData2 = sdsRawDataCollection.FEDData(2); + if (sdsData2.size() != expectedSDSData2_.size()) { + throwWithMessage("sdsData2 does not have expected size"); + } + for (unsigned int i = 0; i < sdsData2.size(); ++i) { + if (sdsData2.data()[i] != expectedSDSData2_[i]) { + throwWithMessage("sdsData2 does not have expected contents"); + } + } + } + + void TestReadSDSRawDataCollection::throwWithMessage(const char* msg) const { + throw cms::Exception("TestFailure") << "TestReadSDSRawDataCollection::analyze, " << msg; + } + + void TestReadSDSRawDataCollection::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add>("expectedSDSData1"); + desc.add>("expectedSDSData2"); + desc.add("sdsRawDataCollectionTag"); + descriptions.addDefault(desc); + } +} // namespace edmtest + +using 
edmtest::TestReadSDSRawDataCollection; +DEFINE_FWK_MODULE(TestReadSDSRawDataCollection); \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/test/TestSDSRawDataCollectionFormat.sh b/DataFormats/L1ScoutingRawData/test/TestSDSRawDataCollectionFormat.sh new file mode 100755 index 0000000000000..2b8cf681d377f --- /dev/null +++ b/DataFormats/L1ScoutingRawData/test/TestSDSRawDataCollectionFormat.sh @@ -0,0 +1,11 @@ +#!/bin/sh -ex + +function die { echo $1: status $2 ; exit $2; } + +LOCAL_TEST_DIR=${SCRAM_TEST_PATH} + +cmsRun ${LOCAL_TEST_DIR}/create_SDSRawDataCollection_test_file_cfg.py || die 'Failure using create_SDSRawDataCollection_test_file_cfg.py' $? + +file=testSDSRawDataCollection.root + +cmsRun ${LOCAL_TEST_DIR}/read_SDSRawDataCollection_cfg.py "$file" || die "Failure using read_SDSRawDataCollection_cfg.py $file" $? \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/test/TestWriteSDSRawDataCollection.cc b/DataFormats/L1ScoutingRawData/test/TestWriteSDSRawDataCollection.cc new file mode 100644 index 0000000000000..355958331c254 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/test/TestWriteSDSRawDataCollection.cc @@ -0,0 +1,63 @@ +#include "DataFormats/FEDRawData/interface/FEDRawData.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/EDPutToken.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include +#include +#include + +namespace edmtest { + + class TestWriteSDSRawDataCollection : public edm::global::EDProducer<> { + public: + 
TestWriteSDSRawDataCollection(edm::ParameterSet const&); + void produce(edm::StreamID, edm::Event&, edm::EventSetup const&) const override; + static void fillDescriptions(edm::ConfigurationDescriptions&); + + private: + std::vector sdsData1_; + std::vector sdsData2_; + edm::EDPutTokenT sdsRawDataCollectionPutToken_; + }; + + TestWriteSDSRawDataCollection::TestWriteSDSRawDataCollection(edm::ParameterSet const& iPSet) + : sdsData1_(iPSet.getParameter>("SDSData1")), + sdsData2_(iPSet.getParameter>("SDSData2")), + sdsRawDataCollectionPutToken_(produces()) {} + + void TestWriteSDSRawDataCollection::produce(edm::StreamID, edm::Event& iEvent, edm::EventSetup const&) const { + auto sdsRawDataCollection = std::make_unique(); + FEDRawData& fedData1 = sdsRawDataCollection->FEDData(1); + FEDRawData& fedData2 = sdsRawDataCollection->FEDData(2); + + fedData1.resize(sdsData1_.size(), 4); + unsigned char* dataPtr1 = fedData1.data(); + for (unsigned int i = 0; i < sdsData1_.size(); ++i) { + dataPtr1[i] = sdsData1_[i]; + } + fedData2.resize(sdsData2_.size(), 4); + unsigned char* dataPtr2 = fedData2.data(); + for (unsigned int i = 0; i < sdsData2_.size(); ++i) { + dataPtr2[i] = sdsData2_[i]; + } + iEvent.put(sdsRawDataCollectionPutToken_, std::move(sdsRawDataCollection)); + } + + void TestWriteSDSRawDataCollection::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add>("SDSData1"); + desc.add>("SDSData2"); + descriptions.addDefault(desc); + } +} // namespace edmtest + +using edmtest::TestWriteSDSRawDataCollection; +DEFINE_FWK_MODULE(TestWriteSDSRawDataCollection); \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/test/create_SDSRawDataCollection_test_file_cfg.py b/DataFormats/L1ScoutingRawData/test/create_SDSRawDataCollection_test_file_cfg.py new file mode 100644 index 0000000000000..f8dd0d6204f03 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/test/create_SDSRawDataCollection_test_file_cfg.py @@ -0,0 
+1,22 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("PROD") + +process.load("FWCore.MessageService.MessageLogger_cfi") + +process.source = cms.Source("EmptySource") +process.maxEvents.input = 1 + +process.sdsRawDataCollectionProducer = cms.EDProducer("TestWriteSDSRawDataCollection", + # Test values below are meaningless. We just make sure when we read + # we get the same values. + SDSData1 = cms.vuint32(0, 1, 2, 3), + SDSData2 = cms.vuint32(42, 43, 44, 45) +) + +process.out = cms.OutputModule("PoolOutputModule", + fileName = cms.untracked.string('testSDSRawDataCollection.root') +) + +process.path = cms.Path(process.sdsRawDataCollectionProducer) +process.endPath = cms.EndPath(process.out) \ No newline at end of file diff --git a/DataFormats/L1ScoutingRawData/test/read_SDSRawDataCollection_cfg.py b/DataFormats/L1ScoutingRawData/test/read_SDSRawDataCollection_cfg.py new file mode 100644 index 0000000000000..95e3e23d18142 --- /dev/null +++ b/DataFormats/L1ScoutingRawData/test/read_SDSRawDataCollection_cfg.py @@ -0,0 +1,21 @@ +import FWCore.ParameterSet.Config as cms +import sys + +process = cms.Process("READ") + +process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring("file:"+sys.argv[1])) +process.maxEvents.input = 1 + +process.testReadSDSDRawDataCollection = cms.EDAnalyzer("TestReadSDSRawDataCollection", + sdsRawDataCollectionTag = cms.InputTag("sdsRawDataCollectionProducer", "", "PROD"), + expectedSDSData1 = cms.vuint32(0, 1, 2, 3), + expectedSDSData2 = cms.vuint32(42, 43, 44, 45) +) + +process.out = cms.OutputModule("PoolOutputModule", + fileName = cms.untracked.string('testSDSRawDataCollection2.root') +) + +process.path = cms.Path(process.testReadSDSDRawDataCollection) + +process.endPath = cms.EndPath(process.out) \ No newline at end of file diff --git a/DataFormats/L1TCalorimeterPhase2/interface/Phase2L1CaloJet.h b/DataFormats/L1TCalorimeterPhase2/interface/Phase2L1CaloJet.h new file mode 100644 index 
0000000000000..bd0ca1a072be3 --- /dev/null +++ b/DataFormats/L1TCalorimeterPhase2/interface/Phase2L1CaloJet.h @@ -0,0 +1,101 @@ +#ifndef DataFormats_L1TCalorimeterPhase2_Phase2L1CaloJet_h +#define DataFormats_L1TCalorimeterPhase2_Phase2L1CaloJet_h + +#include +#include +#include +#include +#include "DataFormats/L1Trigger/interface/L1Candidate.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +namespace l1tp2 { + + class Phase2L1CaloJet : public l1t::L1Candidate { + public: + Phase2L1CaloJet() + : l1t::L1Candidate(), + jetEt_(0.), + tauEt_(0.), + jetIEta_(-99), + jetIPhi_(-99), + jetEta_(-99.), + jetPhi_(-99.), + towerEt_(0.), + towerIEta_(-99), + towerIPhi_(-99), + towerEta_(-99.), + towerPhi_(-99.){}; + + Phase2L1CaloJet(const PolarLorentzVector& p4, + float jetEt, + float tauEt, + int jetIEta, + int jetIPhi, + float jetEta, + float jetPhi, + float towerEt, + int towerIEta, + int towerIPhi, + float towerEta, + float towerPhi) + : l1t::L1Candidate(p4), + jetEt_(jetEt), + tauEt_(tauEt), + jetIEta_(jetIEta), + jetIPhi_(jetIPhi), + jetEta_(jetEta), + jetPhi_(jetPhi), + towerEt_(towerEt), + towerIEta_(towerIEta), + towerIPhi_(towerIPhi), + towerEta_(towerEta), + towerPhi_(towerPhi){}; + + inline float jetEt() const { return jetEt_; }; + inline float tauEt() const { return tauEt_; }; + inline int jetIEta() const { return jetIEta_; }; + inline int jetIPhi() const { return jetIPhi_; }; + inline float jetEta() const { return jetEta_; }; + inline float jetPhi() const { return jetPhi_; }; + inline float towerEt() const { return towerEt_; }; + inline int towerIEta() const { return towerIEta_; }; + inline int towerIPhi() const { return towerIPhi_; }; + inline float towerEta() const { return towerEta_; }; + inline float towerPhi() const { return towerPhi_; }; + + void setJetEt(float jetEtIn) { jetEt_ = jetEtIn; }; + void setTauEt(float tauEtIn) { tauEt_ = tauEtIn; }; + void setJetIEta(int jetIEtaIn) { jetIEta_ = jetIEtaIn; }; + void setJetIPhi(int jetIPhiIn) { 
jetIPhi_ = jetIPhiIn; }; + void setJetEta(float jetEtaIn) { jetEta_ = jetEtaIn; }; + void setJetPhi(float jetPhiIn) { jetPhi_ = jetPhiIn; }; + void setTowerEt(float towerEtIn) { towerEt_ = towerEtIn; }; + void setTowerIEta(int towerIEtaIn) { towerIEta_ = towerIEtaIn; }; + void setTowerIPhi(int towerIPhiIn) { towerIPhi_ = towerIPhiIn; }; + void setTowerEta(float towerEtaIn) { towerEta_ = towerEtaIn; }; + void setTowerPhi(float towerPhiIn) { towerPhi_ = towerPhiIn; }; + + private: + // ET + float jetEt_; + // Tau ET + float tauEt_; + // GCT ieta + int jetIEta_; + // GCT iphi + int jetIPhi_; + // Tower (real) eta + float jetEta_; + // Tower (real) phi + float jetPhi_; + float towerEt_; + int towerIEta_; + int towerIPhi_; + float towerEta_; + float towerPhi_; + }; + + // Concrete collection of output objects (with extra tuning information) + typedef std::vector Phase2L1CaloJetCollection; +} // namespace l1tp2 +#endif diff --git a/DataFormats/L1TCalorimeterPhase2/src/classes.h b/DataFormats/L1TCalorimeterPhase2/src/classes.h index 3c8bf41a2d059..864965f0305f2 100644 --- a/DataFormats/L1TCalorimeterPhase2/src/classes.h +++ b/DataFormats/L1TCalorimeterPhase2/src/classes.h @@ -15,3 +15,4 @@ #include "DataFormats/L1TCalorimeterPhase2/interface/DigitizedTowerCorrelator.h" #include "DataFormats/L1TCalorimeterPhase2/interface/DigitizedClusterGT.h" #include "DataFormats/L1TCalorimeterPhase2/interface/CaloPFCluster.h" +#include "DataFormats/L1TCalorimeterPhase2/interface/Phase2L1CaloJet.h" diff --git a/DataFormats/L1TCalorimeterPhase2/src/classes_def.xml b/DataFormats/L1TCalorimeterPhase2/src/classes_def.xml index 2304b10e401a0..c5bb302022fba 100644 --- a/DataFormats/L1TCalorimeterPhase2/src/classes_def.xml +++ b/DataFormats/L1TCalorimeterPhase2/src/classes_def.xml @@ -50,5 +50,12 @@ + + + + + + + diff --git a/DataFormats/L1TCorrelator/interface/TkElectron.h b/DataFormats/L1TCorrelator/interface/TkElectron.h index c5b70d79598fb..29a1df62b023b 100644 --- 
a/DataFormats/L1TCorrelator/interface/TkElectron.h +++ b/DataFormats/L1TCorrelator/interface/TkElectron.h @@ -7,7 +7,6 @@ // Class : TkEm // -#include "DataFormats/Common/interface/Ref.h" #include "DataFormats/Common/interface/Ptr.h" #include "DataFormats/L1Trigger/interface/EGamma.h" @@ -29,27 +28,34 @@ namespace l1t { TkElectron(); TkElectron(const LorentzVector& p4, - const edm::Ref& egRef, + const edm::Ptr& egCaloPtr, const edm::Ptr& trkPtr, float tkisol = -999.); + TkElectron(const LorentzVector& p4, float tkisol = -999.); + // ---------- const member functions --------------------- const edm::Ptr& trkPtr() const { return trkPtr_; } float trkzVtx() const { return trkzVtx_; } - double trackCurvature() const { return trackCurvature_; } float idScore() const { return idScore_; } // ---------- member functions --------------------------- + void setTrkPtr(const edm::Ptr& tkPtr) { trkPtr_ = tkPtr; } void setTrkzVtx(float TrkzVtx) { trkzVtx_ = TrkzVtx; } - void setTrackCurvature(double trackCurvature) { trackCurvature_ = trackCurvature; } void setIdScore(float score) { idScore_ = score; } + l1gt::Electron hwObj() const { + if (encoding() != HWEncoding::GT) { + throw cms::Exception("RuntimeError") << "TkElectron::hwObj : encoding is not in GT format!" 
<< std::endl; + } + return l1gt::Electron::unpack_ap(egBinaryWord()); + } + private: edm::Ptr trkPtr_; float trkzVtx_; - double trackCurvature_; float idScore_; }; } // namespace l1t diff --git a/DataFormats/L1TCorrelator/interface/TkEm.h b/DataFormats/L1TCorrelator/interface/TkEm.h index d3d9c36a41c7f..051a324a8e082 100644 --- a/DataFormats/L1TCorrelator/interface/TkEm.h +++ b/DataFormats/L1TCorrelator/interface/TkEm.h @@ -8,11 +8,14 @@ // #include "DataFormats/L1Trigger/interface/L1Candidate.h" -#include "DataFormats/Common/interface/Ref.h" +#include "DataFormats/Common/interface/Ptr.h" #include "DataFormats/L1Trigger/interface/EGamma.h" #include "DataFormats/L1TrackTrigger/interface/TTTypes.h" +#include "DataFormats/L1TParticleFlow/interface/gt_datatypes.h" +#include "FWCore/Utilities/interface/Exception.h" + #include namespace l1t { @@ -21,22 +24,15 @@ namespace l1t { public: TkEm(); - TkEm(const LorentzVector& p4, const edm::Ref& egRef, float tkisol = -999.); - - TkEm(const LorentzVector& p4, - const edm::Ref& egRef, - float tkisol = -999., - float tkisolPV = -999); - - // ---------- const member functions --------------------- + TkEm(const LorentzVector& p4, float tkisol = -999., float tkisolPV = -999); - const edm::Ref& EGRef() const { return egRef_; } + TkEm(const LorentzVector& p4, const edm::Ptr& egCaloPtr, float tkisol = -999., float tkisolPV = -999); - const double l1RefEta() const { return egRef_->eta(); } + enum class HWEncoding { None, CT, GT }; - const double l1RefPhi() const { return egRef_->phi(); } + // ---------- const member functions --------------------- - const double l1RefEt() const { return egRef_->et(); } + const edm::Ptr& egCaloPtr() const { return egCaloPtr_; } float trkIsol() const { return trkIsol_; } // not constrained to the PV, just track ptSum float trkIsolPV() const { return trkIsolPV_; } // constrained to the PV by DZ @@ -53,13 +49,21 @@ namespace l1t { void setPFIsolPV(float pfIsolPV) { pfIsolPV_ = pfIsolPV; } void 
setPuppiIsol(float puppiIsol) { puppiIsol_ = puppiIsol; } void setPuppiIsolPV(float puppiIsolPV) { puppiIsolPV_ = puppiIsolPV; } - void setEGRef(const edm::Ref& egRef) { egRef_ = egRef; } + void setEgCaloPtr(const edm::Ptr& egPtr) { egCaloPtr_ = egPtr; } template - void setEgBinaryWord(ap_uint word) { + void setEgBinaryWord(ap_uint word, HWEncoding encoding) { egBinaryWord0_ = word; egBinaryWord1_ = (word >> 32); egBinaryWord2_ = (word >> 64); + encoding_ = encoding; + } + + l1gt::Photon hwObj() const { + if (encoding() != HWEncoding::GT) { + throw cms::Exception("RuntimeError") << "TkEm::hwObj : encoding is not in GT format!" << std::endl; + } + return l1gt::Photon::unpack_ap(egBinaryWord()); } template @@ -67,8 +71,10 @@ namespace l1t { return ap_uint(egBinaryWord0_) | (ap_uint(egBinaryWord1_) << 32) | (ap_uint(egBinaryWord2_) << 64); } + HWEncoding encoding() const { return encoding_; } + private: - edm::Ref egRef_; + edm::Ptr egCaloPtr_; float trkIsol_; float trkIsolPV_; float pfIsol_; @@ -78,6 +84,7 @@ namespace l1t { uint32_t egBinaryWord0_; uint32_t egBinaryWord1_; uint32_t egBinaryWord2_; + HWEncoding encoding_; }; } // namespace l1t diff --git a/DataFormats/L1TCorrelator/src/TkElectron.cc b/DataFormats/L1TCorrelator/src/TkElectron.cc index d587395584e92..6ceaa36165ccf 100644 --- a/DataFormats/L1TCorrelator/src/TkElectron.cc +++ b/DataFormats/L1TCorrelator/src/TkElectron.cc @@ -11,14 +11,14 @@ using namespace l1t; TkElectron::TkElectron() {} TkElectron::TkElectron(const LorentzVector& p4, - const edm::Ref& egRef, + const edm::Ptr& egCaloPtr, const edm::Ptr& trackPtr, float tkisol) - : TkEm(p4, egRef, tkisol, -999), - trkPtr_(trackPtr) - -{ + : TkEm(p4, egCaloPtr, tkisol, -999), trkPtr_(trackPtr) { if (trkPtr_.isNonnull()) { setTrkzVtx(trkPtr()->POCA().z()); } } + +TkElectron::TkElectron(const LorentzVector& p4, float tkisol) + : TkElectron(p4, edm::Ptr(), edm::Ptr(), tkisol) {} diff --git a/DataFormats/L1TCorrelator/src/TkEm.cc 
b/DataFormats/L1TCorrelator/src/TkEm.cc index 276f13c1ec0bf..9758a14285c52 100644 --- a/DataFormats/L1TCorrelator/src/TkEm.cc +++ b/DataFormats/L1TCorrelator/src/TkEm.cc @@ -5,17 +5,19 @@ // #include "DataFormats/L1TCorrelator/interface/TkEm.h" +// FIXME: can remove +#include "DataFormats/Common/interface/RefToPtr.h" using namespace l1t; TkEm::TkEm() {} -TkEm::TkEm(const LorentzVector& p4, const edm::Ref& egRef, float tkisol) - : TkEm(p4, egRef, tkisol, -999) {} +TkEm::TkEm(const LorentzVector& p4, float tkisol, float tkisolPV) + : TkEm(p4, edm::Ptr(), tkisol, tkisolPV) {} -TkEm::TkEm(const LorentzVector& p4, const edm::Ref& egRef, float tkisol, float tkisolPV) +TkEm::TkEm(const LorentzVector& p4, const edm::Ptr& egCaloPtr, float tkisol, float tkisolPV) : L1Candidate(p4), - egRef_(egRef), + egCaloPtr_(egCaloPtr), trkIsol_(tkisol), trkIsolPV_(tkisolPV), pfIsol_(-999), @@ -24,4 +26,5 @@ TkEm::TkEm(const LorentzVector& p4, const edm::Ref& egRef, f puppiIsolPV_(-999), egBinaryWord0_(0), egBinaryWord1_(0), - egBinaryWord2_(0) {} + egBinaryWord2_(0), + encoding_(HWEncoding::None) {} diff --git a/DataFormats/L1TCorrelator/src/classes_def.xml b/DataFormats/L1TCorrelator/src/classes_def.xml index 6b5acaf05cbc1..2846fcf2ba77a 100644 --- a/DataFormats/L1TCorrelator/src/classes_def.xml +++ b/DataFormats/L1TCorrelator/src/classes_def.xml @@ -10,7 +10,8 @@ - + + @@ -23,6 +24,12 @@ + + setEgCaloPtr(edm::refToPtr(onfile.egRef_)); + ]]> + + @@ -44,7 +51,9 @@ - + + + diff --git a/DataFormats/L1THGCal/interface/HGCalMulticluster.h b/DataFormats/L1THGCal/interface/HGCalMulticluster.h index 3df4591998e7f..d0d58a6d1021d 100644 --- a/DataFormats/L1THGCal/interface/HGCalMulticluster.h +++ b/DataFormats/L1THGCal/interface/HGCalMulticluster.h @@ -53,7 +53,7 @@ namespace l1t { private: template - struct KeyGetter : std::unary_function { + struct KeyGetter { const typename Iter::value_type::first_type& operator()(const typename Iter::value_type& p) const { return p.first; } diff --git 
a/DataFormats/L1TMuon/interface/L1MuKBMTCombinedStub.h b/DataFormats/L1TMuon/interface/L1MuKBMTCombinedStub.h index 7a561497d63a4..0498c338849fe 100644 --- a/DataFormats/L1TMuon/interface/L1MuKBMTCombinedStub.h +++ b/DataFormats/L1TMuon/interface/L1MuKBMTCombinedStub.h @@ -60,6 +60,9 @@ class L1MuKBMTCombinedStub { int eta2 = 0, int qeta1 = -1, int qeta2 = -1); + /// copy constructor + L1MuKBMTCombinedStub(const L1MuKBMTCombinedStub&) = default; + //destructor ~L1MuKBMTCombinedStub(); /// return wheel inline int whNum() const { return whNum_; } diff --git a/DataFormats/L1TParticleFlow/interface/datatypes.h b/DataFormats/L1TParticleFlow/interface/datatypes.h index fa20f70039990..561a66119673c 100644 --- a/DataFormats/L1TParticleFlow/interface/datatypes.h +++ b/DataFormats/L1TParticleFlow/interface/datatypes.h @@ -1,15 +1,6 @@ #ifndef DataFormats_L1TParticleFlow_datatypes_h #define DataFormats_L1TParticleFlow_datatypes_h -#if (!defined(__CLANG__)) && defined(__GNUC__) && defined(CMSSW_GIT_HASH) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif -#include -#if (!defined(__CLANG__)) && defined(__GNUC__) && defined(CMSSW_GIT_HASH) -#pragma GCC diagnostic pop -#endif - #include #include #include @@ -30,7 +21,7 @@ namespace l1ct { typedef ap_int<10> z0_t; // 40cm / 0.1 typedef ap_int<8> dxy_t; // tbd typedef ap_uint<3> tkquality_t; // tbd - typedef ap_uint<9> puppiWgt_t; // 256 = 1.0 + typedef ap_ufixed<9, 1, AP_RND_CONV, AP_WRAP> puppiWgt_t; typedef ap_uint<6> emid_t; typedef ap_uint<14> tk2em_dr_t; typedef ap_uint<14> tk2calo_dr_t; @@ -42,6 +33,7 @@ namespace l1ct { typedef ap_uint<8> meanz_t; // mean - MEANZ_OFFSET(= 320 cm) typedef ap_ufixed<10, 5, AP_TRN, AP_SAT> hoe_t; typedef ap_uint<4> redChi2Bin_t; + typedef ap_fixed<10, 1, AP_RND_CONV, AP_SAT> id_score_t; // ID score to be between -1 (background) and 1 (signal) // FIXME: adjust range 10-11bits -> 1/4 - 1/2TeV is probably more than enough for all reasonable use cases typedef 
ap_ufixed<11, 9, AP_TRN, AP_SAT> iso_t; @@ -161,8 +153,14 @@ namespace l1ct { inline float floatPt(pt_t pt) { return pt.to_float(); } inline float floatPt(dpt_t pt) { return pt.to_float(); } inline float floatPt(pt2_t pt2) { return pt2.to_float(); } - inline int intPt(pt_t pt) { return (ap_ufixed<16, 14>(pt) << 2).to_int(); } - inline int intPt(dpt_t pt) { return (ap_fixed<18, 16>(pt) << 2).to_int(); } + inline int intPt(pt_t pt) { + ap_uint rawPt = pt.range(); + return rawPt.to_int(); + } + inline int intPt(dpt_t pt) { + ap_int rawPt = pt.range(); + return rawPt.to_int(); + } inline float floatEta(eta_t eta) { return eta.to_float() * ETAPHI_LSB; } inline float floatPhi(phi_t phi) { return phi.to_float() * ETAPHI_LSB; } inline float floatEta(tkdeta_t eta) { return eta.to_float() * ETAPHI_LSB; } @@ -171,17 +169,18 @@ namespace l1ct { inline float floatPhi(glbphi_t phi) { return phi.to_float() * ETAPHI_LSB; } inline float floatZ0(z0_t z0) { return z0.to_float() * Z0_LSB; } inline float floatDxy(dxy_t dxy) { return dxy.to_float() * DXY_LSB; } - inline float floatPuppiW(puppiWgt_t puppiw) { return puppiw.to_float() * PUPPIW_LSB; } + inline float floatPuppiW(puppiWgt_t puppiw) { return puppiw.to_float(); } inline float floatIso(iso_t iso) { return iso.to_float(); } inline float floatSrrTot(srrtot_t srrtot) { return srrtot.to_float() / SRRTOT_SCALE; }; inline float floatMeanZ(meanz_t meanz) { return meanz + MEANZ_OFFSET; }; inline float floatHoe(hoe_t hoe) { return hoe.to_float(); }; + inline float floatIDScore(id_score_t score) { return score.to_float(); }; inline pt_t makePt(int pt) { return ap_ufixed<16, 14>(pt) >> 2; } inline dpt_t makeDPt(int dpt) { return ap_fixed<18, 16>(dpt) >> 2; } - inline pt_t makePtFromFloat(float pt) { return pt_t(0.25 * round(pt * 4)); } + inline pt_t makePtFromFloat(float pt) { return pt_t(0.25 * std::round(pt * 4)); } inline dpt_t makeDPtFromFloat(float dpt) { return dpt_t(dpt); } - inline z0_t makeZ0(float z0) { return z0_t(round(z0 / 
Z0_LSB)); } + inline z0_t makeZ0(float z0) { return z0_t(std::round(z0 / Z0_LSB)); } inline ap_uint ptToInt(pt_t pt) { // note: this can be synthethized, e.g. when pT is used as intex in a LUT @@ -214,7 +213,7 @@ namespace l1ct { inline float maxAbsPhi() { return ((1 << (phi_t::width - 1)) - 1) * ETAPHI_LSB; } inline float maxAbsGlbEta() { return ((1 << (glbeta_t::width - 1)) - 1) * ETAPHI_LSB; } inline float maxAbsGlbPhi() { return ((1 << (glbphi_t::width - 1)) - 1) * ETAPHI_LSB; } - }; // namespace Scales + } // namespace Scales inline int dr2_int(eta_t eta1, phi_t phi1, eta_t eta2, phi_t phi2) { ap_int deta = (eta1 - eta2); diff --git a/DataFormats/L1TParticleFlow/interface/egamma.h b/DataFormats/L1TParticleFlow/interface/egamma.h index b385ba45aa30e..ea845d60e107d 100644 --- a/DataFormats/L1TParticleFlow/interface/egamma.h +++ b/DataFormats/L1TParticleFlow/interface/egamma.h @@ -85,17 +85,18 @@ namespace l1ct { // WARNING: for whatever reason, maybe connected with datamember alignment, // in 2019.2 synthesis fails if DEta & DPhi are put before Z0 & Dxy z0_t hwZ0; - tkdeta_t hwDEta; // relative to the region center, at calo - tkdphi_t hwDPhi; // relative to the region center, at calo + tkdeta_t hwDEta; + tkdphi_t hwDPhi; + id_score_t hwIDScore; bool hwCharge; - phi_t hwVtxPhi() const { return hwCharge ? hwPhi + hwDPhi : hwPhi - hwDPhi; } - eta_t hwVtxEta() const { return hwEta + hwDEta; } + glbphi_t hwVtxPhi() const { return hwCharge ? 
hwPhi + hwDPhi : hwPhi - hwDPhi; } + glbeta_t hwVtxEta() const { return hwEta + hwDEta; } inline bool operator==(const EGIsoEleObj &other) const { return hwPt == other.hwPt && hwEta == other.hwEta && hwPhi == other.hwPhi && hwQual == other.hwQual && hwIso == other.hwIso && hwDEta == other.hwDEta && hwDPhi == other.hwDPhi && hwZ0 == other.hwZ0 && - hwCharge == other.hwCharge; + hwIDScore == other.hwIDScore && hwCharge == other.hwCharge; } inline bool operator>(const EGIsoEleObj &other) const { return hwPt > other.hwPt; } @@ -110,6 +111,7 @@ namespace l1ct { hwDEta = 0; hwDPhi = 0; hwZ0 = 0; + hwIDScore = 0; hwCharge = false; } @@ -119,8 +121,10 @@ namespace l1ct { float floatVtxEta() const { return Scales::floatEta(hwVtxEta()); } float floatVtxPhi() const { return Scales::floatPhi(hwVtxPhi()); } float floatZ0() const { return Scales::floatZ0(hwZ0); } + float floatIDScore() const { return Scales::floatIDScore(hwIDScore); } - static const int BITWIDTH = EGIsoObj::BITWIDTH + tkdeta_t::width + tkdphi_t::width + z0_t::width + 1; + static const int BITWIDTH = + EGIsoObj::BITWIDTH + tkdeta_t::width + tkdphi_t::width + z0_t::width + id_score_t::width + 1; inline ap_uint pack() const { ap_uint ret; unsigned int start = 0; @@ -133,6 +137,7 @@ namespace l1ct { pack_into_bits(ret, start, hwDPhi); pack_into_bits(ret, start, hwZ0); pack_bool_into_bits(ret, start, hwCharge); + pack_into_bits(ret, start, hwIDScore); return ret; } inline static EGIsoEleObj unpack(const ap_uint &src) { @@ -152,16 +157,18 @@ namespace l1ct { unpack_from_bits(src, start, hwDPhi); unpack_from_bits(src, start, hwZ0); unpack_bool_from_bits(src, start, hwCharge); + unpack_from_bits(src, start, hwIDScore); } l1gt::Electron toGT() const { l1gt::Electron ele; ele.valid = hwPt != 0; ele.v3.pt = CTtoGT_pt(hwPt); - ele.v3.phi = CTtoGT_phi(hwPhi); - ele.v3.eta = CTtoGT_eta(hwEta); + ele.v3.phi = CTtoGT_phi(hwVtxPhi()); + ele.v3.eta = CTtoGT_eta(hwVtxEta()); ele.quality = hwQual; - ele.charge = hwCharge; + // 
NOTE: GT: 0 = positive, 1 = negative, CT: 0 = negative, 1 = positive + ele.charge = !hwCharge; ele.z0(l1ct::z0_t::width - 1, 0) = hwZ0(l1ct::z0_t::width - 1, 0); ele.isolation = hwIso; return ele; diff --git a/DataFormats/L1TParticleFlow/interface/gt_datatypes.h b/DataFormats/L1TParticleFlow/interface/gt_datatypes.h index 366532962e25d..5d6ea94fedf14 100644 --- a/DataFormats/L1TParticleFlow/interface/gt_datatypes.h +++ b/DataFormats/L1TParticleFlow/interface/gt_datatypes.h @@ -156,8 +156,8 @@ namespace l1gt { } static const int BITWIDTH = 64; - inline ap_uint pack() const { - ap_uint ret; + inline ap_uint pack_ap() const { + ap_uint ret(0); unsigned int start = 0; pack_into_bits(ret, start, valid); pack_into_bits(ret, start, vector_pt); @@ -166,12 +166,19 @@ namespace l1gt { return ret; } + inline uint64_t pack() const { + ap_uint x = pack_ap(); + return (uint64_t)x; + } + inline static Sum unpack_ap(const ap_uint &src) { Sum ret; ret.initFromBits(src); return ret; } + inline static Sum unpack(const uint64_t &src) { return unpack_ap(src); } + inline void initFromBits(const ap_uint &src) { unsigned int start = 0; unpack_from_bits(src, start, valid); @@ -194,7 +201,7 @@ namespace l1gt { static const int BITWIDTH = 128; inline ap_uint pack_ap() const { - ap_uint ret; + ap_uint ret(0); unsigned int start = 0; pack_into_bits(ret, start, valid); pack_into_bits(ret, start, v3.pack()); @@ -333,7 +340,7 @@ namespace l1gt { } inline static Photon unpack(const std::array &src, int parity) { - ap_uint bits; + ap_uint bits(0); if (parity == 0) { bits(63, 0) = src[0]; bits(95, 64) = src[1]; diff --git a/DataFormats/L1TParticleFlow/interface/layer1_emulator.h b/DataFormats/L1TParticleFlow/interface/layer1_emulator.h index 4a7709bbf52e0..a620edef6d3a3 100644 --- a/DataFormats/L1TParticleFlow/interface/layer1_emulator.h +++ b/DataFormats/L1TParticleFlow/interface/layer1_emulator.h @@ -158,14 +158,16 @@ namespace l1ct { struct EGIsoObjEmu : public EGIsoObj { const l1t::PFCluster 
*srcCluster; - // we use an index to the standalone object needed to retrieve a Ref when putting - int sta_idx; + + // NOTE: we use an index to the persistable RefPtr when we reshuffle collections + // this way we avoid complex object in the object interface which needs to be used in standalone programs + int src_idx; bool read(std::fstream &from); bool write(std::fstream &to) const; void clear() { EGIsoObj::clear(); srcCluster = nullptr; - sta_idx = -1; + src_idx = -1; clearIsoVars(); } @@ -193,17 +195,17 @@ namespace l1ct { struct EGIsoEleObjEmu : public EGIsoEleObj { const l1t::PFCluster *srcCluster = nullptr; const l1t::PFTrack *srcTrack = nullptr; - // we use an index to the standalone object needed to retrieve a Ref when putting - int sta_idx; - float idScore; + + // NOTE: we use an index to the persistable RefPtr when we reshuffle collections + // this way we avoid complex object in the object interface which needs to be used in standalone programs + int src_idx; bool read(std::fstream &from); bool write(std::fstream &to) const; void clear() { EGIsoEleObj::clear(); srcCluster = nullptr; srcTrack = nullptr; - sta_idx = -1; - idScore = -999; + src_idx = -1; clearIsoVars(); } @@ -335,7 +337,7 @@ namespace l1ct { }; struct Event { - enum { VERSION = 12 }; + enum { VERSION = 13 }; uint32_t run, lumi; uint64_t event; RawInputs raw; diff --git a/DataFormats/L1TParticleFlow/interface/puppi.h b/DataFormats/L1TParticleFlow/interface/puppi.h index e372b80172c22..07683065dcb94 100644 --- a/DataFormats/L1TParticleFlow/interface/puppi.h +++ b/DataFormats/L1TParticleFlow/interface/puppi.h @@ -74,7 +74,9 @@ namespace l1ct { #ifndef __SYNTHESIS__ assert(hwId.neutral()); #endif - return puppiWgt_t(hwData(BITS_PUPPIW_START + puppiWgt_t::width - 1, BITS_PUPPIW_START)); + puppiWgt_t ret; + ret(puppiWgt_t::width - 1, 0) = hwData(BITS_PUPPIW_START + puppiWgt_t::width - 1, BITS_PUPPIW_START); + return ret; } inline void setHwPuppiW(puppiWgt_t w) { @@ -84,11 +86,11 @@ namespace l1ct 
{ hwData(BITS_PUPPIW_START + puppiWgt_t::width - 1, BITS_PUPPIW_START) = w(puppiWgt_t::width - 1, 0); } - inline puppiWgt_t hwEmID() const { + inline emid_t hwEmID() const { #ifndef __SYNTHESIS__ assert(hwId.neutral()); #endif - return puppiWgt_t(hwData(BITS_EMID_START + emid_t::width - 1, BITS_EMID_START)); + return emid_t(hwData(BITS_EMID_START + emid_t::width - 1, BITS_EMID_START)); } inline void setHwEmID(emid_t w) { diff --git a/DataFormats/L1TParticleFlow/src/layer1_emulator.cpp b/DataFormats/L1TParticleFlow/src/layer1_emulator.cpp index 3f1284413bbf7..3af2dd4e3eece 100644 --- a/DataFormats/L1TParticleFlow/src/layer1_emulator.cpp +++ b/DataFormats/L1TParticleFlow/src/layer1_emulator.cpp @@ -77,7 +77,7 @@ bool l1ct::PuppiObjEmu::write(std::fstream& to) const { return writeObj(from, *this); } @@ -87,7 +87,7 @@ bool l1ct::EGIsoObjEmu::write(std::fstream& to) const { return writeObj(from, *this); } diff --git a/DataFormats/L1Trigger/interface/P2GTCandidate.h b/DataFormats/L1Trigger/interface/P2GTCandidate.h index 9fdae587d3c41..1f385b12816f0 100644 --- a/DataFormats/L1Trigger/interface/P2GTCandidate.h +++ b/DataFormats/L1Trigger/interface/P2GTCandidate.h @@ -256,6 +256,27 @@ namespace l1t { ObjectType objectType() const { return objectType_; } + // Nano SimpleCandidateFlatTableProducer accessor functions + int hwPT_toInt() const { return hwPT().to_int(); } + int hwPhi_toInt() const { return hwPhi().to_int(); } + int hwEta_toInt() const { return hwEta().to_int(); } + int hwZ0_toInt() const { return hwZ0().to_int(); } + int hwIso_toInt() const { return hwIso().to_int(); } + int hwQual_toInt() const { return hwQual().to_int(); } + int hwCharge_toInt() const { return hwCharge().to_int(); } + int hwD0_toInt() const { return hwD0().to_int(); } + int hwBeta_toInt() const { return hwBeta().to_int(); } + int hwMass_toInt() const { return hwMass().to_int(); } + int hwIndex_toInt() const { return hwIndex().to_int(); } + int hwSeed_pT_toInt() const { return 
hwSeed_pT().to_int(); } + int hwSeed_z0_toInt() const { return hwSeed_z0().to_int(); } + int hwSca_sum_toInt() const { return hwSca_sum().to_int(); } + int hwNumber_of_tracks_toInt() const { return hwNumber_of_tracks().to_int(); } + int hwSum_pT_pv_toInt() const { return hwSum_pT_pv().to_int(); } + int hwType_toInt() const { return hwType().to_int(); } + int hwNumber_of_tracks_in_pv_toInt() const { return hwNumber_of_tracks_in_pv().to_int(); } + int hwNumber_of_tracks_not_in_pv_toInt() const { return hwNumber_of_tracks_not_in_pv().to_int(); } + bool operator==(const P2GTCandidate& rhs) const; bool operator!=(const P2GTCandidate& rhs) const; diff --git a/DataFormats/METReco/interface/CorrMETData.h b/DataFormats/METReco/interface/CorrMETData.h index 54c895d63dc86..d46c99cb02815 100644 --- a/DataFormats/METReco/interface/CorrMETData.h +++ b/DataFormats/METReco/interface/CorrMETData.h @@ -21,6 +21,8 @@ struct CorrMETData { CorrMETData(const CorrMETData& corr) : mex(corr.mex), mey(corr.mey), sumet(corr.sumet) {} + CorrMETData& operator=(const CorrMETData&) = default; + CorrMETData& operator+=(const CorrMETData& rhs) { mex += rhs.mex; mey += rhs.mey; diff --git a/DataFormats/METReco/interface/HcalHaloData.h b/DataFormats/METReco/interface/HcalHaloData.h index c13694d54241b..1ad3f6e86026c 100644 --- a/DataFormats/METReco/interface/HcalHaloData.h +++ b/DataFormats/METReco/interface/HcalHaloData.h @@ -30,6 +30,7 @@ struct HaloTowerStrip { energyRatio = strip.energyRatio; emEt = strip.emEt; } + HaloTowerStrip& operator=(const HaloTowerStrip&) = default; }; namespace reco { diff --git a/DataFormats/MuonDetId/interface/DTBtiId.h b/DataFormats/MuonDetId/interface/DTBtiId.h index af70ee7c391e3..9289d87e13f1d 100644 --- a/DataFormats/MuonDetId/interface/DTBtiId.h +++ b/DataFormats/MuonDetId/interface/DTBtiId.h @@ -48,6 +48,9 @@ class DTBtiId { /// Constructor DTBtiId(const DTBtiId& btiId) : _suplId(btiId._suplId), _bti(btiId._bti) {} + // Assignment Operator + DTBtiId& 
operator=(const DTBtiId& btiId) = default; + /// Destructor virtual ~DTBtiId() {} diff --git a/DataFormats/MuonDetId/interface/DTChamberId.h b/DataFormats/MuonDetId/interface/DTChamberId.h index 2e8dbfd0ae36b..54ee5c47e545c 100644 --- a/DataFormats/MuonDetId/interface/DTChamberId.h +++ b/DataFormats/MuonDetId/interface/DTChamberId.h @@ -35,6 +35,9 @@ class DTChamberId : public DetId { /// this, no check is done on the vaildity of the values. DTChamberId(const DTChamberId& chId); + /// Assignment Operator. + DTChamberId& operator=(const DTChamberId& chId) = default; + /// Return the wheel number int wheel() const { return int((id_ >> wheelStartBit_) & wheelMask_) + minWheelId - 1; } diff --git a/DataFormats/MuonDetId/interface/DTLayerId.h b/DataFormats/MuonDetId/interface/DTLayerId.h index d933b7f6b70c9..887ad2ce4fa51 100644 --- a/DataFormats/MuonDetId/interface/DTLayerId.h +++ b/DataFormats/MuonDetId/interface/DTLayerId.h @@ -32,6 +32,9 @@ class DTLayerId : public DTSuperLayerId { /// this, no check is done on the vaildity of the values. DTLayerId(const DTLayerId& layerId); + /// Assignment Operator. + DTLayerId& operator=(const DTLayerId& layerId) = default; + /// Constructor from a camberId and SL and layer numbers DTLayerId(const DTChamberId& chId, int superlayer, int layer); diff --git a/DataFormats/MuonDetId/interface/DTSuperLayerId.h b/DataFormats/MuonDetId/interface/DTSuperLayerId.h index cb4dc7c835a91..bf51f86234a0a 100644 --- a/DataFormats/MuonDetId/interface/DTSuperLayerId.h +++ b/DataFormats/MuonDetId/interface/DTSuperLayerId.h @@ -32,6 +32,9 @@ class DTSuperLayerId : public DTChamberId { /// this, no check is done on the vaildity of the values. DTSuperLayerId(const DTSuperLayerId& slId); + /// Assignment Operator. + DTSuperLayerId& operator=(const DTSuperLayerId& slId) = default; + /// Constructor from a DTChamberId and SL number. 
DTSuperLayerId(const DTChamberId& chId, int superlayer); diff --git a/DataFormats/MuonDetId/interface/DTTracoId.h b/DataFormats/MuonDetId/interface/DTTracoId.h index 04507a0dc26dd..7695a8b96a678 100644 --- a/DataFormats/MuonDetId/interface/DTTracoId.h +++ b/DataFormats/MuonDetId/interface/DTTracoId.h @@ -46,6 +46,9 @@ class DTTracoId { /// Constructor DTTracoId(const DTTracoId& tracoId) : _statId(tracoId._statId), _traco(tracoId._traco) {} + // Assignment Operator + DTTracoId& operator=(const DTTracoId& tracoId) = default; + /// Destructor virtual ~DTTracoId() {} diff --git a/DataFormats/MuonDetId/interface/DTWireId.h b/DataFormats/MuonDetId/interface/DTWireId.h index 58e1213652ad1..c6f1a25d2bc04 100644 --- a/DataFormats/MuonDetId/interface/DTWireId.h +++ b/DataFormats/MuonDetId/interface/DTWireId.h @@ -29,6 +29,9 @@ class DTWireId : public DTLayerId { /// Copy Constructor. DTWireId(const DTWireId& wireId); + /// Assignment Operator. + DTWireId& operator=(const DTWireId& wireId) = default; + /// Constructor from a CamberId and SL, layer and wire numbers DTWireId(const DTChamberId& chId, int superlayer, int layer, int wire); diff --git a/DataFormats/MuonReco/src/Muon.cc b/DataFormats/MuonReco/src/Muon.cc index cf660d7abe957..afcaa8f55d3c0 100644 --- a/DataFormats/MuonReco/src/Muon.cc +++ b/DataFormats/MuonReco/src/Muon.cc @@ -68,9 +68,13 @@ int Muon::numberOfMatches(ArbitrationType type) const { continue; } if (type == GEMSegmentAndTrackArbitration) { - if (chamberMatch.gemMatches.empty()) - continue; - matches += chamberMatch.gemMatches.size(); + for (auto& segmentMatch : chamberMatch.gemMatches) { + if (segmentMatch.isMask(MuonSegmentMatch::BestInChamberByDR) && + segmentMatch.isMask(MuonSegmentMatch::BelongsToTrackByDR)) { + matches++; + break; + } + } continue; } @@ -81,13 +85,15 @@ int Muon::numberOfMatches(ArbitrationType type) const { continue; } - if (chamberMatch.segmentMatches.empty()) + if (chamberMatch.gemMatches.empty() and 
chamberMatch.segmentMatches.empty()) continue; if (type == NoArbitration) { matches++; continue; } + if (chamberMatch.segmentMatches.empty()) + continue; for (auto& segmentMatch : chamberMatch.segmentMatches) { if (type == SegmentArbitration) if (segmentMatch.isMask(MuonSegmentMatch::BestInChamberByDR)) { diff --git a/DataFormats/NanoAOD/interface/FlatTable.h b/DataFormats/NanoAOD/interface/FlatTable.h index 1b2f791c6f9e2..e6b574656e742 100644 --- a/DataFormats/NanoAOD/interface/FlatTable.h +++ b/DataFormats/NanoAOD/interface/FlatTable.h @@ -37,8 +37,8 @@ namespace nanoaod { class FlatTable { public: + //Int8, //removed due to mis-interpretation in ROOT/pyroot enum class ColumnType { - Int8, UInt8, Int16, UInt16, @@ -140,9 +140,7 @@ namespace nanoaod { struct dependent_false : std::false_type {}; template static ColumnType defaultColumnType() { - if constexpr (std::is_same()) - return ColumnType::Int8; - else if constexpr (std::is_same()) + if constexpr (std::is_same()) return ColumnType::UInt8; else if constexpr (std::is_same()) return ColumnType::Int16; @@ -194,9 +192,7 @@ namespace nanoaod { template static auto &bigVectorImpl(This &table) { // helper function to avoid code duplication, for the two accessor functions that differ only in const-ness - if constexpr (std::is_same()) - return table.int8s_; - else if constexpr (std::is_same()) + if constexpr (std::is_same()) return table.uint8s_; else if constexpr (std::is_same()) return table.int16s_; @@ -220,7 +216,6 @@ namespace nanoaod { std::string name_, doc_; bool singleton_, extension_; std::vector columns_; - std::vector int8s_; std::vector uint8s_; std::vector int16s_; std::vector uint16s_; diff --git a/DataFormats/NanoAOD/src/FlatTable.cc b/DataFormats/NanoAOD/src/FlatTable.cc index 1565040b86b28..38f43fafe2927 100644 --- a/DataFormats/NanoAOD/src/FlatTable.cc +++ b/DataFormats/NanoAOD/src/FlatTable.cc @@ -13,9 +13,6 @@ void nanoaod::FlatTable::addExtension(const nanoaod::FlatTable& other) { throw 
cms::Exception("LogicError", "Mismatch in adding extension"); for (unsigned int i = 0, n = other.nColumns(); i < n; ++i) { switch (other.columnType(i)) { - case ColumnType::Int8: - addColumn(other.columnName(i), other.columnData(i), other.columnDoc(i)); - break; case ColumnType::UInt8: addColumn(other.columnName(i), other.columnData(i), other.columnDoc(i)); break; @@ -50,8 +47,6 @@ double nanoaod::FlatTable::getAnyValue(unsigned int row, unsigned int column) co if (column >= nColumns()) throw cms::Exception("LogicError", "Invalid column"); switch (columnType(column)) { - case ColumnType::Int8: - return *(beginData(column) + row); case ColumnType::UInt8: return *(beginData(column) + row); case ColumnType::Int16: diff --git a/DataFormats/NanoAOD/src/classes_def.xml b/DataFormats/NanoAOD/src/classes_def.xml index 582d353813d4d..4622b201ae460 100644 --- a/DataFormats/NanoAOD/src/classes_def.xml +++ b/DataFormats/NanoAOD/src/classes_def.xml @@ -3,11 +3,12 @@ - + + - - - + + + diff --git a/DataFormats/ParticleFlowReco/interface/PFClusterHostCollection.h b/DataFormats/ParticleFlowReco/interface/PFClusterHostCollection.h new file mode 100644 index 0000000000000..72d8ea2706868 --- /dev/null +++ b/DataFormats/ParticleFlowReco/interface/PFClusterHostCollection.h @@ -0,0 +1,13 @@ +#ifndef DataFormats_ParticleFlowReco_interface_PFClusterHostCollection_h +#define DataFormats_ParticleFlowReco_interface_PFClusterHostCollection_h + +#include "DataFormats/ParticleFlowReco/interface/PFClusterSoA.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" + +namespace reco { + + using PFClusterHostCollection = PortableHostCollection; + +} // namespace reco + +#endif // DataFormats_ParticleFlowReco_interface_PFClusterHostCollection_h diff --git a/DataFormats/ParticleFlowReco/interface/PFClusterSoA.h b/DataFormats/ParticleFlowReco/interface/PFClusterSoA.h new file mode 100644 index 0000000000000..7663454cc42b3 --- /dev/null +++ 
b/DataFormats/ParticleFlowReco/interface/PFClusterSoA.h @@ -0,0 +1,29 @@ +#ifndef DataFormats_ParticleFlowReco_interface_PFClusterSoA_h +#define DataFormats_ParticleFlowReco_interface_PFClusterSoA_h + +#include "DataFormats/SoATemplate/interface/SoACommon.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SoATemplate/interface/SoAView.h" + +namespace reco { + + GENERATE_SOA_LAYOUT(PFClusterSoALayout, + SOA_COLUMN(int, depth), + SOA_COLUMN(int, seedRHIdx), + SOA_COLUMN(int, topoId), + SOA_COLUMN(int, rhfracSize), + SOA_COLUMN(int, rhfracOffset), + SOA_COLUMN(float, energy), + SOA_COLUMN(float, x), + SOA_COLUMN(float, y), + SOA_COLUMN(float, z), + SOA_COLUMN(int, topoRHCount), + SOA_SCALAR(int, nTopos), + SOA_SCALAR(int, nSeeds), + SOA_SCALAR(int, nRHFracs), + SOA_SCALAR(int, size) // nRH + ) + using PFClusterSoA = PFClusterSoALayout<>; +} // namespace reco + +#endif // DataFormats_ParticleFlowReco_interface_PFClusterSoA_h diff --git a/DataFormats/ParticleFlowReco/interface/PFRecHitFractionHostCollection.h b/DataFormats/ParticleFlowReco/interface/PFRecHitFractionHostCollection.h new file mode 100644 index 0000000000000..747026710f898 --- /dev/null +++ b/DataFormats/ParticleFlowReco/interface/PFRecHitFractionHostCollection.h @@ -0,0 +1,11 @@ +#ifndef DataFormats_ParticleFlowReco_interface_PFRecHitFractionHostCollection_h +#define DataFormats_ParticleFlowReco_interface_PFRecHitFractionHostCollection_h + +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" + +namespace reco { + using PFRecHitFractionHostCollection = PortableHostCollection; +} + +#endif // DataFormats_ParticleFlowReco_interface_PFRecHitFractionHostCollection_h diff --git a/DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h b/DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h new file mode 100644 index 0000000000000..00903b2985487 --- /dev/null +++ 
b/DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h @@ -0,0 +1,18 @@ +#ifndef DataFormats_ParticleFlowReco_interface_PFRecHitFractionSoA_h +#define DataFormats_ParticleFlowReco_interface_PFRecHitFractionSoA_h + +#include "DataFormats/SoATemplate/interface/SoACommon.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SoATemplate/interface/SoAView.h" + +namespace reco { + + GENERATE_SOA_LAYOUT(PFRecHitFractionSoALayout, + SOA_COLUMN(float, frac), + SOA_COLUMN(int, pfrhIdx), + SOA_COLUMN(int, pfcIdx)) + + using PFRecHitFractionSoA = PFRecHitFractionSoALayout<>; +} // namespace reco + +#endif // DataFormats_ParticleFlowReco_interface_PFRecHitFractionSoA_h diff --git a/DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h b/DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h index 19dd4c54fca8d..cdb12377e4019 100644 --- a/DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h +++ b/DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h @@ -14,6 +14,7 @@ namespace reco { using PFRecHitsNeighbours = Eigen::Matrix; GENERATE_SOA_LAYOUT(PFRecHitSoALayout, SOA_COLUMN(uint32_t, detId), + SOA_COLUMN(uint32_t, denseId), SOA_COLUMN(float, energy), SOA_COLUMN(float, time), SOA_COLUMN(int, depth), diff --git a/DataFormats/ParticleFlowReco/interface/PFTrack.h b/DataFormats/ParticleFlowReco/interface/PFTrack.h index 16d5f93cb6799..4a7d3edd04ffb 100644 --- a/DataFormats/ParticleFlowReco/interface/PFTrack.h +++ b/DataFormats/ParticleFlowReco/interface/PFTrack.h @@ -68,6 +68,8 @@ namespace reco { PFTrack(const PFTrack& other); + PFTrack& operator=(const PFTrack& other) = default; + /// add a trajectory measurement /// \todo throw an exception if the number of points is too large void addPoint(const reco::PFTrajectoryPoint& trajPt); diff --git a/DataFormats/ParticleFlowReco/interface/alpaka/CaloRecHitDeviceCollection.h b/DataFormats/ParticleFlowReco/interface/alpaka/CaloRecHitDeviceCollection.h index 0116e8408add9..3e2c54ca22135 100644 --- 
a/DataFormats/ParticleFlowReco/interface/alpaka/CaloRecHitDeviceCollection.h +++ b/DataFormats/ParticleFlowReco/interface/alpaka/CaloRecHitDeviceCollection.h @@ -12,4 +12,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { } // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco +// check that the portable device collection for the host device is the same as the portable host collection +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(reco::CaloRecHitDeviceCollection, reco::CaloRecHitHostCollection); + #endif // DataFormats_ParticleFlowReco_interface_alpaka_CaloRecHitDeviceCollection_h diff --git a/DataFormats/ParticleFlowReco/interface/alpaka/PFClusterDeviceCollection.h b/DataFormats/ParticleFlowReco/interface/alpaka/PFClusterDeviceCollection.h new file mode 100644 index 0000000000000..695b1f540c51b --- /dev/null +++ b/DataFormats/ParticleFlowReco/interface/alpaka/PFClusterDeviceCollection.h @@ -0,0 +1,18 @@ +#ifndef DataFormats_ParticleFlowReco_interface_alpaka_PFClusterDeviceCollection_h +#define DataFormats_ParticleFlowReco_interface_alpaka_PFClusterDeviceCollection_h + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterHostCollection.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { + + using ::reco::PFClusterHostCollection; + + using PFClusterDeviceCollection = PortableCollection<::reco::PFClusterSoA>; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco + +// check that the portable device collection for the host device is the same as the portable host collection +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(reco::PFClusterDeviceCollection, reco::PFClusterHostCollection); + +#endif // DataFormats_ParticleFlowReco_interface_alpaka_PFClusterDeviceCollection_h diff --git a/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h b/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h index 
3ab6a82b3ad2e..70b5bb6d94093 100644 --- a/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h +++ b/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h @@ -13,4 +13,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { } // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco +// check that the portable device collection for the host device is the same as the portable host collection +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(reco::PFRecHitDeviceCollection, reco::PFRecHitHostCollection); + #endif // DataFormats_ParticleFlowReco_interface_alpaka_PFRecHitDeviceCollection_h diff --git a/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitFractionDeviceCollection.h b/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitFractionDeviceCollection.h new file mode 100644 index 0000000000000..f59631c7ff229 --- /dev/null +++ b/DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitFractionDeviceCollection.h @@ -0,0 +1,18 @@ +#ifndef DataFormats_ParticleFlowReco_interface_alpaka_PFRecHitFractionDeviceCollection_h +#define DataFormats_ParticleFlowReco_interface_alpaka_PFRecHitFractionDeviceCollection_h + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionHostCollection.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { + + using ::reco::PFRecHitFractionHostCollection; + + using PFRecHitFractionDeviceCollection = PortableCollection<::reco::PFRecHitFractionSoA>; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco + +// check that the portable device collection for the host device is the same as the portable host collection +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(reco::PFRecHitFractionDeviceCollection, reco::PFRecHitFractionHostCollection); + +#endif // DataFormats_ParticleFlowReco_interface_alpaka_PFRecHitFractionDeviceCollection_h diff --git 
a/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda.h b/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda.h index dfb7c13f09d75..74c3591453e22 100644 --- a/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda.h +++ b/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda.h @@ -2,5 +2,9 @@ #include "DataFormats/Common/interface/Wrapper.h" #include "DataFormats/ParticleFlowReco/interface/CaloRecHitSoA.h" #include "DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h" #include "DataFormats/ParticleFlowReco/interface/alpaka/CaloRecHitDeviceCollection.h" #include "DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h" +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFClusterDeviceCollection.h" +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitFractionDeviceCollection.h" diff --git a/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda_def.xml b/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda_def.xml index c7ebfa6a0907a..758656fe0df20 100644 --- a/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda_def.xml +++ b/DataFormats/ParticleFlowReco/src/alpaka/classes_cuda_def.xml @@ -6,5 +6,13 @@ + + + + + + + + diff --git a/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm.h b/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm.h index dfb7c13f09d75..74c3591453e22 100644 --- a/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm.h +++ b/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm.h @@ -2,5 +2,9 @@ #include "DataFormats/Common/interface/Wrapper.h" #include "DataFormats/ParticleFlowReco/interface/CaloRecHitSoA.h" #include "DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h" #include 
"DataFormats/ParticleFlowReco/interface/alpaka/CaloRecHitDeviceCollection.h" #include "DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h" +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFClusterDeviceCollection.h" +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitFractionDeviceCollection.h" diff --git a/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm_def.xml b/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm_def.xml index 169feda6dc59f..5057d843a8b53 100644 --- a/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm_def.xml +++ b/DataFormats/ParticleFlowReco/src/alpaka/classes_rocm_def.xml @@ -6,4 +6,12 @@ + + + + + + + + diff --git a/DataFormats/ParticleFlowReco/src/classes_serial.h b/DataFormats/ParticleFlowReco/src/classes_serial.h index 18e35ca251c61..cf1fb1569a577 100644 --- a/DataFormats/ParticleFlowReco/src/classes_serial.h +++ b/DataFormats/ParticleFlowReco/src/classes_serial.h @@ -3,3 +3,8 @@ #include "DataFormats/ParticleFlowReco/interface/CaloRecHitSoA.h" #include "DataFormats/ParticleFlowReco/interface/PFRecHitHostCollection.h" #include "DataFormats/ParticleFlowReco/interface/PFRecHitSoA.h" + +#include "DataFormats/ParticleFlowReco/interface/PFClusterSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterHostCollection.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionSoA.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionHostCollection.h" diff --git a/DataFormats/ParticleFlowReco/src/classes_serial_def.xml b/DataFormats/ParticleFlowReco/src/classes_serial_def.xml index 9a97cc3b50c0b..1eb1e7c34c123 100644 --- a/DataFormats/ParticleFlowReco/src/classes_serial_def.xml +++ b/DataFormats/ParticleFlowReco/src/classes_serial_def.xml @@ -30,4 +30,36 @@ ]]> + + + + + + + + + + + + + + + + diff --git a/DataFormats/PatCandidates/interface/Muon.h b/DataFormats/PatCandidates/interface/Muon.h index 5f34670e14e69..144985c46534e 100644 --- 
a/DataFormats/PatCandidates/interface/Muon.h +++ b/DataFormats/PatCandidates/interface/Muon.h @@ -285,6 +285,8 @@ namespace pat { /// Soft Muon MVA float softMvaValue() const { return softMvaValue_; } void setSoftMvaValue(float softmva) { softMvaValue_ = softmva; } + float softMvaRun3Value() const { return softMvaRun3Value_; } + void setSoftMvaRun3Value(float softmva) { softMvaRun3Value_ = softmva; } /// Muon MVA ID float mvaIDValue() const { return mvaIDValue_; } @@ -414,6 +416,7 @@ namespace pat { /// Muon MVA float mvaIDValue_; float softMvaValue_; + float softMvaRun3Value_ = 0; /// Inverse beta float inverseBeta_; diff --git a/DataFormats/PatCandidates/src/classes_def_objects.xml b/DataFormats/PatCandidates/src/classes_def_objects.xml index e564c35f61d2f..b02c833fce574 100644 --- a/DataFormats/PatCandidates/src/classes_def_objects.xml +++ b/DataFormats/PatCandidates/src/classes_def_objects.xml @@ -67,7 +67,8 @@ - + + diff --git a/DataFormats/Portable/interface/PortableCollection.h b/DataFormats/Portable/interface/PortableCollection.h index 86d117c02c81d..abc64b99cb0d3 100644 --- a/DataFormats/Portable/interface/PortableCollection.h +++ b/DataFormats/Portable/interface/PortableCollection.h @@ -1,13 +1,26 @@ #ifndef DataFormats_Portable_interface_PortableCollection_h #define DataFormats_Portable_interface_PortableCollection_h -#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include + +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" namespace traits { // trait for a generic SoA-based product template >> - class PortableCollectionTrait; + struct PortableCollectionTrait { + using CollectionType = PortableDeviceCollection; + }; + + // specialise for host device + template + struct PortableCollectionTrait { + using 
CollectionType = PortableHostCollection; + }; } // namespace traits @@ -15,4 +28,28 @@ namespace traits { template >> using PortableCollection = typename traits::PortableCollectionTrait::CollectionType; +// define how to copy PortableCollection between host and device +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue& queue, PortableDeviceCollection const& srcData) { + PortableHostCollection dstData(srcData->metadata().size(), queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + return dstData; + } + }; + + template + struct CopyToDevice> { + template + static auto copyAsync(TQueue& queue, PortableHostCollection const& srcData) { + using TDevice = typename alpaka::trait::DevType::type; + PortableDeviceCollection dstData(srcData->metadata().size(), queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + return dstData; + } + }; +} // namespace cms::alpakatools + #endif // DataFormats_Portable_interface_PortableCollection_h diff --git a/DataFormats/Portable/interface/PortableObject.h b/DataFormats/Portable/interface/PortableObject.h index 90a33b49d0f0a..c9aadb160bb05 100644 --- a/DataFormats/Portable/interface/PortableObject.h +++ b/DataFormats/Portable/interface/PortableObject.h @@ -3,18 +3,55 @@ #include -#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include + +#include "DataFormats/Portable/interface/PortableHostObject.h" +#include "DataFormats/Portable/interface/PortableDeviceObject.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" namespace traits { - // trait for a generic SoA-based product + // trait for a generic struct-based product template >> - class PortableObjectTrait; + struct PortableObjectTrait { + using ProductType = PortableDeviceObject; + }; + + // specialise for host device + template + struct PortableObjectTrait { + using ProductType = 
PortableHostObject; + }; } // namespace traits -// type alias for a generic SoA-based product +// type alias for a generic struct-based product template >> using PortableObject = typename traits::PortableObjectTrait::ProductType; +// define how to copy PortableObject between host and device +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue& queue, PortableDeviceObject const& srcData) { + PortableHostObject dstData(queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + return dstData; + } + }; + + template + struct CopyToDevice> { + template + static auto copyAsync(TQueue& queue, PortableHostObject const& srcData) { + using TDevice = typename alpaka::trait::DevType::type; + PortableDeviceObject dstData(queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + return dstData; + } + }; +} // namespace cms::alpakatools + #endif // DataFormats_Portable_interface_PortableObject_h diff --git a/DataFormats/Portable/interface/alpaka/PortableCollection.h b/DataFormats/Portable/interface/alpaka/PortableCollection.h index 0a6abad96dfaf..1f9fa22e49cd8 100644 --- a/DataFormats/Portable/interface/alpaka/PortableCollection.h +++ b/DataFormats/Portable/interface/alpaka/PortableCollection.h @@ -4,11 +4,7 @@ #include #include "DataFormats/Portable/interface/PortableCollection.h" -#include "DataFormats/Portable/interface/PortableHostCollection.h" -#include "DataFormats/Portable/interface/PortableDeviceCollection.h" #include "HeterogeneousCore/AlpakaInterface/interface/config.h" -#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" -#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" // This header is not used by PortableCollection, but is included here to automatically // provide its content to users of ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection. @@ -16,54 +12,10 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { -#if defined ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLED - // ... 
or any other CPU-based accelerators - - // generic SoA-based product in host memory - template - using PortableCollection = ::PortableHostCollection; - -#else - - // generic SoA-based product in device memory + // generic SoA-based product in the device (that may be host) memory template - using PortableCollection = ::PortableDeviceCollection; - -#endif // ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLED + using PortableCollection = ::PortableCollection; } // namespace ALPAKA_ACCELERATOR_NAMESPACE -namespace traits { - - // specialise the trait for the device provided by the ALPAKA_ACCELERATOR_NAMESPACE - template - class PortableCollectionTrait { - using CollectionType = ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection; - }; - -} // namespace traits - -namespace cms::alpakatools { - template - struct CopyToHost> { - template - static auto copyAsync(TQueue& queue, PortableDeviceCollection const& srcData) { - PortableHostCollection dstData(srcData->metadata().size(), queue); - alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); - return dstData; - } - }; - - template - struct CopyToDevice> { - template - static auto copyAsync(TQueue& queue, PortableHostCollection const& srcData) { - using TDevice = typename alpaka::trait::DevType::type; - PortableDeviceCollection dstData(srcData->metadata().size(), queue); - alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); - return dstData; - } - }; -} // namespace cms::alpakatools - #endif // DataFormats_Portable_interface_alpaka_PortableCollection_h diff --git a/DataFormats/Portable/interface/alpaka/PortableObject.h b/DataFormats/Portable/interface/alpaka/PortableObject.h index 9b7ba65f8a460..417173176b203 100644 --- a/DataFormats/Portable/interface/alpaka/PortableObject.h +++ b/DataFormats/Portable/interface/alpaka/PortableObject.h @@ -4,11 +4,7 @@ #include #include "DataFormats/Portable/interface/PortableObject.h" -#include "DataFormats/Portable/interface/PortableHostObject.h" -#include 
"DataFormats/Portable/interface/PortableDeviceObject.h" #include "HeterogeneousCore/AlpakaInterface/interface/config.h" -#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" -#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" // This header is not used by PortableObject, but is included here to automatically // provide its content to users of ALPAKA_ACCELERATOR_NAMESPACE::PortableObject. @@ -16,54 +12,10 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { -#if defined ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLED - // ... or any other CPU-based accelerators - - // generic SoA-based product in host memory - template - using PortableObject = ::PortableHostObject; - -#else - - // generic SoA-based product in device memory + // generic struct-based product in the device (that may be host) memory template - using PortableObject = ::PortableDeviceObject; - -#endif // ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLED + using PortableObject = ::PortableObject; } // namespace ALPAKA_ACCELERATOR_NAMESPACE -namespace traits { - - // specialise the trait for the device provided by the ALPAKA_ACCELERATOR_NAMESPACE - template - class PortableObjectTrait { - using ProductType = ALPAKA_ACCELERATOR_NAMESPACE::PortableObject; - }; - -} // namespace traits - -namespace cms::alpakatools { - template - struct CopyToHost> { - template - static auto copyAsync(TQueue& queue, PortableDeviceObject const& srcData) { - PortableHostObject dstData(queue); - alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); - return dstData; - } - }; - - template - struct CopyToDevice> { - template - static auto copyAsync(TQueue& queue, PortableHostObject const& srcData) { - using TDevice = typename alpaka::trait::DevType::type; - PortableDeviceObject dstData(queue); - alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); - return dstData; - } - }; -} // namespace cms::alpakatools - #endif // DataFormats_Portable_interface_alpaka_PortableObject_h diff --git a/DataFormats/Portable/test/BuildFile.xml 
b/DataFormats/Portable/test/BuildFile.xml new file mode 100644 index 0000000000000..ef2f6603f62cc --- /dev/null +++ b/DataFormats/Portable/test/BuildFile.xml @@ -0,0 +1,5 @@ + + + + + diff --git a/DataFormats/Portable/test/portableCollectionOnHost.cc b/DataFormats/Portable/test/portableCollectionOnHost.cc new file mode 100644 index 0000000000000..aa3d56f9d0539 --- /dev/null +++ b/DataFormats/Portable/test/portableCollectionOnHost.cc @@ -0,0 +1,25 @@ +#include + +#include "DataFormats/Portable/interface/PortableCollection.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/SoATemplate/interface/SoACommon.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SoATemplate/interface/SoAView.h" + +namespace { + GENERATE_SOA_LAYOUT(TestLayout, SOA_COLUMN(double, x), SOA_COLUMN(int32_t, id)) + + using TestSoA = TestLayout<>; + + constexpr auto s_tag = "[PortableCollection]"; +} // namespace + +// This test is currently mostly about the code compiling +TEST_CASE("Use of PortableCollection on host code", s_tag) { + auto const size = 10; + PortableCollection coll(size, cms::alpakatools::host()); + + SECTION("Tests") { REQUIRE(coll->metadata().size() == size); } + + static_assert(std::is_same_v, PortableHostCollection>); +} diff --git a/DataFormats/Portable/test/portableObjectOnHost.cc b/DataFormats/Portable/test/portableObjectOnHost.cc new file mode 100644 index 0000000000000..698605b57f465 --- /dev/null +++ b/DataFormats/Portable/test/portableObjectOnHost.cc @@ -0,0 +1,23 @@ +#include + +#include "DataFormats/Portable/interface/PortableObject.h" +#include "DataFormats/Portable/interface/PortableHostObject.h" + +namespace { + struct Test { + int a; + float b; + }; + + constexpr auto s_tag = "[PortableObject]"; +} // namespace + +// This test is currently mostly about the code compiling +TEST_CASE("Use of PortableObject on host code", s_tag) { + PortableObject obj(cms::alpakatools::host()); + obj->a = 42; 
+ + SECTION("Tests") { REQUIRE(obj->a == 42); } + + static_assert(std::is_same_v, PortableHostObject>); +} diff --git a/DataFormats/Portable/test/test_catch2_main.cc b/DataFormats/Portable/test/test_catch2_main.cc new file mode 100644 index 0000000000000..b3143fbb1788b --- /dev/null +++ b/DataFormats/Portable/test/test_catch2_main.cc @@ -0,0 +1,2 @@ +#define CATCH_CONFIG_MAIN +#include diff --git a/DataFormats/Scalers/test/ScalersRecover.cc b/DataFormats/Scalers/test/ScalersRecover.cc deleted file mode 100644 index 3ad8e549c97b6..0000000000000 --- a/DataFormats/Scalers/test/ScalersRecover.cc +++ /dev/null @@ -1,215 +0,0 @@ -// -*- C++ -*- -// -// Package: ScalersRecover -// Class: ScalersRecover -// -/**\class ScalersRecover ScalersRecover.cc DataFormats/ScalersRecover/src/ScalersRecover.cc - - Description: EDAnalyzer to fetch trigger scalers and convert it to SQL - - Implementation: - This module may be used to recover L1 trigger scalers data from - CMS raw data files, and convert it to a series of SQL INSERT statements - that can be used to back-populate the corresponding L1 database - tables. Should be performed on a run-by-run basis as necessary. - First data file processed MUST contain data from the first lumi - sections. In general, the files should be in lumi section order. - - We recommend running the job on MinBias RECO files. If you - run with RAW files, you will have to include the ScalerRawToDigi - conversion module. 
- - The resulting SQL commands will be contained in a file - named scalers.sql -*/ -// -// Original Author: William Badgett -// Created: Mon May 24 14:45:17 CEST 2010 -// -// - - -// system include files -#include - -// user include files -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/MakerMacros.h" - -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "DataFormats/Scalers/interface/Level1TriggerScalers.h" -#include -#include "DataFormats/Scalers/interface/TimeSpec.h" -// -// class declaration -// - -class ScalersRecover : public edm::EDAnalyzer -{ - public: - explicit ScalersRecover(const edm::ParameterSet&); - ~ScalersRecover(); - - - private: - virtual void beginJob() ; - virtual void analyze(const edm::Event&, const edm::EventSetup&); - virtual void endJob() ; - - int lastLumiSection; - FILE * sql; -}; - -// Constructor -ScalersRecover::ScalersRecover(const edm::ParameterSet& iConfig) - -{ - sql = NULL; - sql = fopen("scalers.sql","w"); -} - -// Destructor -ScalersRecover::~ScalersRecover() -{ - if ( sql != NULL ) { fclose(sql);} -} - -// ------------ method called to for each event ------------ -void ScalersRecover::analyze(const edm::Event& iEvent, - const edm::EventSetup& iSetup) -{ - using namespace edm; - char heure [32]; - char sNanos [16]; - struct tm *hora; - - edm::Handle data; - bool ok = iEvent.getByLabel("scalersRawToDigi",data); - - if ( !ok ) - { - LogError("ScalersRecover") << - "Could not find Level1TriggerScalersCollection"; - return; - } - - if ( data->size() < 1 ) - { - LogError("ScalersRecover") << - "Could not find Level1TriggerScalers element from Collection"; - return; - } - - Level1TriggerScalersCollection::const_iterator triggerScalers = - data->begin(); - - - int lumiSection = triggerScalers->lumiSegmentNrLumiSeg(); - 
if ( ( ( lastLumiSection==-1 ) && ( lumiSection == 1 )) || - ( ( lastLumiSection>0 ) && ( lastLumiSection != lumiSection ))) - { - timespec zeit = triggerScalers->collectionTimeLumiSeg(); - time_t seconds = zeit.tv_sec; - long int nanos = zeit.tv_nsec; - - hora = gmtime(&seconds); - strftime(heure,sizeof(heure),"%Y.%m.%d %H:%M:%S", hora); - sprintf(sNanos,"%9.9d", (int)nanos); - - std::ostringstream insert; - insert << - "INSERT INTO LEVEL1_TRIGGER_CONDITIONS (RUNNUMBER,LUMISEGMENTNR,TIME,TIME_NS,TIME_STAMP" << - ",TRIGGERSPHYSICSGENERATEDFDL" << - ",TRIGGERSPHYSICSLOST" << - ",TRIGGERSPHYSICSLOSTBEAMACTIVE" << - ",TRIGGERSPHYSICSLOSTBEAMINACTI" << - ",L1ASPHYSICS" << - ",L1ASRANDOM" << - ",L1ASTEST" << - ",L1ASCALIBRATION" << - ",DEADTIME" << - ",DEADTIMEBEAMACTIVE" << - ",DEADTIMEBEAMACTIVETRIGGERRULE" << - ",DEADTIMEBEAMACTIVECALIBRATION" << - ",DEADTIMEBEAMACTIVEPRIVATEORBI" << - ",DEADTIMEBEAMACTIVEPARTITIONCO" << - ",DEADTIMEBEAMACTIVETIMESLOT" << - ") VALUES (" << iEvent.run() << - "," << lumiSection << - "," << zeit.tv_sec << - "," << nanos << - ",TO_TIMESTAMP('" << heure << "." 
<< sNanos << - "','YYYY.MM.DD HH24:MI:SS.FF')" << - "," << triggerScalers->triggersPhysicsGeneratedFDL() << - "," << triggerScalers->triggersPhysicsLost() << - "," << triggerScalers->triggersPhysicsLostBeamActive() << - "," << triggerScalers->triggersPhysicsLostBeamInactive() << - "," << triggerScalers->l1AsPhysics() << - "," << triggerScalers->l1AsRandom() << - "," << triggerScalers->l1AsTest() << - "," << triggerScalers->l1AsCalibration() << - "," << triggerScalers->deadtime() << - "," << triggerScalers->deadtimeBeamActive() << - "," << triggerScalers->deadtimeBeamActiveTriggerRules() << - "," << triggerScalers->deadtimeBeamActiveCalibration() << - "," << triggerScalers->deadtimeBeamActivePrivateOrbit() << - "," << triggerScalers->deadtimeBeamActivePartitionController() << - "," << triggerScalers->deadtimeBeamActiveTimeSlot() << - ");" ; - - if ( sql != NULL ) { fprintf(sql,"%s\n", insert.str().c_str());} - - std::vector algo = triggerScalers->gtAlgoCounts(); - int length = algo.size(); - for ( int i=0; i tech = triggerScalers->gtAlgoCounts(); - length = tech.size(); - for ( int i=0; i trkd0, std::vector trkdz, std::vector trkpt, std::vector trketa, std::vector trkphi, + std::vector trkpMode, + std::vector trketaMode, + std::vector trkphiMode, + std::vector trkqoverpModeError, std::vector trkchi2overndf, float dEtaIn, float dPhiIn, @@ -26,6 +33,7 @@ class Run3ScoutingElectron { float ooEMOop, int missingHits, std::vector trkcharge, + float trackfbrem, float ecalIso, float hcalIso, float trackIso, @@ -33,6 +41,8 @@ class Run3ScoutingElectron { float sMin, float sMaj, uint32_t seedId, + uint32_t nClusters, + uint32_t nCrystals, std::vector energyMatrix, std::vector detIds, std::vector timingMatrix, @@ -41,11 +51,18 @@ class Run3ScoutingElectron { eta_(eta), phi_(phi), m_(m), + rawEnergy_(rawEnergy), + preshowerEnergy_(preshowerEnergy), + corrEcalEnergyError_(corrEcalEnergyError), trkd0_(std::move(trkd0)), trkdz_(std::move(trkdz)), trkpt_(std::move(trkpt)), 
trketa_(std::move(trketa)), trkphi_(std::move(trkphi)), + trkpMode_(std::move(trkpMode)), + trketaMode_(std::move(trketaMode)), + trkphiMode_(std::move(trkphiMode)), + trkqoverpModeError_(std::move(trkqoverpModeError)), trkchi2overndf_(std::move(trkchi2overndf)), dEtaIn_(dEtaIn), dPhiIn_(dPhiIn), @@ -54,6 +71,7 @@ class Run3ScoutingElectron { ooEMOop_(ooEMOop), missingHits_(missingHits), trkcharge_(std::move(trkcharge)), + trackfbrem_(trackfbrem), ecalIso_(ecalIso), hcalIso_(hcalIso), trackIso_(trackIso), @@ -61,6 +79,8 @@ class Run3ScoutingElectron { sMin_(sMin), sMaj_(sMaj), seedId_(seedId), + nClusters_(nClusters), + nCrystals_(nCrystals), energyMatrix_(std::move(energyMatrix)), detIds_(std::move(detIds)), timingMatrix_(std::move(timingMatrix)), @@ -71,11 +91,18 @@ class Run3ScoutingElectron { eta_(0), phi_(0), m_(0), + rawEnergy_(0), + preshowerEnergy_(0), + corrEcalEnergyError_(0), trkd0_(0), trkdz_(0), trkpt_(0), trketa_(0), trkphi_(0), + trkpMode_(0), + trketaMode_(0), + trkphiMode_(0), + trkqoverpModeError_(0), trkchi2overndf_(0), dEtaIn_(0), dPhiIn_(0), @@ -84,6 +111,7 @@ class Run3ScoutingElectron { ooEMOop_(0), missingHits_(0), trkcharge_(0), + trackfbrem_(0), ecalIso_(0), hcalIso_(0), trackIso_(0), @@ -91,6 +119,8 @@ class Run3ScoutingElectron { sMin_(0), sMaj_(0), seedId_(0), + nClusters_(0), + nCrystals_(0), rechitZeroSuppression_(false) {} //accessor functions @@ -98,11 +128,18 @@ class Run3ScoutingElectron { float eta() const { return eta_; } float phi() const { return phi_; } float m() const { return m_; } + float rawEnergy() const { return rawEnergy_; } + float preshowerEnergy() const { return preshowerEnergy_; } + float corrEcalEnergyError() const { return corrEcalEnergyError_; } std::vector const& trkd0() const { return trkd0_; } std::vector const& trkdz() const { return trkdz_; } std::vector const& trkpt() const { return trkpt_; } std::vector const& trketa() const { return trketa_; } std::vector const& trkphi() const { return trkphi_; } + 
std::vector const& trkpMode() const { return trkpMode_; } + std::vector const& trketaMode() const { return trketaMode_; } + std::vector const& trkphiMode() const { return trkphiMode_; } + std::vector const& trkqoverpModeError() const { return trkqoverpModeError_; } std::vector const& trkchi2overndf() const { return trkchi2overndf_; } float dEtaIn() const { return dEtaIn_; } float dPhiIn() const { return dPhiIn_; } @@ -111,6 +148,7 @@ class Run3ScoutingElectron { float ooEMOop() const { return ooEMOop_; } int missingHits() const { return missingHits_; } std::vector const& trkcharge() const { return trkcharge_; } + float trackfbrem() const { return trackfbrem_; } float ecalIso() const { return ecalIso_; } float hcalIso() const { return hcalIso_; } float trackIso() const { return trackIso_; } @@ -118,6 +156,8 @@ class Run3ScoutingElectron { float sMin() const { return sMin_; } float sMaj() const { return sMaj_; } uint32_t seedId() const { return seedId_; } + uint32_t nClusters() const { return nClusters_; } + uint32_t nCrystals() const { return nCrystals_; } std::vector const& energyMatrix() const { return energyMatrix_; } std::vector const& detIds() const { return detIds_; } std::vector const& timingMatrix() const { return timingMatrix_; } @@ -128,11 +168,18 @@ class Run3ScoutingElectron { float eta_; float phi_; float m_; + float rawEnergy_; + float preshowerEnergy_; + float corrEcalEnergyError_; std::vector trkd0_; std::vector trkdz_; std::vector trkpt_; std::vector trketa_; std::vector trkphi_; + std::vector trkpMode_; + std::vector trketaMode_; + std::vector trkphiMode_; + std::vector trkqoverpModeError_; std::vector trkchi2overndf_; float dEtaIn_; float dPhiIn_; @@ -141,6 +188,7 @@ class Run3ScoutingElectron { float ooEMOop_; int missingHits_; std::vector trkcharge_; + float trackfbrem_; float ecalIso_; float hcalIso_; float trackIso_; @@ -148,6 +196,8 @@ class Run3ScoutingElectron { float sMin_; float sMaj_; uint32_t seedId_; + uint32_t nClusters_; + uint32_t 
nCrystals_; std::vector energyMatrix_; std::vector detIds_; std::vector timingMatrix_; diff --git a/DataFormats/Scouting/interface/Run3ScoutingPhoton.h b/DataFormats/Scouting/interface/Run3ScoutingPhoton.h index 44399ef32a907..b0ccc3ef6530c 100644 --- a/DataFormats/Scouting/interface/Run3ScoutingPhoton.h +++ b/DataFormats/Scouting/interface/Run3ScoutingPhoton.h @@ -13,6 +13,9 @@ class Run3ScoutingPhoton { float eta, float phi, float m, + float rawEnergy, + float preshowerEnergy, + float corrEcalEnergyError, float sigmaIetaIeta, float hOverE, float ecalIso, @@ -22,6 +25,8 @@ class Run3ScoutingPhoton { float sMin, float sMaj, uint32_t seedId, + uint32_t nClusters, + uint32_t nCrystals, std::vector energyMatrix, std::vector detIds, std::vector timingMatrix, @@ -30,6 +35,9 @@ class Run3ScoutingPhoton { eta_(eta), phi_(phi), m_(m), + rawEnergy_(rawEnergy), + preshowerEnergy_(preshowerEnergy), + corrEcalEnergyError_(corrEcalEnergyError), sigmaIetaIeta_(sigmaIetaIeta), hOverE_(hOverE), ecalIso_(ecalIso), @@ -39,6 +47,8 @@ class Run3ScoutingPhoton { sMin_(sMin), sMaj_(sMaj), seedId_(seedId), + nClusters_(nClusters), + nCrystals_(nCrystals), energyMatrix_(std::move(energyMatrix)), detIds_(std::move(detIds)), timingMatrix_(std::move(timingMatrix)), @@ -49,6 +59,9 @@ class Run3ScoutingPhoton { eta_(0), phi_(0), m_(0), + rawEnergy_(0), + preshowerEnergy_(0), + corrEcalEnergyError_(0), sigmaIetaIeta_(0), hOverE_(0), ecalIso_(0), @@ -58,6 +71,8 @@ class Run3ScoutingPhoton { sMin_(0), sMaj_(0), seedId_(0), + nClusters_(0), + nCrystals_(0), energyMatrix_(0), timingMatrix_(0), rechitZeroSuppression_(false) {} @@ -67,6 +82,9 @@ class Run3ScoutingPhoton { float eta() const { return eta_; } float phi() const { return phi_; } float m() const { return m_; } + float rawEnergy() const { return rawEnergy_; } + float preshowerEnergy() const { return preshowerEnergy_; } + float corrEcalEnergyError() const { return corrEcalEnergyError_; } float sigmaIetaIeta() const { return sigmaIetaIeta_; } 
float hOverE() const { return hOverE_; } float ecalIso() const { return ecalIso_; } @@ -76,6 +94,8 @@ class Run3ScoutingPhoton { float sMin() const { return sMin_; } float sMaj() const { return sMaj_; } uint32_t seedId() const { return seedId_; } + uint32_t nClusters() const { return nClusters_; } + uint32_t nCrystals() const { return nCrystals_; } std::vector const& energyMatrix() const { return energyMatrix_; } std::vector const& detIds() const { return detIds_; } std::vector const& timingMatrix() const { return timingMatrix_; } @@ -86,6 +106,9 @@ class Run3ScoutingPhoton { float eta_; float phi_; float m_; + float rawEnergy_; + float preshowerEnergy_; + float corrEcalEnergyError_; float sigmaIetaIeta_; float hOverE_; float ecalIso_; @@ -95,6 +118,8 @@ class Run3ScoutingPhoton { float sMin_; float sMaj_; uint32_t seedId_; + uint32_t nClusters_; + uint32_t nCrystals_; std::vector energyMatrix_; std::vector detIds_; std::vector timingMatrix_; diff --git a/DataFormats/Scouting/interface/Run3ScoutingVertex.h b/DataFormats/Scouting/interface/Run3ScoutingVertex.h index 0f5ffebfcba9f..0437e97e11360 100644 --- a/DataFormats/Scouting/interface/Run3ScoutingVertex.h +++ b/DataFormats/Scouting/interface/Run3ScoutingVertex.h @@ -17,7 +17,10 @@ class Run3ScoutingVertex { int tracksSize, float chi2, int ndof, - bool isValidVtx) + bool isValidVtx, + float xyCov, + float xzCov, + float yzCov) : x_(x), y_(y), z_(z), @@ -27,7 +30,10 @@ class Run3ScoutingVertex { tracksSize_(tracksSize), chi2_(chi2), ndof_(ndof), - isValidVtx_(isValidVtx) {} + isValidVtx_(isValidVtx), + xyCov_(xyCov), + xzCov_(xzCov), + yzCov_(yzCov) {} //default constructor Run3ScoutingVertex() : x_(0), @@ -39,19 +45,25 @@ class Run3ScoutingVertex { tracksSize_(0), chi2_(0), ndof_(0), - isValidVtx_(false) {} + isValidVtx_(false), + xyCov_(0), + xzCov_(0), + yzCov_(0) {} //accessor functions float x() const { return x_; } float y() const { return y_; } float z() const { return z_; } - float zError() const { return 
zError_; } float xError() const { return xError_; } float yError() const { return yError_; } + float zError() const { return zError_; } int tracksSize() const { return tracksSize_; } float chi2() const { return chi2_; } int ndof() const { return ndof_; } bool isValidVtx() const { return isValidVtx_; } + float xyCov() const { return xyCov_; } + float xzCov() const { return xzCov_; } + float yzCov() const { return yzCov_; } private: float x_; @@ -64,6 +76,9 @@ class Run3ScoutingVertex { float chi2_; int ndof_; bool isValidVtx_; + float xyCov_; + float xzCov_; + float yzCov_; }; typedef std::vector Run3ScoutingVertexCollection; diff --git a/DataFormats/Scouting/src/classes_def.xml b/DataFormats/Scouting/src/classes_def.xml index abeb9b6f534b3..7824519709e78 100644 --- a/DataFormats/Scouting/src/classes_def.xml +++ b/DataFormats/Scouting/src/classes_def.xml @@ -2,11 +2,12 @@ - + + @@ -50,16 +51,18 @@ - + + - + + diff --git a/DataFormats/Scouting/test/TestReadRun3Scouting.cc b/DataFormats/Scouting/test/TestReadRun3Scouting.cc index 2e092860cae62..10433f63b351a 100644 --- a/DataFormats/Scouting/test/TestReadRun3Scouting.cc +++ b/DataFormats/Scouting/test/TestReadRun3Scouting.cc @@ -83,6 +83,7 @@ namespace edmtest { const std::vector expectedPFJetIntegralValues_; const edm::EDGetTokenT> pfJetsToken_; + const int inputPhotonClassVersion_; const std::vector expectedPhotonFloatingPointValues_; const std::vector expectedPhotonIntegralValues_; const edm::EDGetTokenT> photonsToken_; @@ -91,6 +92,7 @@ namespace edmtest { const std::vector expectedTrackIntegralValues_; const edm::EDGetTokenT> tracksToken_; + const int inputVertexClassVersion_; const std::vector expectedVertexFloatingPointValues_; const std::vector expectedVertexIntegralValues_; const edm::EDGetTokenT> vertexesToken_; @@ -114,6 +116,7 @@ namespace edmtest { expectedPFJetFloatingPointValues_(iPSet.getParameter>("expectedPFJetFloatingPointValues")), 
expectedPFJetIntegralValues_(iPSet.getParameter>("expectedPFJetIntegralValues")), pfJetsToken_(consumes(iPSet.getParameter("pfJetsTag"))), + inputPhotonClassVersion_(iPSet.getParameter("photonClassVersion")), expectedPhotonFloatingPointValues_( iPSet.getParameter>("expectedPhotonFloatingPointValues")), expectedPhotonIntegralValues_(iPSet.getParameter>("expectedPhotonIntegralValues")), @@ -121,6 +124,7 @@ namespace edmtest { expectedTrackFloatingPointValues_(iPSet.getParameter>("expectedTrackFloatingPointValues")), expectedTrackIntegralValues_(iPSet.getParameter>("expectedTrackIntegralValues")), tracksToken_(consumes(iPSet.getParameter("tracksTag"))), + inputVertexClassVersion_(iPSet.getParameter("vertexClassVersion")), expectedVertexFloatingPointValues_( iPSet.getParameter>("expectedVertexFloatingPointValues")), expectedVertexIntegralValues_(iPSet.getParameter>("expectedVertexIntegralValues")), @@ -128,12 +132,12 @@ namespace edmtest { if (expectedCaloJetsValues_.size() != 16) { throwWithMessageFromConstructor("test configuration error, expectedCaloJetsValues must have size 16"); } - if (expectedElectronFloatingPointValues_.size() != 25) { + if (expectedElectronFloatingPointValues_.size() != 33) { throwWithMessageFromConstructor( - "test configuration error, expectedElectronFloatingPointValues must have size 25"); + "test configuration error, expectedElectronFloatingPointValues must have size 33"); } - if (expectedElectronIntegralValues_.size() != 6) { - throwWithMessageFromConstructor("test configuration error, expectedElectronIntegralValues must have size 6"); + if (expectedElectronIntegralValues_.size() != 8) { + throwWithMessageFromConstructor("test configuration error, expectedElectronIntegralValues must have size 8"); } if (expectedMuonFloatingPointValues_.size() != 37) { throwWithMessageFromConstructor("test configuration error, expectedMuonFloatingPointValues must have size 37"); @@ -154,11 +158,11 @@ namespace edmtest { if 
(expectedPFJetIntegralValues_.size() != 8) { throwWithMessageFromConstructor("test configuration error, expectedPFJetIntegralValues must have size 8"); } - if (expectedPhotonFloatingPointValues_.size() != 14) { - throwWithMessageFromConstructor("test configuration error, expectedPhotonFloatingPointValues must have size 14"); + if (expectedPhotonFloatingPointValues_.size() != 17) { + throwWithMessageFromConstructor("test configuration error, expectedPhotonFloatingPointValues must have size 17"); } - if (expectedPhotonIntegralValues_.size() != 3) { - throwWithMessageFromConstructor("test configuration error, expectedPhotonIntegralValues must have size 3"); + if (expectedPhotonIntegralValues_.size() != 5) { + throwWithMessageFromConstructor("test configuration error, expectedPhotonIntegralValues must have size 5"); } if (expectedTrackFloatingPointValues_.size() != 29) { throwWithMessageFromConstructor("test configuration error, expectedTrackFloatingPointValues must have size 29"); @@ -166,8 +170,8 @@ namespace edmtest { if (expectedTrackIntegralValues_.size() != 5) { throwWithMessageFromConstructor("test configuration error, expectedTrackIntegralValues must have size 5"); } - if (expectedVertexFloatingPointValues_.size() != 7) { - throwWithMessageFromConstructor("test configuration error, expectedVertexFloatingPointValues must have size 7"); + if (expectedVertexFloatingPointValues_.size() != 10) { + throwWithMessageFromConstructor("test configuration error, expectedVertexFloatingPointValues must have size 10"); } if (expectedVertexIntegralValues_.size() != 3) { throwWithMessageFromConstructor("test configuration error, expectedVertexIntegralValues must have size 3"); @@ -202,12 +206,14 @@ namespace edmtest { desc.add>("expectedPFJetFloatingPointValues"); desc.add>("expectedPFJetIntegralValues"); desc.add("pfJetsTag"); + desc.add("photonClassVersion"); desc.add>("expectedPhotonFloatingPointValues"); desc.add>("expectedPhotonIntegralValues"); desc.add("photonsTag"); 
desc.add>("expectedTrackFloatingPointValues"); desc.add>("expectedTrackIntegralValues"); desc.add("tracksTag"); + desc.add("vertexClassVersion"); desc.add>("expectedVertexFloatingPointValues"); desc.add>("expectedVertexIntegralValues"); desc.add("vertexesTag"); @@ -393,7 +399,7 @@ namespace edmtest { if (electron.rechitZeroSuppression() != static_cast((expectedElectronIntegralValues_[4] + iOffset) % 2)) { throwWithMessage("analyzeElectrons, rechitZeroSuppression does not equal expected value"); } - if (inputElectronClassVersion_ == 6) { + if (inputElectronClassVersion_ == 6 || inputElectronClassVersion_ == 7) { if (electron.trkd0().size() != vectorSize) { throwWithMessage("analyzeElectrons, trkd0 does not have expected size"); } @@ -465,6 +471,66 @@ namespace edmtest { ++j; } } + if (inputElectronClassVersion_ == 7) { + if (electron.rawEnergy() != expectedElectronFloatingPointValues_[25] + offset) { + throwWithMessage("analyzeElectrons, rawEnergy does not equal expected value"); + } + if (electron.preshowerEnergy() != expectedElectronFloatingPointValues_[26] + offset) { + throwWithMessage("analyzeElectrons, preshowerEnergy does not equal expected value"); + } + if (electron.corrEcalEnergyError() != expectedElectronFloatingPointValues_[27] + offset) { + throwWithMessage("analyzeElectrons, corrEcalEnergyError does not equal expected value"); + } + if (electron.trkpMode().size() != vectorSize) { + throwWithMessage("analyzeElectrons, trkpMode does not have expected size"); + } + j = 0; + for (auto const& val : electron.trkpMode()) { + if (val != expectedElectronFloatingPointValues_[28] + offset + 10 * j) { + throwWithMessage("analyzeElectrons, trkpMode does not contain expected value"); + } + ++j; + } + if (electron.trketaMode().size() != vectorSize) { + throwWithMessage("analyzeElectrons, trketaMode does not have expected size"); + } + j = 0; + for (auto const& val : electron.trketaMode()) { + if (val != expectedElectronFloatingPointValues_[29] + offset + 10 * j) { + 
throwWithMessage("analyzeElectrons, trketaMode does not contain expected value"); + } + ++j; + } + if (electron.trkphiMode().size() != vectorSize) { + throwWithMessage("analyzeElectrons, trkphiMode does not have expected size"); + } + j = 0; + for (auto const& val : electron.trkphiMode()) { + if (val != expectedElectronFloatingPointValues_[30] + offset + 10 * j) { + throwWithMessage("analyzeElectrons, trkphiMode does not contain expected value"); + } + ++j; + } + if (electron.trkqoverpModeError().size() != vectorSize) { + throwWithMessage("analyzeElectrons, trkqoverpModeError does not have expected size"); + } + j = 0; + for (auto const& val : electron.trkqoverpModeError()) { + if (val != expectedElectronFloatingPointValues_[31] + offset + 10 * j) { + throwWithMessage("analyzeElectrons, trkqoverpModeError does not contain expected value"); + } + ++j; + } + if (electron.trackfbrem() != expectedElectronFloatingPointValues_[32] + offset) { + throwWithMessage("analyzeElectrons, trackfbrem does not equal expected value"); + } + if (electron.nClusters() != static_cast(expectedElectronIntegralValues_[6] + iOffset)) { + throwWithMessage("analyzeElectrons, nClusters does not equal expected value"); + } + if (electron.nCrystals() != static_cast(expectedElectronIntegralValues_[7] + iOffset)) { + throwWithMessage("analyzeElectrons, nCrystals does not equal expected value"); + } + } ++i; } } @@ -917,6 +983,23 @@ namespace edmtest { if (photon.rechitZeroSuppression() != static_cast((expectedPhotonIntegralValues_[2] + iOffset) % 2)) { throwWithMessage("analyzePhotons, rechitZeroSuppression does not equal expected value"); } + if (inputPhotonClassVersion_ == 6) { + if (photon.rawEnergy() != expectedPhotonFloatingPointValues_[14] + offset) { + throwWithMessage("analyzePhotons, rawEnergy does not equal expected value"); + } + if (photon.preshowerEnergy() != expectedPhotonFloatingPointValues_[15] + offset) { + throwWithMessage("analyzePhotons, preshowerEnergy does not equal expected 
value"); + } + if (photon.corrEcalEnergyError() != expectedPhotonFloatingPointValues_[16] + offset) { + throwWithMessage("analyzePhotons, corrEcalEnergyError does not equal expected value"); + } + if (photon.nClusters() != static_cast(expectedPhotonIntegralValues_[3] + iOffset)) { + throwWithMessage("analyzePhotons, nClusters does not equal expected value"); + } + if (photon.nCrystals() != static_cast(expectedPhotonIntegralValues_[4] + iOffset)) { + throwWithMessage("analyzePhotons, nCrystals does not equal expected value"); + } + } ++i; } } @@ -1079,6 +1162,18 @@ namespace edmtest { if (vertex.isValidVtx() != static_cast((expectedVertexIntegralValues_[2] + iOffset) % 2)) { throwWithMessage("analyzeVertexes, isValidVtx does not equal expected value"); } + + if (inputVertexClassVersion_ == 4) { + if (vertex.xyCov() != expectedVertexFloatingPointValues_[7] + offset) { + throwWithMessage("analyzeVertexes, xy cov. does not equal expected value"); + } + if (vertex.xzCov() != expectedVertexFloatingPointValues_[8] + offset) { + throwWithMessage("analyzeVertexes, xz cov. does not equal expected value"); + } + if (vertex.yzCov() != expectedVertexFloatingPointValues_[9] + offset) { + throwWithMessage("analyzeVertexes, yz cov. does not equal expected value"); + } + } ++i; } } diff --git a/DataFormats/Scouting/test/TestRun3ScoutingFormats.sh b/DataFormats/Scouting/test/TestRun3ScoutingFormats.sh index b6a773b528929..42226737e153b 100755 --- a/DataFormats/Scouting/test/TestRun3ScoutingFormats.sh +++ b/DataFormats/Scouting/test/TestRun3ScoutingFormats.sh @@ -27,6 +27,10 @@ cmsRun ${LOCAL_TEST_DIR}/test_readRun3Scouting_cfg.py || die "Failure using test # minor conflicts or issues in test/BuildFile.xml that need to # be resolved. # +# testRun3Scouting_v3_v7_v3_v4_v5_v3_v6_v3_v4_CMSSW_14_0_0_pre3.root: +# Check out the 14_0_0_pre3 pre-release, no additional commits +# will be neeeded. +# # Run the create_Run3Scouting_test_file_cfg.py configuration and # rename the file it creates. 
# @@ -48,12 +52,17 @@ cmsRun ${LOCAL_TEST_DIR}/test_readRun3Scouting_cfg.py || die "Failure using test file=testRun3Scouting_v3_v5_v3_v4_v5_v3_v5_v3_v3_CMSSW_12_4_0.root inputfile=$(edmFileInPath DataFormats/Scouting/data/$file) || die "Failure edmFileInPath DataFormats/Scouting/data/$file" $? -argsPassedToPython="--inputFile $inputfile --outputFileName testRun3Scouting2_CMSSW_12_4_0.root --electronVersion 5" +argsPassedToPython="--inputFile $inputfile --outputFileName testRun3Scouting2_CMSSW_12_4_0.root --electronVersion 5 --photonVersion 5 --vertexVersion 3" cmsRun ${LOCAL_TEST_DIR}/test_readRun3Scouting_cfg.py $argsPassedToPython || die "Failed to read old file $file" $? file=testRun3Scouting_v3_v6_v3_v4_v5_v3_v5_v3_v3_CMSSW_13_0_3.root inputfile=$(edmFileInPath DataFormats/Scouting/data/$file) || die "Failure edmFileInPath DataFormats/Scouting/data/$file" $? -argsPassedToPython="--inputFile $inputfile --outputFileName testRun3Scouting2_CMSSW_13_0_3.root --electronVersion 6" +argsPassedToPython="--inputFile $inputfile --outputFileName testRun3Scouting2_CMSSW_13_0_3.root --electronVersion 6 --photonVersion 5 --vertexVersion 3" +cmsRun ${LOCAL_TEST_DIR}/test_readRun3Scouting_cfg.py $argsPassedToPython || die "Failed to read old file $file" $? + +file=testRun3Scouting_v3_v7_v3_v4_v5_v3_v6_v3_v4_CMSSW_14_0_0_pre3.root +inputfile=$(edmFileInPath DataFormats/Scouting/data/$file) || die "Failure edmFileInPath DataFormats/Scouting/data/$file" $? +argsPassedToPython="--inputFile $inputfile --outputFileName testRun3Scouting2_CMSSW_14_0_0_pre3.root --electronVersion 7 --photonVersion 6 --vertexVersion 4" cmsRun ${LOCAL_TEST_DIR}/test_readRun3Scouting_cfg.py $argsPassedToPython || die "Failed to read old file $file" $? 
exit 0 diff --git a/DataFormats/Scouting/test/TestWriteRun3Scouting.cc b/DataFormats/Scouting/test/TestWriteRun3Scouting.cc index ae3f07be99cb6..9c04b4ca074d9 100644 --- a/DataFormats/Scouting/test/TestWriteRun3Scouting.cc +++ b/DataFormats/Scouting/test/TestWriteRun3Scouting.cc @@ -118,11 +118,11 @@ namespace edmtest { if (caloJetsValues_.size() != 16) { throwWithMessage("caloJetsValues must have 16 elements and it does not"); } - if (electronsFloatingPointValues_.size() != 25) { - throwWithMessage("electronsFloatingPointValues must have 25 elements and it does not"); + if (electronsFloatingPointValues_.size() != 33) { + throwWithMessage("electronsFloatingPointValues must have 33 elements and it does not"); } - if (electronsIntegralValues_.size() != 6) { - throwWithMessage("electronsIntegralValues must have 6 elements and it does not"); + if (electronsIntegralValues_.size() != 8) { + throwWithMessage("electronsIntegralValues must have 8 elements and it does not"); } if (muonsFloatingPointValues_.size() != 37) { throwWithMessage("muonsFloatingPointValues must have 37 elements and it does not"); @@ -142,11 +142,11 @@ namespace edmtest { if (pfJetsIntegralValues_.size() != 8) { throwWithMessage("pfJetsIntegralValues must have 8 elements and it does not"); } - if (photonsFloatingPointValues_.size() != 14) { - throwWithMessage("photonsFloatingPointValues must have 14 elements and it does not"); + if (photonsFloatingPointValues_.size() != 17) { + throwWithMessage("photonsFloatingPointValues must have 17 elements and it does not"); } - if (photonsIntegralValues_.size() != 3) { - throwWithMessage("photonsIntegralValues must have 3 elements and it does not"); + if (photonsIntegralValues_.size() != 5) { + throwWithMessage("photonsIntegralValues must have 5 elements and it does not"); } if (tracksFloatingPointValues_.size() != 29) { throwWithMessage("tracksFloatingPointValues must have 29 elements and it does not"); @@ -154,8 +154,8 @@ namespace edmtest { if 
(tracksIntegralValues_.size() != 5) { throwWithMessage("tracksIntegralValues must have 5 elements and it does not"); } - if (vertexesFloatingPointValues_.size() != 7) { - throwWithMessage("vertexesFloatingPointValues must have 7 elements and it does not"); + if (vertexesFloatingPointValues_.size() != 10) { + throwWithMessage("vertexesFloatingPointValues must have 10 elements and it does not"); } if (vertexesIntegralValues_.size() != 3) { throwWithMessage("vertexesIntegralValues must have 3 elements and it does not"); @@ -233,7 +233,7 @@ namespace edmtest { double offset = static_cast(iEvent.id().event() + i); int iOffset = static_cast(iEvent.id().event() + i); - // Note the first seven of these vectors use an out of sequence index + // Note the first eleven of these vectors use an out of sequence index // (starting at 19 or 5) because they are data members added in a format // change. In the CMSSW_12_4_0 version, they didn't exist. // Also the index values 4 and 5 in electronsFloatingPointValues_ @@ -244,6 +244,10 @@ namespace edmtest { std::vector trkpt; std::vector trketa; std::vector trkphi; + std::vector trkpMode; + std::vector trketaMode; + std::vector trkphiMode; + std::vector trkqoverpModeError; std::vector trkchi2overndf; std::vector trkcharge; std::vector energyMatrix; @@ -254,6 +258,10 @@ namespace edmtest { trkpt.reserve(vectorSize); trketa.reserve(vectorSize); trkphi.reserve(vectorSize); + trkpMode.reserve(vectorSize); + trketaMode.reserve(vectorSize); + trkphiMode.reserve(vectorSize); + trkqoverpModeError.reserve(vectorSize); trkchi2overndf.reserve(vectorSize); trkcharge.reserve(vectorSize); energyMatrix.reserve(vectorSize); @@ -265,6 +273,10 @@ namespace edmtest { trkpt.push_back(static_cast(electronsFloatingPointValues_[21] + offset + j * 10)); trketa.push_back(static_cast(electronsFloatingPointValues_[22] + offset + j * 10)); trkphi.push_back(static_cast(electronsFloatingPointValues_[23] + offset + j * 10)); + 
trkpMode.push_back(static_cast(electronsFloatingPointValues_[28] + offset + j * 10)); + trketaMode.push_back(static_cast(electronsFloatingPointValues_[29] + offset + j * 10)); + trkphiMode.push_back(static_cast(electronsFloatingPointValues_[30] + offset + j * 10)); + trkqoverpModeError.push_back(static_cast(electronsFloatingPointValues_[31] + offset + j * 10)); trkchi2overndf.push_back(static_cast(electronsFloatingPointValues_[24] + offset + j * 10)); trkcharge.push_back(static_cast(electronsIntegralValues_[5] + offset + j * 10)); energyMatrix.push_back(static_cast(electronsFloatingPointValues_[17] + offset + j * 10)); @@ -275,11 +287,18 @@ namespace edmtest { static_cast(electronsFloatingPointValues_[1] + offset), static_cast(electronsFloatingPointValues_[2] + offset), static_cast(electronsFloatingPointValues_[3] + offset), + static_cast(electronsFloatingPointValues_[25] + offset), + static_cast(electronsFloatingPointValues_[26] + offset), + static_cast(electronsFloatingPointValues_[27] + offset), std::move(trkd0), std::move(trkdz), std::move(trkpt), std::move(trketa), std::move(trkphi), + std::move(trkpMode), + std::move(trketaMode), + std::move(trkphiMode), + std::move(trkqoverpModeError), std::move(trkchi2overndf), static_cast(electronsFloatingPointValues_[6] + offset), static_cast(electronsFloatingPointValues_[7] + offset), @@ -288,6 +307,7 @@ namespace edmtest { static_cast(electronsFloatingPointValues_[10] + offset), electronsIntegralValues_[0] + iOffset, std::move(trkcharge), + static_cast(electronsFloatingPointValues_[32] + offset), static_cast(electronsFloatingPointValues_[11] + offset), static_cast(electronsFloatingPointValues_[12] + offset), static_cast(electronsFloatingPointValues_[13] + offset), @@ -295,6 +315,8 @@ namespace edmtest { static_cast(electronsFloatingPointValues_[15] + offset), static_cast(electronsFloatingPointValues_[16] + offset), static_cast(electronsIntegralValues_[2] + iOffset), + static_cast(electronsIntegralValues_[6] + iOffset), 
+ static_cast(electronsIntegralValues_[7] + iOffset), std::move(energyMatrix), std::move(detIds), std::move(timingMatrix), @@ -482,6 +504,9 @@ namespace edmtest { static_cast(photonsFloatingPointValues_[1] + offset), static_cast(photonsFloatingPointValues_[2] + offset), static_cast(photonsFloatingPointValues_[3] + offset), + static_cast(photonsFloatingPointValues_[14] + offset), + static_cast(photonsFloatingPointValues_[15] + offset), + static_cast(photonsFloatingPointValues_[16] + offset), static_cast(photonsFloatingPointValues_[4] + offset), static_cast(photonsFloatingPointValues_[5] + offset), static_cast(photonsFloatingPointValues_[6] + offset), @@ -491,6 +516,8 @@ namespace edmtest { static_cast(photonsFloatingPointValues_[10] + offset), static_cast(photonsFloatingPointValues_[11] + offset), static_cast(photonsIntegralValues_[0] + iOffset), + static_cast(photonsIntegralValues_[3] + iOffset), + static_cast(photonsIntegralValues_[4] + iOffset), std::move(energyMatrix), std::move(detIds), std::move(timingMatrix), @@ -564,7 +591,10 @@ namespace edmtest { vertexesIntegralValues_[0] + iOffset, static_cast(vertexesFloatingPointValues_[6] + offset), vertexesIntegralValues_[1] + iOffset, - static_cast((vertexesIntegralValues_[2] + iOffset) % 2)); + static_cast((vertexesIntegralValues_[2] + iOffset) % 2), + static_cast(vertexesFloatingPointValues_[7] + offset), + static_cast(vertexesFloatingPointValues_[8] + offset), + static_cast(vertexesFloatingPointValues_[9] + offset)); } iEvent.put(vertexesPutToken_, std::move(run3ScoutingVertexes)); } diff --git a/DataFormats/Scouting/test/create_Run3Scouting_test_file_cfg.py b/DataFormats/Scouting/test/create_Run3Scouting_test_file_cfg.py index 64863ed18059b..46217daf44316 100644 --- a/DataFormats/Scouting/test/create_Run3Scouting_test_file_cfg.py +++ b/DataFormats/Scouting/test/create_Run3Scouting_test_file_cfg.py @@ -23,10 +23,12 @@ 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 
200.0, - 210.0, 220.0, 230.0, 240.0, 250.0 + 210.0, 220.0, 230.0, 240.0, 250.0, + 260.0, 270.0, 280.0, 290.0, 300.0, + 310.0, 320.0, 330.0 ), electronsIntegralValues = cms.vint32( - 10, 20, 30, 40, 50, 60 + 10, 20, 30, 40, 50, 60, 70, 80 ), muonsFloatingPointValues = cms.vdouble( 10.0, 20.0, 30.0, 40.0, 50.0, @@ -66,10 +68,11 @@ photonsFloatingPointValues = cms.vdouble( 14.0, 23.0, 33.0, 43.0, 53.0, 63.0, 73.0, 83.0, 93.0, 103.0, - 113.0, 123.0, 133.0, 143.0 + 113.0, 123.0, 133.0, 143.0, 153.0, + 163.0, 173.0 ), photonsIntegralValues = cms.vint32( - 14, 23, 33 + 14, 23, 33, 43, 53 ), tracksFloatingPointValues = cms.vdouble( 14.0, 24.0, 34.0, 44.0, 54.0, @@ -84,7 +87,7 @@ ), vertexesFloatingPointValues = cms.vdouble( 15.0, 25.0, 35.0, 45.0, 55.0, - 65.0, 75.0 + 65.0, 75.0, 85.0, 95.0, 105.0 ), vertexesIntegralValues = cms.vint32( 15, 25, 35 diff --git a/DataFormats/Scouting/test/test_readRun3Scouting_cfg.py b/DataFormats/Scouting/test/test_readRun3Scouting_cfg.py index bee221a1c4c8b..6b1fc7b65c36b 100644 --- a/DataFormats/Scouting/test/test_readRun3Scouting_cfg.py +++ b/DataFormats/Scouting/test/test_readRun3Scouting_cfg.py @@ -4,7 +4,9 @@ parser = argparse.ArgumentParser(prog=sys.argv[0], description='Test Run 3 Scouting data formats') -parser.add_argument("--electronVersion", type=int, help="electron data format version (default: 6)", default=6) +parser.add_argument("--electronVersion", type=int, help="electron data format version (default: 7)", default=7) +parser.add_argument("--photonVersion", type=int, help="photon data format version (default: 6)", default=6) +parser.add_argument("--vertexVersion", type=int, help="photon data format version (default: 4)", default=4) parser.add_argument("--inputFile", type=str, help="Input file name (default: testRun3Scouting.root)", default="testRun3Scouting.root") parser.add_argument("--outputFileName", type=str, help="Output file name (default: testRun3Scouting2.root)", default="testRun3Scouting2.root") args = 
parser.parse_args() @@ -28,8 +30,10 @@ 60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, - 210.0, 220.0, 230.0, 240.0, 250.0), - expectedElectronIntegralValues = cms.vint32(10, 20, 30, 40, 50, 60), + 210.0, 220.0, 230.0, 240.0, 250.0, + 260.0, 270.0, 280.0, 290.0, 300.0, + 310.0, 320.0, 330.0), + expectedElectronIntegralValues = cms.vint32(10, 20, 30, 40, 50, 60, 70, 80), electronsTag = cms.InputTag("run3ScoutingProducer", "", "PROD"), expectedMuonFloatingPointValues = cms.vdouble( 10.0, 20.0, 30.0, 40.0, 50.0, @@ -69,13 +73,15 @@ 62, 72, 82 ), pfJetsTag = cms.InputTag("run3ScoutingProducer", "", "PROD"), + photonClassVersion = cms.int32(args.photonVersion), expectedPhotonFloatingPointValues = cms.vdouble( 14.0, 23.0, 33.0, 43.0, 53.0, 63.0, 73.0, 83.0, 93.0, 103.0, - 113.0, 123.0, 133.0, 143.0 + 113.0, 123.0, 133.0, 143.0, 153.0, + 163.0, 173.0 ), expectedPhotonIntegralValues = cms.vint32( - 14, 23, 33 + 14, 23, 33, 43, 53 ), photonsTag = cms.InputTag("run3ScoutingProducer", "", "PROD"), expectedTrackFloatingPointValues = cms.vdouble( @@ -90,9 +96,10 @@ 14, 24, 34, 44, 54 ), tracksTag = cms.InputTag("run3ScoutingProducer", "", "PROD"), + vertexClassVersion = cms.int32(args.vertexVersion), expectedVertexFloatingPointValues = cms.vdouble( - 15.0, 25.0, 35.0, 45.0, 55.0, - 65.0, 75.0 + 15.0, 25.0, 35.0, 45.0, 55.0, + 65.0, 75.0, 85.0, 95.0, 105.0 ), expectedVertexIntegralValues = cms.vint32( 15, 25, 35 diff --git a/DataFormats/SiPixelClusterSoA/BuildFile.xml b/DataFormats/SiPixelClusterSoA/BuildFile.xml new file mode 100644 index 0000000000000..c9b7e4ef81817 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/BuildFile.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h b/DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h new file mode 100644 index 0000000000000..6726c1d29d5c9 --- /dev/null +++ 
b/DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h @@ -0,0 +1,35 @@ +#ifndef DataFormats_SiPixelClusterSoA_interface_ClusteringConstants_h +#define DataFormats_SiPixelClusterSoA_interface_ClusteringConstants_h + +#include +#include + +//TODO: move this to TrackerTraits! +namespace pixelClustering { +#ifdef GPU_SMALL_EVENTS + // kept for testing and debugging + constexpr uint32_t maxHitsInIter() { return 64; } +#else + // optimized for real data PU 50 + // tested on MC events with 55-75 pileup events + constexpr uint32_t maxHitsInIter() { return 160; } //TODO better tuning for PU 140-200 +#endif + constexpr uint32_t maxHitsInModule() { return 1024; } + + constexpr uint16_t clusterThresholdLayerOne = 2000; + constexpr uint16_t clusterThresholdOtherLayers = 4000; + + constexpr uint16_t clusterThresholdPhase2LayerOne = 4000; + constexpr uint16_t clusterThresholdPhase2OtherLayers = 4000; + + constexpr uint32_t maxNumDigis = 3 * 256 * 1024; // @PU=200 µ=530k σ=50k this is >4σ away + constexpr uint16_t maxNumModules = 4000; + + constexpr int32_t maxNumClustersPerModules = maxHitsInModule(); + constexpr uint16_t invalidModuleId = std::numeric_limits::max() - 1; + constexpr int invalidClusterId = -9999; + static_assert(invalidModuleId > maxNumModules); // invalidModuleId must be > maxNumModules + +} // namespace pixelClustering + +#endif // DataFormats_SiPixelClusterSoA_interface_ClusteringConstants_h diff --git a/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h b/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h new file mode 100644 index 0000000000000..2593475bf5c3a --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h @@ -0,0 +1,38 @@ +#ifndef DataFormats_SiPixelClusterSoA_interface_SiPixelClustersDevice_h +#define DataFormats_SiPixelClusterSoA_interface_SiPixelClustersDevice_h + +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include 
"DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h" +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" + +template +class SiPixelClustersDevice : public PortableDeviceCollection { +public: + SiPixelClustersDevice() = default; + + template + explicit SiPixelClustersDevice(size_t maxModules, TQueue queue) + : PortableDeviceCollection(maxModules + 1, queue) {} + + // Constructor which specifies the SoA size + explicit SiPixelClustersDevice(size_t maxModules, TDev const &device) + : PortableDeviceCollection(maxModules + 1, device) {} + + void setNClusters(uint32_t nClusters, int32_t offsetBPIX2) { + nClusters_h = nClusters; + offsetBPIX2_h = offsetBPIX2; + } + + uint32_t nClusters() const { return nClusters_h; } + int32_t offsetBPIX2() const { return offsetBPIX2_h; } + +private: + uint32_t nClusters_h = 0; + int32_t offsetBPIX2_h = 0; +}; + +#endif // DataFormats_SiPixelClusterSoA_interface_SiPixelClustersDevice_h diff --git a/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h b/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h new file mode 100644 index 0000000000000..eb086160a6188 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h @@ -0,0 +1,33 @@ +#ifndef DataFormats_SiPixelClusterSoA_interface_SiPixelClustersHost_h +#define DataFormats_SiPixelClusterSoA_interface_SiPixelClustersHost_h + +#include +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" + +// TODO: The class is created via inheritance of the PortableCollection. +// This is generally discouraged, and should be done via composition. 
+// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 +class SiPixelClustersHost : public PortableHostCollection { +public: + SiPixelClustersHost() = default; + + template + explicit SiPixelClustersHost(size_t maxModules, TQueue queue) + : PortableHostCollection(maxModules + 1, queue) {} + + void setNClusters(uint32_t nClusters, int32_t offsetBPIX2) { + nClusters_h = nClusters; + offsetBPIX2_h = offsetBPIX2; + } + + uint32_t nClusters() const { return nClusters_h; } + int32_t offsetBPIX2() const { return offsetBPIX2_h; } + +private: + uint32_t nClusters_h = 0; + int32_t offsetBPIX2_h = 0; +}; + +#endif // DataFormats_SiPixelClusterSoA_interface_SiPixelClustersHost_h diff --git a/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h b/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h new file mode 100644 index 0000000000000..c44c0148662ff --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h @@ -0,0 +1,16 @@ +#ifndef DataFormats_SiPixelClusterSoA_SiPixelClustersLayout_h +#define DataFormats_SiPixelClusterSoA_SiPixelClustersLayout_h + +#include "DataFormats/SoATemplate/interface/SoALayout.h" + +GENERATE_SOA_LAYOUT(SiPixelClustersLayout, + SOA_COLUMN(uint32_t, moduleStart), + SOA_COLUMN(uint32_t, clusInModule), + SOA_COLUMN(uint32_t, moduleId), + SOA_COLUMN(uint32_t, clusModuleStart)) + +using SiPixelClustersSoA = SiPixelClustersLayout<>; +using SiPixelClustersSoAView = SiPixelClustersSoA::View; +using SiPixelClustersSoAConstView = SiPixelClustersSoA::ConstView; + +#endif // DataFormats_SiPixelClusterSoA_SiPixelClustersLayout_h diff --git a/DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h b/DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h new file mode 100644 index 0000000000000..c5e35475b5330 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h @@ -0,0 +1,35 @@ +#ifndef 
DataFormats_SiPixelClusterSoA_interface_alpaka_SiPixelClustersSoACollection_h +#define DataFormats_SiPixelClusterSoA_interface_alpaka_SiPixelClustersSoACollection_h + +#include + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using SiPixelClustersSoACollection = + std::conditional_t, SiPixelClustersHost, SiPixelClustersDevice>; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue &queue, SiPixelClustersDevice const &srcData) { + SiPixelClustersHost dstData(srcData->metadata().size(), queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + dstData.setNClusters(srcData.nClusters(), srcData.offsetBPIX2()); +#ifdef GPU_DEBUG //keeping this untiil copies are in the Tracer + printf("SiPixelClustersSoACollection: I'm copying to host.\n"); +#endif + return dstData; + } + }; +} // namespace cms::alpakatools + +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(SiPixelClustersSoACollection, SiPixelClustersHost); +#endif // DataFormats_SiPixelClusterSoA_interface_alpaka_SiPixelClustersSoACollection_h diff --git a/DataFormats/SiPixelClusterSoA/src/alpaka/classes_cuda.h b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_cuda.h new file mode 100644 index 0000000000000..e54864699fb73 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_cuda.h @@ -0,0 +1,8 @@ +#ifndef DataFormats_SiPixelClusterSoA_src_alpaka_classes_cuda_h +#define DataFormats_SiPixelClusterSoA_src_alpaka_classes_cuda_h + +#include "DataFormats/Common/interface/Wrapper.h" 
+#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" + +#endif // DataFormats_SiPixelClusterSoA_src_alpaka_classes_cuda_h diff --git a/DataFormats/SiPixelClusterSoA/src/alpaka/classes_cuda_def.xml b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_cuda_def.xml new file mode 100644 index 0000000000000..b9858c3fbffdd --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_cuda_def.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/SiPixelClusterSoA/src/alpaka/classes_rocm.h b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_rocm.h new file mode 100644 index 0000000000000..bd510fa1618b0 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_rocm.h @@ -0,0 +1,8 @@ +#ifndef DataFormats_SiPixelClusterSoA_src_alpaka_classes_rocm_h +#define DataFormats_SiPixelClusterSoA_src_alpaka_classes_rocm_h + +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" + +#endif // DataFormats_SiPixelClusterSoA_src_alpaka_classes_rocm_h diff --git a/DataFormats/SiPixelClusterSoA/src/alpaka/classes_rocm_def.xml b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_rocm_def.xml new file mode 100644 index 0000000000000..d27887904579c --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/alpaka/classes_rocm_def.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/SiPixelClusterSoA/src/classes.cc b/DataFormats/SiPixelClusterSoA/src/classes.cc new file mode 100644 index 0000000000000..70b4f7b100cb4 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/classes.cc @@ -0,0 +1,4 @@ +#include "DataFormats/Portable/interface/PortableHostCollectionReadRules.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" + +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection); \ No newline at end of file diff --git 
a/DataFormats/SiPixelClusterSoA/src/classes.h b/DataFormats/SiPixelClusterSoA/src/classes.h new file mode 100644 index 0000000000000..8514c7732375b --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/classes.h @@ -0,0 +1,7 @@ +#ifndef DataFormats_SiPixelClusterSoA_src_classes_h +#define DataFormats_SiPixelClusterSoA_src_classes_h + +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h" + +#endif // DataFormats_SiPixelClusterSoA_src_classes_h diff --git a/DataFormats/SiPixelClusterSoA/src/classes_def.xml b/DataFormats/SiPixelClusterSoA/src/classes_def.xml new file mode 100644 index 0000000000000..96b9df2725473 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/src/classes_def.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/DataFormats/SiPixelClusterSoA/test/BuildFile.xml b/DataFormats/SiPixelClusterSoA/test/BuildFile.xml new file mode 100644 index 0000000000000..ed54aae76ecab --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/test/BuildFile.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/SiPixelClusterSoA/test/alpaka/Clusters_test.cc b/DataFormats/SiPixelClusterSoA/test/alpaka/Clusters_test.cc new file mode 100644 index 0000000000000..d96469858b916 --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/test/alpaka/Clusters_test.cc @@ -0,0 +1,45 @@ +#include + +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" + +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include 
"HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace testClusterSoA { + + void runKernels(SiPixelClustersSoAView clust_view, Queue& queue); + } +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + // Inner scope to deallocate memory before destroying the stream + { + // Instantiate tracks on device. PortableDeviceCollection allocates + // SoA on device automatically. + SiPixelClustersSoACollection clusters_d(100, queue); + testClusterSoA::runKernels(clusters_d.view(), queue); + + // Instantate tracks on host. This is where the data will be + // copied to from device. + SiPixelClustersHost clusters_h(clusters_d.view().metadata().size(), queue); + + std::cout << clusters_h.view().metadata().size() << std::endl; + alpaka::memcpy(queue, clusters_h.buffer(), clusters_d.const_buffer()); + alpaka::wait(queue); + } + + return 0; +} diff --git a/DataFormats/SiPixelClusterSoA/test/alpaka/Clusters_test.dev.cc b/DataFormats/SiPixelClusterSoA/test/alpaka/Clusters_test.dev.cc new file mode 100644 index 0000000000000..684380dcbdfbc --- /dev/null +++ b/DataFormats/SiPixelClusterSoA/test/alpaka/Clusters_test.dev.cc @@ -0,0 +1,49 @@ +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" + +using namespace alpaka; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + namespace testClusterSoA { + + class TestFillKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, 
SiPixelClustersSoAView clust_view) const { + for (int32_t j : elements_with_stride(acc, clust_view.metadata().size())) { + clust_view[j].moduleStart() = j; + clust_view[j].clusInModule() = j * 2; + clust_view[j].moduleId() = j * 3; + clust_view[j].clusModuleStart() = j * 4; + } + } + }; + + class TestVerifyKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, SiPixelClustersSoAConstView clust_view) const { + for (uint32_t j : elements_with_stride(acc, clust_view.metadata().size())) { + assert(clust_view[j].moduleStart() == j); + assert(clust_view[j].clusInModule() == j * 2); + assert(clust_view[j].moduleId() == j * 3); + assert(clust_view[j].clusModuleStart() == j * 4); + } + } + }; + + void runKernels(SiPixelClustersSoAView clust_view, Queue& queue) { + uint32_t items = 64; + uint32_t groups = divide_up_by(clust_view.metadata().size(), items); + auto workDiv = make_workdiv(groups, items); + alpaka::exec(queue, workDiv, TestFillKernel{}, clust_view); + alpaka::exec(queue, workDiv, TestVerifyKernel{}, clust_view); + } + + } // namespace testClusterSoA +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/DataFormats/SiPixelDigi/interface/SiPixelDigisSoA.h b/DataFormats/SiPixelDigi/interface/SiPixelDigisSoA.h index f352754e31d17..a97dfadea52c4 100644 --- a/DataFormats/SiPixelDigi/interface/SiPixelDigisSoA.h +++ b/DataFormats/SiPixelDigi/interface/SiPixelDigisSoA.h @@ -5,36 +5,46 @@ #include #include -// The main purpose of this class is to deliver digi and cluster data -// from an EDProducer that transfers the data from GPU to host to an -// EDProducer that converts the SoA to legacy data products. The class -// is independent of any GPU technology, and in prunciple could be -// produced by host code, and be used for other purposes than -// conversion-to-legacy as well. 
-class SiPixelDigisSoA { -public: - SiPixelDigisSoA() = default; - explicit SiPixelDigisSoA( - size_t nDigis, const uint32_t* pdigi, const uint32_t* rawIdArr, const uint16_t* adc, const int32_t* clus); - ~SiPixelDigisSoA() = default; - - auto size() const { return pdigi_.size(); } - - uint32_t pdigi(size_t i) const { return pdigi_[i]; } - uint32_t rawIdArr(size_t i) const { return rawIdArr_[i]; } - uint16_t adc(size_t i) const { return adc_[i]; } - int32_t clus(size_t i) const { return clus_[i]; } - - const std::vector& pdigiVector() const { return pdigi_; } - const std::vector& rawIdArrVector() const { return rawIdArr_; } - const std::vector& adcVector() const { return adc_; } - const std::vector& clusVector() const { return clus_; } - -private: - std::vector pdigi_; // packed digi (row, col, adc) of each pixel - std::vector rawIdArr_; // DetId of each pixel - std::vector adc_; // ADC of each pixel - std::vector clus_; // cluster id of each pixel -}; - -#endif +namespace legacy { + + // The main purpose of this class is to deliver digi and cluster data + // from an EDProducer that transfers the data from GPU to host to an + // EDProducer that converts the SoA to legacy data products. The class + // is independent of any GPU technology, and in prunciple could be + // produced by host code, and be used for other purposes than + // conversion-to-legacy as well. 
+ + class SiPixelDigisSoA { + public: + SiPixelDigisSoA() = default; + explicit SiPixelDigisSoA( + size_t nDigis, const uint32_t* pdigi, const uint32_t* rawIdArr, const uint16_t* adc, const int32_t* clus) + : pdigi_(pdigi, pdigi + nDigis), + rawIdArr_(rawIdArr, rawIdArr + nDigis), + adc_(adc, adc + nDigis), + clus_(clus, clus + nDigis) {} + + ~SiPixelDigisSoA() = default; + + auto size() const { return pdigi_.size(); } + + uint32_t pdigi(size_t i) const { return pdigi_[i]; } + uint32_t rawIdArr(size_t i) const { return rawIdArr_[i]; } + uint16_t adc(size_t i) const { return adc_[i]; } + int32_t clus(size_t i) const { return clus_[i]; } + + const std::vector& pdigiVector() const { return pdigi_; } + const std::vector& rawIdArrVector() const { return rawIdArr_; } + const std::vector& adcVector() const { return adc_; } + const std::vector& clusVector() const { return clus_; } + + private: + std::vector pdigi_; // packed digi (row, col, adc) of each pixel + std::vector rawIdArr_; // DetId of each pixel + std::vector adc_; // ADC of each pixel + std::vector clus_; // cluster id of each pixel + }; + +} // namespace legacy + +#endif // DataFormats_SiPixelDigi_interface_SiPixelDigisSoA_h diff --git a/DataFormats/SiPixelDigi/src/SiPixelDigisSoA.cc b/DataFormats/SiPixelDigi/src/SiPixelDigisSoA.cc deleted file mode 100644 index b95c004a50a25..0000000000000 --- a/DataFormats/SiPixelDigi/src/SiPixelDigisSoA.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include "DataFormats/SiPixelDigi/interface/SiPixelDigisSoA.h" - -#include - -SiPixelDigisSoA::SiPixelDigisSoA( - size_t nDigis, const uint32_t *pdigi, const uint32_t *rawIdArr, const uint16_t *adc, const int32_t *clus) - : pdigi_(pdigi, pdigi + nDigis), - rawIdArr_(rawIdArr, rawIdArr + nDigis), - adc_(adc, adc + nDigis), - clus_(clus, clus + nDigis) {} diff --git a/DataFormats/SiPixelDigi/src/classes.h b/DataFormats/SiPixelDigi/src/classes.h index 1360ee6e469d9..be707668d0dfc 100644 --- a/DataFormats/SiPixelDigi/src/classes.h +++ 
b/DataFormats/SiPixelDigi/src/classes.h @@ -1,6 +1,8 @@ #ifndef SIPIXELDIGI_CLASSES_H #define SIPIXELDIGI_CLASSES_H +#include + #include "DataFormats/SiPixelDigi/interface/PixelDigi.h" #include "DataFormats/SiPixelDigi/interface/PixelDigiCollection.h" #include "DataFormats/SiPixelDigi/interface/SiPixelCalibDigi.h" @@ -9,6 +11,5 @@ #include "DataFormats/Common/interface/Wrapper.h" #include "DataFormats/Common/interface/DetSetVector.h" #include "DataFormats/Common/interface/DetSetVectorNew.h" -#include #endif // SIPIXELDIGI_CLASSES_H diff --git a/DataFormats/SiPixelDigi/src/classes_def.xml b/DataFormats/SiPixelDigi/src/classes_def.xml index e6bc08de161fa..697b6c467d799 100755 --- a/DataFormats/SiPixelDigi/src/classes_def.xml +++ b/DataFormats/SiPixelDigi/src/classes_def.xml @@ -50,6 +50,6 @@ - - + + diff --git a/DataFormats/SiPixelDigiSoA/BuildFile.xml b/DataFormats/SiPixelDigiSoA/BuildFile.xml new file mode 100644 index 0000000000000..538802f92c3ca --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/BuildFile.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h new file mode 100644 index 0000000000000..36c7d0be7e88a --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h @@ -0,0 +1,33 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsDevice_h +#define DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsDevice_h + +#include + +#include + +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +template +class SiPixelDigiErrorsDevice : public PortableDeviceCollection { +public: + SiPixelDigiErrorsDevice() = 
default; + template + explicit SiPixelDigiErrorsDevice(size_t maxFedWords, TQueue queue) + : PortableDeviceCollection(maxFedWords, queue), maxFedWords_(maxFedWords) {} + + // Constructor which specifies the SoA size + explicit SiPixelDigiErrorsDevice(size_t maxFedWords, TDev const& device) + : PortableDeviceCollection(maxFedWords, device) {} + + auto& error_data() const { return (*this->view().pixelErrors()); } + auto maxFedWords() const { return maxFedWords_; } + +private: + int maxFedWords_; +}; + +#endif // DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsDevice_h diff --git a/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h new file mode 100644 index 0000000000000..ac706dea4b544 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h @@ -0,0 +1,30 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsHost_h +#define DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsHost_h + +#include + +#include + +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +class SiPixelDigiErrorsHost : public PortableHostCollection { +public: + SiPixelDigiErrorsHost() = default; + template + explicit SiPixelDigiErrorsHost(int maxFedWords, TQueue queue) + : PortableHostCollection(maxFedWords, queue), maxFedWords_(maxFedWords) {} + + int maxFedWords() const { return maxFedWords_; } + + auto& error_data() { return (*view().pixelErrors()); } + auto const& error_data() const { return (*view().pixelErrors()); } + +private: + int maxFedWords_ = 0; +}; + +#endif // DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsHost_h diff --git 
a/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h new file mode 100644 index 0000000000000..b6398bc840c5b --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h @@ -0,0 +1,14 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsSoA_h +#define DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsSoA_h + +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" + +GENERATE_SOA_LAYOUT(SiPixelDigiErrorsLayout, SOA_COLUMN(SiPixelErrorCompact, pixelErrors), SOA_SCALAR(uint32_t, size)) + +using SiPixelDigiErrorsSoA = SiPixelDigiErrorsLayout<>; +using SiPixelDigiErrorsSoAView = SiPixelDigiErrorsSoA::View; +using SiPixelDigiErrorsSoAConstView = SiPixelDigiErrorsSoA::ConstView; + +#endif // DataFormats_SiPixelDigiSoA_interface_SiPixelDigiErrorsSoA_h diff --git a/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h new file mode 100644 index 0000000000000..1748069685923 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h @@ -0,0 +1,37 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_SiPixelDigisDevice_h +#define DataFormats_SiPixelDigiSoA_interface_SiPixelDigisDevice_h + +#include + +#include + +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +template +class SiPixelDigisDevice : public PortableDeviceCollection { +public: + SiPixelDigisDevice() = default; + template + explicit SiPixelDigisDevice(size_t maxFedWords, TQueue queue) + : PortableDeviceCollection(maxFedWords + 1, queue) {} + + // Constructor which specifies the SoA size + explicit SiPixelDigisDevice(size_t maxFedWords, TDev 
const &device) + : PortableDeviceCollection(maxFedWords + 1, device) {} + + void setNModulesDigis(uint32_t nModules, uint32_t nDigis) { + nModules_h = nModules; + nDigis_h = nDigis; + } + + uint32_t nModules() const { return nModules_h; } + uint32_t nDigis() const { return nDigis_h; } + +private: + uint32_t nModules_h = 0; + uint32_t nDigis_h = 0; +}; + +#endif // DataFormats_SiPixelDigiSoA_interface_SiPixelDigisDevice_h diff --git a/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h new file mode 100644 index 0000000000000..4e4650efac1cb --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h @@ -0,0 +1,30 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_SiPixelDigisHost_h +#define DataFormats_SiPixelDigiSoA_interface_SiPixelDigisHost_h + +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" + +// TODO: The class is created via inheritance of the PortableDeviceCollection. +// This is generally discouraged, and should be done via composition. 
+// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 +class SiPixelDigisHost : public PortableHostCollection { +public: + SiPixelDigisHost() = default; + template + explicit SiPixelDigisHost(size_t maxFedWords, TQueue queue) + : PortableHostCollection(maxFedWords + 1, queue) {} + + void setNModulesDigis(uint32_t nModules, uint32_t nDigis) { + nModules_h = nModules; + nDigis_h = nDigis; + } + + uint32_t nModules() const { return nModules_h; } + uint32_t nDigis() const { return nDigis_h; } + +private: + uint32_t nModules_h = 0; + uint32_t nDigis_h = 0; +}; + +#endif // DataFormats_SiPixelDigiSoA_interface_SiPixelDigisHost_h diff --git a/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h new file mode 100644 index 0000000000000..2c7c5e1079513 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h @@ -0,0 +1,19 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_SiPixelDigisSoA_h +#define DataFormats_SiPixelDigiSoA_interface_SiPixelDigisSoA_h + +#include "DataFormats/SoATemplate/interface/SoALayout.h" + +GENERATE_SOA_LAYOUT(SiPixelDigisLayout, + SOA_COLUMN(int32_t, clus), + SOA_COLUMN(uint32_t, pdigi), + SOA_COLUMN(uint32_t, rawIdArr), + SOA_COLUMN(uint16_t, adc), + SOA_COLUMN(uint16_t, xx), + SOA_COLUMN(uint16_t, yy), + SOA_COLUMN(uint16_t, moduleId)) + +using SiPixelDigisSoA = SiPixelDigisLayout<>; +using SiPixelDigisSoAView = SiPixelDigisSoA::View; +using SiPixelDigisSoAConstView = SiPixelDigisSoA::ConstView; + +#endif // DataFormats_SiPixelDigiSoA_interface_SiPixelDigisSoA_h diff --git a/DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h b/DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h new file mode 100644 index 0000000000000..673a22bd23a1e --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h @@ -0,0 +1,39 @@ +#ifndef 
DataFormats_SiPixelDigiSoA_interface_alpaka_SiPixelDigiErrorsSoACollection_h +#define DataFormats_SiPixelDigiSoA_interface_alpaka_SiPixelDigiErrorsSoACollection_h + +#include + +#include + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using SiPixelDigiErrorsSoACollection = + std::conditional_t, SiPixelDigiErrorsHost, SiPixelDigiErrorsDevice>; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue& queue, SiPixelDigiErrorsDevice const& srcData) { + SiPixelDigiErrorsHost dstData(srcData.maxFedWords(), queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); +#ifdef GPU_DEBUG + printf("SiPixelDigiErrorsSoACollection: I'm copying to host.\n"); +#endif + return dstData; + } + }; +} // namespace cms::alpakatools + +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(SiPixelDigiErrorsSoACollection, SiPixelDigiErrorsHost); + +#endif // DataFormats_SiPixelDigiSoA_interface_alpaka_SiPixelDigiErrorsSoACollection_h diff --git a/DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h b/DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h new file mode 100644 index 0000000000000..2fe60454d553f --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h @@ -0,0 +1,36 @@ +#ifndef DataFormats_SiPixelDigiSoA_interface_alpaka_SiPixelDigisSoACollection_h +#define DataFormats_SiPixelDigiSoA_interface_alpaka_SiPixelDigisSoACollection_h + +#include + +#include + +#include 
"DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using SiPixelDigisSoACollection = + std::conditional_t, SiPixelDigisHost, SiPixelDigisDevice>; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue &queue, SiPixelDigisDevice const &srcData) { + SiPixelDigisHost dstData(srcData.view().metadata().size(), queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + dstData.setNModulesDigis(srcData.nModules(), srcData.nDigis()); + return dstData; + } + }; +} // namespace cms::alpakatools + +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(SiPixelDigisSoACollection, SiPixelDigisHost); + +#endif // DataFormats_SiPixelDigiSoA_interface_alpaka_SiPixelDigisSoACollection_h diff --git a/DataFormats/SiPixelDigiSoA/src/alpaka/classes_cuda.h b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_cuda.h new file mode 100644 index 0000000000000..d2fb20448545c --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_cuda.h @@ -0,0 +1,12 @@ +#ifndef DataFormats_SiPixelDigiSoA_Alpaka_Classes_cuda_h +#define DataFormats_SiPixelDigiSoA_Alpaka_Classes_cuda_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" + +#endif // 
DataFormats_SiPixelDigiSoA_src_alpaka_classes_cuda_h diff --git a/DataFormats/SiPixelDigiSoA/src/alpaka/classes_cuda_def.xml b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_cuda_def.xml new file mode 100644 index 0000000000000..7315bc37eeb1b --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_cuda_def.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/DataFormats/SiPixelDigiSoA/src/alpaka/classes_rocm.h b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_rocm.h new file mode 100644 index 0000000000000..db5bf9385f99d --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_rocm.h @@ -0,0 +1,13 @@ +#ifndef DataFormats_SiPixelDigiSoA_Alpaka_Classes_cuda_h +#define DataFormats_SiPixelDigiSoA_Alpaka_Classes_cuda_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" + +#endif // DataFormats_SiPixelDigiSoA_src_alpaka_classes_cuda_h diff --git a/DataFormats/SiPixelDigiSoA/src/alpaka/classes_rocm_def.xml b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_rocm_def.xml new file mode 100644 index 0000000000000..21deb7bbd46dc --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/alpaka/classes_rocm_def.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/DataFormats/SiPixelDigiSoA/src/classes.cc b/DataFormats/SiPixelDigiSoA/src/classes.cc new file mode 100644 index 0000000000000..9022a3102107e --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/classes.cc @@ -0,0 +1,6 @@ +#include 
"DataFormats/Portable/interface/PortableHostCollectionReadRules.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" + +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection); +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection); diff --git a/DataFormats/SiPixelDigiSoA/src/classes.h b/DataFormats/SiPixelDigiSoA/src/classes.h new file mode 100644 index 0000000000000..427a4c972863d --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/classes.h @@ -0,0 +1,10 @@ +#ifndef DataFormats_SiPixelDigisSoA_src_classes_h +#define DataFormats_SiPixelDigisSoA_src_classes_h + +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" + +#endif // DataFormats_SiPixelClusterSoA_src_classes_h diff --git a/DataFormats/SiPixelDigiSoA/src/classes_def.xml b/DataFormats/SiPixelDigiSoA/src/classes_def.xml new file mode 100644 index 0000000000000..c68be4a01bf5a --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/src/classes_def.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/DataFormats/SiPixelDigiSoA/test/BuildFile.xml b/DataFormats/SiPixelDigiSoA/test/BuildFile.xml new file mode 100644 index 0000000000000..b4bd8297f5011 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/test/BuildFile.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/DataFormats/SiPixelDigiSoA/test/alpaka/DigiErrors_test.cc b/DataFormats/SiPixelDigiSoA/test/alpaka/DigiErrors_test.cc new file mode 100644 index 0000000000000..4703e68630f35 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/test/alpaka/DigiErrors_test.cc @@ -0,0 +1,54 @@ +#include +#include + +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h" +#include 
"DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" + +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" + +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace testDigisSoA { + + void runKernels(SiPixelDigiErrorsSoAView digiErrors_view, Queue& queue); + } +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + // Inner scope to deallocate memory before destroying the stream + { + // Instantiate tracks on device. PortableDeviceCollection allocates + // SoA on device automatically. + SiPixelDigiErrorsSoACollection digiErrors_d(1000, queue); + testDigisSoA::runKernels(digiErrors_d.view(), queue); + + // Instantate tracks on host. This is where the data will be + // copied to from device. 
+ SiPixelDigiErrorsHost digiErrors_h(digiErrors_d.view().metadata().size(), queue); + alpaka::memcpy(queue, digiErrors_h.buffer(), digiErrors_d.const_buffer()); + std::cout << "digiErrors_h.view().metadata().size(): " << digiErrors_h.view().metadata().size() << std::endl; + std::cout << "digiErrors_h.view()[100].pixelErrors().rawId: " << digiErrors_h.view()[100].pixelErrors().rawId + << std::endl; + std::cout << "digiErrors_h.view()[100].pixelErrors().word: " << digiErrors_h.view()[100].pixelErrors().word + << std::endl; + std::cout << "digiErrors_h.view()[100].pixelErrors().errorType: " + << digiErrors_h.view()[100].pixelErrors().errorType << std::endl; + std::cout << "digiErrors_h.view()[100].pixelErrors().fedId: " << digiErrors_h.view()[100].pixelErrors().fedId + << std::endl; + alpaka::wait(queue); + } + + return 0; +} diff --git a/DataFormats/SiPixelDigiSoA/test/alpaka/DigiErrors_test.dev.cc b/DataFormats/SiPixelDigiSoA/test/alpaka/DigiErrors_test.dev.cc new file mode 100644 index 0000000000000..c7add92dab018 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/test/alpaka/DigiErrors_test.dev.cc @@ -0,0 +1,50 @@ +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" + +using namespace alpaka; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + namespace testDigisSoA { + + class TestFillKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, SiPixelDigiErrorsSoAView digiErrors_view) const { + for (uint32_t j : elements_with_stride(acc, digiErrors_view.metadata().size())) { + digiErrors_view[j].pixelErrors().rawId = j; + 
digiErrors_view[j].pixelErrors().word = j; + digiErrors_view[j].pixelErrors().errorType = j; + digiErrors_view[j].pixelErrors().fedId = j; + } + } + }; + + class TestVerifyKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, SiPixelDigiErrorsSoAConstView digiErrors_view) const { + for (uint32_t j : elements_with_stride(acc, digiErrors_view.metadata().size())) { + assert(digiErrors_view[j].pixelErrors().rawId == j); + assert(digiErrors_view[j].pixelErrors().word == j); + assert(digiErrors_view[j].pixelErrors().errorType == j % 256); + assert(digiErrors_view[j].pixelErrors().fedId == j % 256); + } + } + }; + + void runKernels(SiPixelDigiErrorsSoAView digiErrors_view, Queue& queue) { + uint32_t items = 64; + uint32_t groups = divide_up_by(digiErrors_view.metadata().size(), items); + auto workDiv = make_workdiv(groups, items); + alpaka::exec(queue, workDiv, TestFillKernel{}, digiErrors_view); + alpaka::exec(queue, workDiv, TestVerifyKernel{}, digiErrors_view); + } + + } // namespace testDigisSoA +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/DataFormats/SiPixelDigiSoA/test/alpaka/Digis_test.cc b/DataFormats/SiPixelDigiSoA/test/alpaka/Digis_test.cc new file mode 100644 index 0000000000000..f1d9ce9cd2b37 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/test/alpaka/Digis_test.cc @@ -0,0 +1,48 @@ +#include + +#include + +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include 
"HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace testDigisSoA { + + void runKernels(SiPixelDigisSoAView digis_view, Queue& queue); + + } +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + // Inner scope to deallocate memory before destroying the stream + { + // Instantiate tracks on device. PortableDeviceCollection allocates + // SoA on device automatically. + SiPixelDigisSoACollection digis_d(1000, queue); + testDigisSoA::runKernels(digis_d.view(), queue); + + // Instantate tracks on host. This is where the data will be + // copied to from device. + SiPixelDigisHost digis_h(digis_d.view().metadata().size(), queue); + + std::cout << digis_h.view().metadata().size() << std::endl; + alpaka::memcpy(queue, digis_h.buffer(), digis_d.const_buffer()); + alpaka::wait(queue); + } + + return 0; +} diff --git a/DataFormats/SiPixelDigiSoA/test/alpaka/Digis_test.dev.cc b/DataFormats/SiPixelDigiSoA/test/alpaka/Digis_test.dev.cc new file mode 100644 index 0000000000000..9bb35bfc4d7f8 --- /dev/null +++ b/DataFormats/SiPixelDigiSoA/test/alpaka/Digis_test.dev.cc @@ -0,0 +1,49 @@ +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" + +using namespace alpaka; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + namespace testDigisSoA { + + class TestFillKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, SiPixelDigisSoAView digi_view) const { + for (int32_t j : 
elements_with_stride(acc, digi_view.metadata().size())) { + digi_view[j].clus() = j; + digi_view[j].rawIdArr() = j * 2; + digi_view[j].xx() = j * 3; + digi_view[j].moduleId() = j * 4; + } + } + }; + + class TestVerifyKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, SiPixelDigisSoAConstView digi_view) const { + for (uint32_t j : elements_with_stride(acc, digi_view.metadata().size())) { + assert(digi_view[j].clus() == int(j)); + assert(digi_view[j].rawIdArr() == j * 2); + assert(digi_view[j].xx() == j * 3); + assert(digi_view[j].moduleId() == j * 4); + } + } + }; + + void runKernels(SiPixelDigisSoAView digi_view, Queue& queue) { + uint32_t items = 64; + uint32_t groups = divide_up_by(digi_view.metadata().size(), items); + auto workDiv = make_workdiv(groups, items); + alpaka::exec(queue, workDiv, TestFillKernel{}, digi_view); + alpaka::exec(queue, workDiv, TestVerifyKernel{}, digi_view); + } + + } // namespace testDigisSoA +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/DataFormats/SiPixelRawData/src/classes.h b/DataFormats/SiPixelRawData/src/classes.h index 7a07e9f35f388..9adc3a440e27b 100644 --- a/DataFormats/SiPixelRawData/src/classes.h +++ b/DataFormats/SiPixelRawData/src/classes.h @@ -1,10 +1,13 @@ #ifndef SIPIXELRAWDATA_CLASSES_H #define SIPIXELRAWDATA_CLASSES_H -#include "DataFormats/SiPixelRawData/interface/SiPixelRawDataError.h" -#include "DataFormats/SiPixelRawData/interface/SiPixelErrorsSoA.h" -#include "DataFormats/Common/interface/Wrapper.h" -#include "DataFormats/Common/interface/DetSetVector.h" #include +#include "DataFormats/Common/interface/DetSetVector.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorsSoA.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelFormatterErrors.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelRawDataError.h" + #endif // 
SIPIXELRAWDATA_CLASSES_H diff --git a/DataFormats/SiPixelRawData/src/classes_def.xml b/DataFormats/SiPixelRawData/src/classes_def.xml index fd2b5dcf27965..3535bbc430a53 100644 --- a/DataFormats/SiPixelRawData/src/classes_def.xml +++ b/DataFormats/SiPixelRawData/src/classes_def.xml @@ -5,16 +5,23 @@ - + + - + + + + + - - - - - + + + + + + + diff --git a/DataFormats/SiStripCommon/interface/SiStripFecKey.h b/DataFormats/SiStripCommon/interface/SiStripFecKey.h index 527588e943333..516835ded7419 100644 --- a/DataFormats/SiStripCommon/interface/SiStripFecKey.h +++ b/DataFormats/SiStripCommon/interface/SiStripFecKey.h @@ -73,6 +73,9 @@ class SiStripFecKey : public SiStripKey { /** Default constructor */ SiStripFecKey(); + /** Assignment operator */ + SiStripFecKey& operator=(const SiStripFecKey&) = default; + // ---------- Control structure ---------- /** Returns VME crate. */ diff --git a/DataFormats/SiStripCommon/interface/SiStripFedKey.h b/DataFormats/SiStripCommon/interface/SiStripFedKey.h index e5466e8ceac71..71cf30ea189a6 100644 --- a/DataFormats/SiStripCommon/interface/SiStripFedKey.h +++ b/DataFormats/SiStripCommon/interface/SiStripFedKey.h @@ -78,6 +78,9 @@ class SiStripFedKey : public SiStripKey { /** Default constructor */ SiStripFedKey(); + /** Assignment operator */ + SiStripFedKey& operator=(const SiStripFedKey&) = default; + // ---------- Public interface to member data ---------- /** Returns FED id. */ diff --git a/DataFormats/SoATemplate/interface/SoACommon.h b/DataFormats/SoATemplate/interface/SoACommon.h index 8ab80fc88c697..49f5049377b44 100644 --- a/DataFormats/SoATemplate/interface/SoACommon.h +++ b/DataFormats/SoATemplate/interface/SoACommon.h @@ -52,6 +52,8 @@ #define _VALUE_TYPE_EIGEN_COLUMN 2 /* The size type need to be "hardcoded" in the template parameters for classes serialized by ROOT */ +/* In practice, using a typedef as a template parameter to the Layout or its ViewTemplateFreeParams member + * declaration fails ROOT dictionary generation. 
*/ #define CMS_SOA_BYTE_SIZE_TYPE std::size_t namespace cms::soa { @@ -132,6 +134,8 @@ namespace cms::soa { return reinterpret_cast(addr) % alignment; } + TupleOrPointerType tupleOrPointer() { return addr_; } + public: // scalar or column ValueType const* addr_ = nullptr; @@ -166,6 +170,8 @@ namespace cms::soa { return reinterpret_cast(addr) % alignment; } + TupleOrPointerType tupleOrPointer() { return {addr_, stride_}; } + public: // address and stride ScalarType const* addr_ = nullptr; @@ -201,6 +207,8 @@ namespace cms::soa { return reinterpret_cast(addr) % alignment; } + TupleOrPointerType tupleOrPointer() { return addr_; } + public: // scalar or column ValueType* addr_ = nullptr; @@ -234,6 +242,8 @@ namespace cms::soa { return reinterpret_cast(addr) % alignment; } + TupleOrPointerType tupleOrPointer() { return {addr_, stride_}; } + public: // address and stride ScalarType* addr_ = nullptr; diff --git a/DataFormats/SoATemplate/interface/SoAView.h b/DataFormats/SoATemplate/interface/SoAView.h index 8cf0307c71ad0..f219bd137a0cc 100644 --- a/DataFormats/SoATemplate/interface/SoAView.h +++ b/DataFormats/SoATemplate/interface/SoAView.h @@ -231,6 +231,15 @@ namespace cms::soa { #define _DECLARE_VIEW_MEMBER_LIST(R, DATA, LAYOUT_MEMBER_NAME) \ BOOST_PP_EXPAND(_DECLARE_VIEW_MEMBER_LIST_IMPL LAYOUT_MEMBER_NAME) +/** + * Generator of view member list. + */ +#define _DECLARE_VIEW_OTHER_MEMBER_LIST_IMPL(LAYOUT, MEMBER, NAME) \ + (const_cast_SoAParametersImpl(other.BOOST_PP_CAT(NAME, Parameters_)).tupleOrPointer()) + +#define _DECLARE_VIEW_OTHER_MEMBER_LIST(R, DATA, LAYOUT_MEMBER_NAME) \ + BOOST_PP_EXPAND(_DECLARE_VIEW_OTHER_MEMBER_LIST_IMPL LAYOUT_MEMBER_NAME) + /** * Generator of member initializer for copy constructor. 
*/ @@ -390,7 +399,7 @@ namespace cms::soa { template RestrictQualifier::ParamReturnType \ LOCAL_NAME(size_type _soa_impl_index) { \ if constexpr (rangeChecking == cms::soa::RangeChecking::enabled) { \ - if (_soa_impl_index >= base_type::elements_) \ + if (_soa_impl_index >= base_type::elements_ or _soa_impl_index < 0) \ SOA_THROW_OUT_OF_RANGE("Out of range index in mutable " #LOCAL_NAME "(size_type index)") \ } \ return typename cms::soa::SoAAccessors:: \ @@ -428,7 +437,7 @@ namespace cms::soa { template RestrictQualifier::ParamReturnType \ LOCAL_NAME(size_type _soa_impl_index) const { \ if constexpr (rangeChecking == cms::soa::RangeChecking::enabled) { \ - if (_soa_impl_index >= elements_) \ + if (_soa_impl_index >= elements_ or _soa_impl_index < 0) \ SOA_THROW_OUT_OF_RANGE("Out of range index in const " #LOCAL_NAME "(size_type index)") \ } \ return typename cms::soa::SoAAccessors:: \ @@ -535,6 +544,9 @@ namespace cms::soa { template \ using SoAConstValueWithConf = cms::soa::SoAConstValue; \ \ + template \ + friend struct VIEW; \ + \ /** \ * Helper/friend class allowing SoA introspection. 
\ */ \ @@ -582,6 +594,23 @@ namespace cms::soa { VIEW(VIEW const&) = default; \ VIEW& operator=(VIEW const&) = default; \ \ + /* Copy constructor for other parameters */ \ + template \ + VIEW(VIEW const& other): base_type{other.elements_, \ + _ITERATE_ON_ALL_COMMA(_DECLARE_VIEW_OTHER_MEMBER_LIST, BOOST_PP_EMPTY(), VALUE_LIST) \ + } {} \ + /* Copy operator for other parameters */ \ + template \ + VIEW& operator=(VIEW const& other) { static_cast(*this) = static_cast(other); } \ + \ /* Movable */ \ VIEW(VIEW &&) = default; \ VIEW& operator=(VIEW &&) = default; \ @@ -620,7 +649,7 @@ namespace cms::soa { SOA_HOST_DEVICE SOA_INLINE \ element operator[](size_type _soa_impl_index) { \ if constexpr (rangeChecking == cms::soa::RangeChecking::enabled) { \ - if (_soa_impl_index >= base_type::elements_) \ + if (_soa_impl_index >= base_type::elements_ or _soa_impl_index < 0) \ SOA_THROW_OUT_OF_RANGE("Out of range index in " #VIEW "::operator[]") \ } \ return element{_soa_impl_index, _ITERATE_ON_ALL_COMMA(_DECLARE_VIEW_ELEMENT_CONSTR_CALL, ~, VALUE_LIST)}; \ @@ -673,6 +702,9 @@ namespace cms::soa { template \ friend struct VIEW; \ \ + template \ + friend struct CONST_VIEW; \ + \ /* For CUDA applications, we align to the 128 bytes of the cache lines. \ * See https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-memory-3-0 this is still valid \ * up to compute capability 8.X. 
\ @@ -739,6 +771,23 @@ namespace cms::soa { CONST_VIEW(CONST_VIEW const&) = default; \ CONST_VIEW& operator=(CONST_VIEW const&) = default; \ \ + /* Copy constructor for other parameters */ \ + template \ + CONST_VIEW(CONST_VIEW const& other): CONST_VIEW{other.elements_, \ + _ITERATE_ON_ALL_COMMA(_DECLARE_VIEW_OTHER_MEMBER_LIST, BOOST_PP_EMPTY(), VALUE_LIST) \ + } {} \ + /* Copy operator for other parameters */ \ + template \ + CONST_VIEW& operator=(CONST_VIEW const& other) { *this = other; } \ + \ /* Movable */ \ CONST_VIEW(CONST_VIEW &&) = default; \ CONST_VIEW& operator=(CONST_VIEW &&) = default; \ @@ -761,7 +810,7 @@ namespace cms::soa { SOA_HOST_DEVICE SOA_INLINE \ const_element operator[](size_type _soa_impl_index) const { \ if constexpr (rangeChecking == cms::soa::RangeChecking::enabled) { \ - if (_soa_impl_index >= elements_) \ + if (_soa_impl_index >= elements_ or _soa_impl_index < 0) \ SOA_THROW_OUT_OF_RANGE("Out of range index in " #CONST_VIEW "::operator[]") \ } \ return const_element{ \ diff --git a/DataFormats/SoATemplate/test/SoALayoutAndView_t.cu b/DataFormats/SoATemplate/test/SoALayoutAndView_t.cu index d8a92a56bf37a..0e30f8301f099 100644 --- a/DataFormats/SoATemplate/test/SoALayoutAndView_t.cu +++ b/DataFormats/SoATemplate/test/SoALayoutAndView_t.cu @@ -35,6 +35,8 @@ GENERATE_SOA_LAYOUT(SoAHostDeviceLayoutTemplate, using SoAHostDeviceLayout = SoAHostDeviceLayoutTemplate<>; using SoAHostDeviceView = SoAHostDeviceLayout::View; +using SoAHostDeviceRangeCheckingView = + SoAHostDeviceLayout::ViewTemplate; using SoAHostDeviceConstView = SoAHostDeviceLayout::ConstView; GENERATE_SOA_LAYOUT(SoADeviceOnlyLayoutTemplate, @@ -126,6 +128,12 @@ int main(void) { cudaCheck(cudaMallocHost(&h_buf, hostDeviceSize)); SoAHostDeviceLayout h_soahdLayout(h_buf, numElements); SoAHostDeviceView h_soahd(h_soahdLayout); + + // Validation of range checking variants initialization + SoAHostDeviceRangeCheckingView h_soahdrc(h_soahdLayout); + [[maybe_unused]] 
SoAHostDeviceRangeCheckingView h_soahdrc2 = h_soahdLayout; + SoAHostDeviceRangeCheckingView h_soahdrc3{h_soahd}; + [[maybe_unused]] SoAHostDeviceRangeCheckingView h_soahdrc4 = h_soahd; SoAHostDeviceConstView h_soahd_c(h_soahdLayout); // Alocate buffer, stores and views on the device (single, shared buffer). @@ -248,29 +256,50 @@ int main(void) { } } - // Validation of range checking - try { - // Get a view like the default, except for range checking - SoAHostDeviceLayout::ViewTemplate - soa1viewRangeChecking(h_soahdLayout); - // This should throw an exception - [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; - std::cout << "Fail: expected range-check exception (operator[]) not caught on the host." << std::endl; - assert(false); - } catch (const std::out_of_range&) { - std::cout << "Pass: expected range-check exception (operator[]) successfully caught on the host." << std::endl; + { + // Get a view like the default, except for range checking (direct initialization from layout) + SoAHostDeviceRangeCheckingView soa1viewRangeChecking(h_soahdLayout); + try { + [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (overflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + try { + [[maybe_unused]] auto si = soa1viewRangeChecking[-1]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (underflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size() - 1]; + [[maybe_unused]] auto si2 = soa1viewRangeChecking[0]; + std::cout << "Pass: expected range-check exceptions (view-level index access) successfully caught on the host " + "(layout initialization)." 
+ << std::endl; } - try { - // Get a view like the default, except for range checking - SoAHostDeviceLayout::ViewTemplate - soa1viewRangeChecking(h_soahdLayout); - // This should throw an exception - [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; - std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host." << std::endl; - assert(false); - } catch (const std::out_of_range&) { - std::cout << "Pass: expected range-check exception (view-level index access) successfully caught on the host." + { + // Validation of view initialized range checking view initialization + try { + [[maybe_unused]] auto si = h_soahdrc3[h_soahdrc3.metadata().size()]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (overflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + try { + [[maybe_unused]] auto si = h_soahdrc3[-1]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (underflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + [[maybe_unused]] auto si = h_soahdrc3[h_soahdrc3.metadata().size() - 1]; + [[maybe_unused]] auto si2 = h_soahdrc3[0]; + std::cout << "Pass: expected range-check exceptions (view-level index access) successfully caught on the host " + "(view initialization)." 
<< std::endl; } diff --git a/DataFormats/SoATemplate/test/SoALayoutAndView_t.hip.cc b/DataFormats/SoATemplate/test/SoALayoutAndView_t.hip.cc index d156e9000c518..6a041248f3995 100644 --- a/DataFormats/SoATemplate/test/SoALayoutAndView_t.hip.cc +++ b/DataFormats/SoATemplate/test/SoALayoutAndView_t.hip.cc @@ -37,6 +37,8 @@ GENERATE_SOA_LAYOUT(SoAHostDeviceLayoutTemplate, using SoAHostDeviceLayout = SoAHostDeviceLayoutTemplate<>; using SoAHostDeviceView = SoAHostDeviceLayout::View; +using SoAHostDeviceRangeCheckingView = + SoAHostDeviceLayout::ViewTemplate; using SoAHostDeviceConstView = SoAHostDeviceLayout::ConstView; GENERATE_SOA_LAYOUT(SoADeviceOnlyLayoutTemplate, @@ -125,15 +127,21 @@ int main(void) { // Allocate buffer and store on host size_t hostDeviceSize = SoAHostDeviceLayout::computeDataSize(numElements); std::byte* h_buf = nullptr; - hipCheck(hipMallocHost((void**)&h_buf, hostDeviceSize)); + hipCheck(hipHostMalloc((void**)&h_buf, hostDeviceSize)); SoAHostDeviceLayout h_soahdLayout(h_buf, numElements); SoAHostDeviceView h_soahd(h_soahdLayout); + + // Validation of range checking variants initialization + SoAHostDeviceRangeCheckingView h_soahdrc(h_soahdLayout); + [[maybe_unused]] SoAHostDeviceRangeCheckingView h_soahdrc2 = h_soahdLayout; + [[maybe_unused]] SoAHostDeviceRangeCheckingView h_soahdrc3{h_soahd}; + [[maybe_unused]] SoAHostDeviceRangeCheckingView h_soahdrc4 = h_soahd; SoAHostDeviceConstView h_soahd_c(h_soahdLayout); // Alocate buffer, stores and views on the device (single, shared buffer). 
size_t deviceOnlySize = SoADeviceOnlyLayout::computeDataSize(numElements); std::byte* d_buf = nullptr; - hipCheck(hipMallocHost((void**)&d_buf, hostDeviceSize + deviceOnlySize)); + hipCheck(hipHostMalloc((void**)&d_buf, hostDeviceSize + deviceOnlySize)); SoAHostDeviceLayout d_soahdLayout(d_buf, numElements); SoADeviceOnlyLayout d_soadoLayout(d_soahdLayout.metadata().nextByte(), numElements); SoAHostDeviceView d_soahdView(d_soahdLayout); @@ -250,29 +258,50 @@ int main(void) { } } - // Validation of range checking - try { - // Get a view like the default, except for range checking - SoAHostDeviceLayout::ViewTemplate - soa1viewRangeChecking(h_soahdLayout); - // This should throw an exception - [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; - std::cout << "Fail: expected range-check exception (operator[]) not caught on the host." << std::endl; - assert(false); - } catch (const std::out_of_range&) { - std::cout << "Pass: expected range-check exception (operator[]) successfully caught on the host." << std::endl; + { + // Get a view like the default, except for range checking (direct initialization from layout) + SoAHostDeviceRangeCheckingView soa1viewRangeChecking(h_soahdLayout); + try { + [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (overflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + try { + [[maybe_unused]] auto si = soa1viewRangeChecking[-1]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (underflow)." 
+ << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size() - 1]; + [[maybe_unused]] auto si2 = soa1viewRangeChecking[0]; + std::cout << "Pass: expected range-check exceptions (view-level index access) successfully caught on the host " + "(layout initialization)." + << std::endl; } - try { - // Get a view like the default, except for range checking - SoAHostDeviceLayout::ViewTemplate - soa1viewRangeChecking(h_soahdLayout); - // This should throw an exception - [[maybe_unused]] auto si = soa1viewRangeChecking[soa1viewRangeChecking.metadata().size()]; - std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host." << std::endl; - assert(false); - } catch (const std::out_of_range&) { - std::cout << "Pass: expected range-check exception (view-level index access) successfully caught on the host." + { + // Validation of view initialized range checking view initialization + try { + [[maybe_unused]] auto si = h_soahdrc3[h_soahdrc3.metadata().size()]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (overflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + try { + [[maybe_unused]] auto si = h_soahdrc3[-1]; + std::cout << "Fail: expected range-check exception (view-level index access) not caught on the host (underflow)." + << std::endl; + assert(false); + } catch (const std::out_of_range&) { + } + [[maybe_unused]] auto si = h_soahdrc3[h_soahdrc3.metadata().size() - 1]; + [[maybe_unused]] auto si2 = h_soahdrc3[0]; + std::cout << "Pass: expected range-check exceptions (view-level index access) successfully caught on the host " + "(view initialization)." 
<< std::endl; } diff --git a/DataFormats/TrackSoA/BuildFile.xml b/DataFormats/TrackSoA/BuildFile.xml new file mode 100644 index 0000000000000..ac764cf5b95ff --- /dev/null +++ b/DataFormats/TrackSoA/BuildFile.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/DataFormats/TrackSoA/README.md b/DataFormats/TrackSoA/README.md new file mode 100644 index 0000000000000..433dfb0d656c7 --- /dev/null +++ b/DataFormats/TrackSoA/README.md @@ -0,0 +1,60 @@ +# TrackSoA Data Formats + +`DataFormat`s meant to be used on Host (CPU) or Device (GPU) for +storing information about `TrackSoA`s created during the Pixel-local Reconstruction +chain. It stores data in an SoA manner. + +The host format is inheriting from `DataFormats/Portable/interface/PortableHostCollection.h`, +while the device format is inheriting from `DataFormats/Portable/interface/PortableDeviceCollection.h` + +Both formats use the same SoA Layout (`TrackSoA::Layout`) which is generated +via the `GENERATE_SOA_LAYOUT` macro in the `TrackDefinitions.h` file. + +## Notes + +-`hitIndices` and `detIndices`, instances of `HitContainer`, have been added into the +layout as `SOA_SCALAR`s, meaning that they manage their own data independently from the SoA +`Layout`. This could be improved in the future, if `HitContainer` (aka a `OneToManyAssoc` of fixed size) +is replaced, but there don't seem to be any conflicts in including it in the `Layout` like this. +- Host and Device classes should **not** be created via inheritance, as they're done here, +but via composition. See [this discussion](https://github.com/cms-sw/cmssw/pull/40465#discussion_r1066039309). + +## TracksHost + +The version of the data format to be used for storing `TrackSoA` data on the CPU. +Instances of this class are to be used for: + +- Having a place to copy data to host from device, via `Memcpy`, or +- Running host-side algorithms using data stored in an SoA manner. 
+ +## TracksDevice + +The version of the data format to be used for storing `TrackSoA` data on the GPU. + +Instances of `TracksDevice` are to be created on host and be +used on device only. To do so, the instance's `view()` method is to be called +to pass a `View` to any kernel launched. Accessing data from the `view()` is not +possible on the host side. + +## TracksSoACollection + +Depending on the Alpaka accelerator back-end enabled, `TrackSoACollection` is an alias to either the Host or Device SoA: + +```cpp +template + using TrackSoACollection = std::conditional_t, + TrackSoAHost, + TrackSoADevice>; +``` + +## Utilities + +`alpaka/TrackUtilities.h` contains a collection of methods which were originally +defined as class methods inside either `TrackSoAHeterogeneousT` and `TrajectoryStateSoAT` +which have been adapted to operate on `View` instances, so that they are callable +from within `__global__` kernels, on both CPU and CPU. + +## Use case + +See `test/TrackSoAHeterogeneous_test.cpp` for a simple example of instantiation, +processing and copying from device to host. 
diff --git a/DataFormats/TrackSoA/interface/TrackDefinitions.h b/DataFormats/TrackSoA/interface/TrackDefinitions.h new file mode 100644 index 0000000000000..6bd36b5bd3cd1 --- /dev/null +++ b/DataFormats/TrackSoA/interface/TrackDefinitions.h @@ -0,0 +1,32 @@ +#ifndef DataFormats_Track_interface_TrackDefinitions_h +#define DataFormats_Track_interface_TrackDefinitions_h +#include +#include +#include + +namespace pixelTrack { + + enum class Quality : uint8_t { bad = 0, edup, dup, loose, strict, tight, highPurity, notQuality }; + constexpr uint32_t qualitySize{uint8_t(Quality::notQuality)}; + constexpr std::string_view qualityName[qualitySize]{"bad", "edup", "dup", "loose", "strict", "tight", "highPurity"}; + inline Quality qualityByName(std::string_view name) { + auto qp = std::find(qualityName, qualityName + qualitySize, name) - qualityName; + auto ret = static_cast(qp); + + if (ret == pixelTrack::Quality::notQuality) + throw std::invalid_argument(std::string(name) + " is not a pixelTrack::Quality!"); + + return ret; + } + +#ifdef GPU_SMALL_EVENTS + // kept for testing and debugging + constexpr uint32_t maxNumber() { return 2 * 1024; } +#else + // tested on MC events with 55-75 pileup events + constexpr uint32_t maxNumber() { return 32 * 1024; } +#endif + +} // namespace pixelTrack + +#endif diff --git a/DataFormats/TrackSoA/interface/TracksDevice.h b/DataFormats/TrackSoA/interface/TracksDevice.h new file mode 100644 index 0000000000000..6ef28014bab63 --- /dev/null +++ b/DataFormats/TrackSoA/interface/TracksDevice.h @@ -0,0 +1,38 @@ +#ifndef DataFormats_Track_interface_TracksDevice_h +#define DataFormats_Track_interface_TracksDevice_h + +#include +#include +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" + +// TODO: The class is created via inheritance of the PortableCollection. 
+// This is generally discouraged, and should be done via composition. +// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 +template +class TracksDevice : public PortableDeviceCollection, TDev> { +public: + static constexpr int32_t S = TrackerTraits::maxNumberOfTuples; //TODO: this could be made configurable at runtime + TracksDevice() = default; // necessary for ROOT dictionaries + + using PortableDeviceCollection, TDev>::view; + using PortableDeviceCollection, TDev>::const_view; + using PortableDeviceCollection, TDev>::buffer; + + // Constructor which specifies the SoA size + template + explicit TracksDevice(TQueue& queue) + : PortableDeviceCollection, TDev>(S, queue) {} +}; + +namespace pixelTrack { + + template + using TracksDevicePhase1 = TracksDevice; + template + using TracksDevicePhase2 = TracksDevice; + +} // namespace pixelTrack + +#endif // DataFormats_Track_TracksDevice_H diff --git a/DataFormats/TrackSoA/interface/TracksHost.h b/DataFormats/TrackSoA/interface/TracksHost.h new file mode 100644 index 0000000000000..a8f459eac066c --- /dev/null +++ b/DataFormats/TrackSoA/interface/TracksHost.h @@ -0,0 +1,42 @@ +#ifndef DataFormats_Track_TracksHost_H +#define DataFormats_Track_TracksHost_H + +#include +#include +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" + +// TODO: The class is created via inheritance of the PortableHostCollection. +// This is generally discouraged, and should be done via composition. 
+// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 +template +class TracksHost : public PortableHostCollection> { +public: + static constexpr int32_t S = TrackerTraits::maxNumberOfTuples; //TODO: this could be made configurable at runtime + TracksHost() = default; // Needed for the dictionary; not sure if line above is needed anymore + + using PortableHostCollection>::view; + using PortableHostCollection>::const_view; + using PortableHostCollection>::buffer; + + // Constructor which specifies the SoA size + template + explicit TracksHost(TQueue& queue) + : PortableHostCollection>(S, queue) {} + + // Constructor which specifies the DevHost + explicit TracksHost(alpaka_common::DevHost const& host) + : PortableHostCollection>(S, host) {} +}; + +namespace pixelTrack { + + using TracksHostPhase1 = TracksHost; + using TracksHostPhase2 = TracksHost; + using TracksHostHIonPhase1 = TracksHost; + +} // namespace pixelTrack + +#endif // DataFormats_Track_TracksHost_H diff --git a/DataFormats/TrackSoA/interface/TracksSoA.h b/DataFormats/TrackSoA/interface/TracksSoA.h new file mode 100644 index 0000000000000..ed4ef2e5a4c93 --- /dev/null +++ b/DataFormats/TrackSoA/interface/TracksSoA.h @@ -0,0 +1,102 @@ +#ifndef DataFormats_TrackSoA_interface_TracksSoA_h +#define DataFormats_TrackSoA_interface_TracksSoA_h + +#include + +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/OneToManyAssoc.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" + +namespace reco { + + template + struct TrackSoA { + static constexpr int32_t S = TrackerTraits::maxNumberOfTuples; + static constexpr int32_t H = TrackerTraits::avgHitsPerTrack; + // Aliases in order to not confuse the GENERATE_SOA_LAYOUT + // macro with weird colons and angled brackets. 
+ using Vector5f = Eigen::Matrix; + using Vector15f = Eigen::Matrix; + using Quality = pixelTrack::Quality; + + using hindex_type = uint32_t; + + using HitContainer = cms::alpakatools::OneToManyAssocSequential; + + GENERATE_SOA_LAYOUT(Layout, + SOA_COLUMN(Quality, quality), + SOA_COLUMN(float, chi2), + SOA_COLUMN(int8_t, nLayers), + SOA_COLUMN(float, eta), + SOA_COLUMN(float, pt), + // state at the beam spot: {phi, tip, 1/pt, cotan(theta), zip} + SOA_EIGEN_COLUMN(Vector5f, state), + SOA_EIGEN_COLUMN(Vector15f, covariance), + SOA_SCALAR(int, nTracks), + SOA_SCALAR(HitContainer, hitIndices), + SOA_SCALAR(HitContainer, detIndices)) + }; + + template + using TrackLayout = typename reco::TrackSoA::template Layout<>; + template + using TrackSoAView = typename reco::TrackSoA::template Layout<>::View; + template + using TrackSoAConstView = typename reco::TrackSoA::template Layout<>::ConstView; + + /* Implement a type trait to identify the specialisations of TrackSoAConstView + * + * This is done explicitly for all possible pixel topologies, because we did not find a way + * to use template deduction with a partial specialisation. + */ + template + struct IsTrackSoAConstView : std::false_type {}; + template <> + struct IsTrackSoAConstView> : std::true_type {}; + template <> + struct IsTrackSoAConstView> : std::true_type {}; + template <> + struct IsTrackSoAConstView> : std::true_type {}; + template <> + struct IsTrackSoAConstView> : std::true_type {}; + template <> + struct IsTrackSoAConstView> : std::true_type {}; + template <> + struct IsTrackSoAConstView> : std::true_type {}; + + template + constexpr bool isTrackSoAConstView = IsTrackSoAConstView::value; + + template >> + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr float charge(ConstView const& tracks, int32_t i) { + //was: std::copysign(1.f, tracks[i].state()(2)). 
Will be constexpr with C++23 + float v = tracks[i].state()(2); + return float((0.0f < v) - (v < 0.0f)); + } + + template >> + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr float phi(ConstView const& tracks, int32_t i) { + return tracks[i].state()(0); + } + + template >> + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr float tip(ConstView const& tracks, int32_t i) { + return tracks[i].state()(1); + } + + template >> + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr float zip(ConstView const& tracks, int32_t i) { + return tracks[i].state()(4); + } + + template >> + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr bool isTriplet(ConstView const& tracks, int32_t i) { + return tracks[i].nLayers() == 3; + } + +} // namespace reco + +#endif // DataFormats_TrackSoA_interface_TracksSoA_h diff --git a/DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h b/DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h new file mode 100644 index 0000000000000..6b95d2843653f --- /dev/null +++ b/DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h @@ -0,0 +1,173 @@ +#ifndef DataFormats_Track_interface_alpaka_TrackUtilities_h +#define DataFormats_Track_interface_alpaka_TrackUtilities_h + +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" + +// Methods that operate on View and ConstView of the TrackSoA, and cannot be class methods. 
+template +struct TracksUtilities { + using TrackSoAView = typename reco::TrackSoA::template Layout<>::View; + using TrackSoAConstView = typename reco::TrackSoA::template Layout<>::ConstView; + using hindex_type = typename reco::TrackSoA::hindex_type; + + // state at the beam spot: { phi, tip, 1/pt, cotan(theta), zip } + + template + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE static constexpr void copyFromCircle( + TrackSoAView &tracks, V3 const &cp, M3 const &ccov, V2 const &lp, M2 const &lcov, float b, int32_t i) { + tracks[i].state() << cp.template cast(), lp.template cast(); + + tracks[i].state()(2) = tracks[i].state()(2) * b; + auto cov = tracks[i].covariance(); + cov(0) = ccov(0, 0); + cov(1) = ccov(0, 1); + cov(2) = b * float(ccov(0, 2)); + cov(4) = cov(3) = 0; + cov(5) = ccov(1, 1); + cov(6) = b * float(ccov(1, 2)); + cov(8) = cov(7) = 0; + cov(9) = b * b * float(ccov(2, 2)); + cov(11) = cov(10) = 0; + cov(12) = lcov(0, 0); + cov(13) = lcov(0, 1); + cov(14) = lcov(1, 1); + } + + template + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE static constexpr void copyFromDense(TrackSoAView &tracks, + V5 const &v, + M5 const &cov, + int32_t i) { + tracks[i].state() = v.template cast(); + for (int j = 0, ind = 0; j < 5; ++j) + for (auto k = j; k < 5; ++k) + tracks[i].covariance()(ind++) = cov(j, k); + } + + template + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE static constexpr void copyToDense(const TrackSoAConstView &tracks, + V5 &v, + M5 &cov, + int32_t i) { + v = tracks[i].state().template cast(); + for (int j = 0, ind = 0; j < 5; ++j) { + cov(j, j) = tracks[i].covariance()(ind++); + for (auto k = j + 1; k < 5; ++k) + cov(k, j) = cov(j, k) = tracks[i].covariance()(ind++); + } + } + + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE static constexpr int computeNumberOfLayers(const TrackSoAConstView &tracks, + int32_t i) { + auto pdet = tracks.detIndices().begin(i); + int nl = 1; + auto ol = pixelTopology::getLayer(*pdet); + for (; pdet < tracks.detIndices().end(i); ++pdet) { + auto il = 
pixelTopology::getLayer(*pdet); + if (il != ol) + ++nl; + ol = il; + } + return nl; + } + + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE static constexpr int nHits(const TrackSoAConstView &tracks, int i) { + return tracks.detIndices().size(i); + } +}; + +namespace pixelTrack { + + template + struct QualityCutsT {}; + + template + struct QualityCutsT> { + using TrackSoAView = typename reco::TrackSoA::template Layout<>::View; + using TrackSoAConstView = typename reco::TrackSoA::template Layout<>::ConstView; + float chi2Coeff[4]; + float chi2MaxPt; // GeV + float chi2Scale; + + struct Region { + float maxTip; // cm + float minPt; // GeV + float maxZip; // cm + }; + + Region triplet; + Region quadruplet; + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool isHP(const TrackSoAConstView &tracks, int nHits, int it) const { + // impose "region cuts" based on the fit results (phi, Tip, pt, cotan(theta)), Zip) + // default cuts: + // - for triplets: |Tip| < 0.3 cm, pT > 0.5 GeV, |Zip| < 12.0 cm + // - for quadruplets: |Tip| < 0.5 cm, pT > 0.3 GeV, |Zip| < 12.0 cm + // (see CAHitNtupletGeneratorGPU.cc) + auto const ®ion = (nHits > 3) ? 
quadruplet : triplet; + return (std::abs(reco::tip(tracks, it)) < region.maxTip) and (tracks.pt(it) > region.minPt) and + (std::abs(reco::zip(tracks, it)) < region.maxZip); + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool strictCut(const TrackSoAConstView &tracks, int it) const { + auto roughLog = [](float x) { + // max diff [0.5,12] at 1.25 0.16143 + // average diff 0.0662998 + union IF { + uint32_t i; + float f; + }; + IF z; + z.f = x; + uint32_t lsb = 1 < 21; + z.i += lsb; + z.i >>= 21; + auto f = z.i & 3; + int ex = int(z.i >> 2) - 127; + + // log2(1+0.25*f) + // averaged over bins + const float frac[4] = {0.160497f, 0.452172f, 0.694562f, 0.901964f}; + return float(ex) + frac[f]; + }; + + float pt = std::min(tracks.pt(it), chi2MaxPt); + float chi2Cut = chi2Scale * (chi2Coeff[0] + roughLog(pt) * chi2Coeff[1]); + if (tracks.chi2(it) >= chi2Cut) { +#ifdef NTUPLE_FIT_DEBUG + printf("Bad chi2 %d pt %f eta %f chi2 %f\n", it, tracks.pt(it), tracks.eta(it), tracks.chi2(it)); +#endif + return true; + } + return false; + } + }; + + template + struct QualityCutsT> { + using TrackSoAView = typename reco::TrackSoA::template Layout<>::View; + using TrackSoAConstView = typename reco::TrackSoA::template Layout<>::ConstView; + + float maxChi2; + float minPt; + float maxTip; + float maxZip; + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool isHP(const TrackSoAConstView &tracks, int nHits, int it) const { + return (std::abs(reco::tip(tracks, it)) < maxTip) and (tracks.pt(it) > minPt) and + (std::abs(reco::zip(tracks, it)) < maxZip); + } + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool strictCut(const TrackSoAConstView &tracks, int it) const { + return tracks.chi2(it) >= maxChi2; + } + }; + +} // namespace pixelTrack + +// TODO: Should those be placed in the ALPAKA_ACCELERATOR_NAMESPACE +template struct TracksUtilities; +template struct TracksUtilities; + +#endif diff --git a/DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h b/DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h new file 
mode 100644 index 0000000000000..62e9f69e34636 --- /dev/null +++ b/DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h @@ -0,0 +1,52 @@ +#ifndef DataFormats_Track_interface_alpaka_TracksSoACollection_h +#define DataFormats_Track_interface_alpaka_TracksSoACollection_h + +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" + +// TODO: The class is created via inheritance of the PortableCollection. +// This is generally discouraged, and should be done via composition. +// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + template + using TracksSoACollection = std::conditional_t, + TracksHost, + TracksDevice>; + + //Classes definition for Phase1/Phase2/HIonPhase1, to make the classes_def lighter. Not actually used in the code. 
+ namespace pixelTrack { + using TracksSoACollectionPhase1 = TracksSoACollection; + using TracksSoACollectionPhase2 = TracksSoACollection; + using TracksSoACollectionHIonPhase1 = TracksSoACollection; + } // namespace pixelTrack +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue& queue, TracksDevice const& deviceData) { + ::TracksHost hostData(queue); + alpaka::memcpy(queue, hostData.buffer(), deviceData.buffer()); +#ifdef GPU_DEBUG + printf("TracksSoACollection: I'm copying to host.\n"); +#endif + return hostData; + } + }; +} // namespace cms::alpakatools + +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(pixelTrack::TracksSoACollectionPhase1, pixelTrack::TracksHostPhase1); +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(pixelTrack::TracksSoACollectionPhase2, pixelTrack::TracksHostPhase2); +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(pixelTrack::TracksSoACollectionHIonPhase1, pixelTrack::TracksHostHIonPhase1); + +#endif // DataFormats_Track_interface_alpaka_TracksSoACollection_h diff --git a/DataFormats/TrackSoA/src/alpaka/classes_cuda.h b/DataFormats/TrackSoA/src/alpaka/classes_cuda.h new file mode 100644 index 0000000000000..17f3b64498711 --- /dev/null +++ b/DataFormats/TrackSoA/src/alpaka/classes_cuda.h @@ -0,0 +1,13 @@ +#ifndef DataFormats_TrackSoA_src_alpaka_classes_cuda_h +#define DataFormats_TrackSoA_src_alpaka_classes_cuda_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +using namespace pixelTopology; + +#endif // DataFormats_TrackSoA_src_alpaka_classes_cuda_h diff --git a/DataFormats/TrackSoA/src/alpaka/classes_cuda_def.xml 
b/DataFormats/TrackSoA/src/alpaka/classes_cuda_def.xml new file mode 100644 index 0000000000000..9edee6439e63b --- /dev/null +++ b/DataFormats/TrackSoA/src/alpaka/classes_cuda_def.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/DataFormats/TrackSoA/src/alpaka/classes_rocm.h b/DataFormats/TrackSoA/src/alpaka/classes_rocm.h new file mode 100644 index 0000000000000..0267ddeb213d5 --- /dev/null +++ b/DataFormats/TrackSoA/src/alpaka/classes_rocm.h @@ -0,0 +1,13 @@ +#ifndef DataFormats_TrackSoA_src_alpaka_classes_rocm_h +#define DataFormats_TrackSoA_src_alpaka_classes_rocm_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +using namespace pixelTopology; + +#endif // DataFormats_TrackSoA_src_alpaka_classes_rocm_h diff --git a/DataFormats/TrackSoA/src/alpaka/classes_rocm_def.xml b/DataFormats/TrackSoA/src/alpaka/classes_rocm_def.xml new file mode 100644 index 0000000000000..772a1b385a957 --- /dev/null +++ b/DataFormats/TrackSoA/src/alpaka/classes_rocm_def.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/DataFormats/TrackSoA/src/classes.cc b/DataFormats/TrackSoA/src/classes.cc new file mode 100644 index 0000000000000..97e00cc5b5638 --- /dev/null +++ b/DataFormats/TrackSoA/src/classes.cc @@ -0,0 +1,9 @@ +#include "DataFormats/Portable/interface/PortableHostCollectionReadRules.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +using namespace reco; + +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection>); +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection>); +// SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection>); //TODO: For the moment we live 
without HIons diff --git a/DataFormats/TrackSoA/src/classes.h b/DataFormats/TrackSoA/src/classes.h new file mode 100644 index 0000000000000..c97bb234d7e18 --- /dev/null +++ b/DataFormats/TrackSoA/src/classes.h @@ -0,0 +1,10 @@ +#ifndef DataFormats_TrackSoA_src_classes_h +#define DataFormats_TrackSoA_src_classes_h + +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" + +using namespace pixelTopology; + +#endif // DataFormats_TrackSoA_src_classes_h diff --git a/DataFormats/TrackSoA/src/classes_def.xml b/DataFormats/TrackSoA/src/classes_def.xml new file mode 100644 index 0000000000000..5ae5fbf55cd8f --- /dev/null +++ b/DataFormats/TrackSoA/src/classes_def.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/DataFormats/TrackSoA/test/BuildFile.xml b/DataFormats/TrackSoA/test/BuildFile.xml new file mode 100644 index 0000000000000..ce2b273d90577 --- /dev/null +++ b/DataFormats/TrackSoA/test/BuildFile.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/TrackSoA/test/alpaka/TrackSoAHeterogeneous_test.cc b/DataFormats/TrackSoA/test/alpaka/TrackSoAHeterogeneous_test.cc new file mode 100644 index 0000000000000..f4af0688ca1bf --- /dev/null +++ b/DataFormats/TrackSoA/test/alpaka/TrackSoAHeterogeneous_test.cc @@ -0,0 +1,82 @@ +/** + Simple test for the pixelTrack::TrackSoA data structure + which inherits from PortableDeviceCollection. + + Creates an instance of the class (automatically allocates + memory on device), passes the view of the SoA data to + the CUDA kernels which: + - Fill the SoA with data. + - Verify that the data written is correct. + + Then, the SoA data are copied back to Host, where + a temporary host-side view (tmp_view) is created using + the same Layout to access the data on host and print it. 
+ */ + +#include +#include +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +using namespace std; +using namespace reco; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; +using namespace ALPAKA_ACCELERATOR_NAMESPACE::pixelTrack; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace testTrackSoA { + + template + void runKernels(TrackSoAView tracks_view, Queue& queue); + } +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + // Inner scope to deallocate memory before destroying the stream + { + // Instantiate tracks on device. PortableDeviceCollection allocates + // SoA on device automatically. + TracksSoACollection tracks_d(queue); + testTrackSoA::runKernels(tracks_d.view(), queue); + + // Instantate tracks on host. This is where the data will be + // copied to from device. 
+ TracksHost tracks_h(queue); + + std::cout << tracks_h.view().metadata().size() << std::endl; + alpaka::memcpy(queue, tracks_h.buffer(), tracks_d.const_buffer()); + alpaka::wait(queue); + + // Print results + std::cout << "pt" + << "\t" + << "eta" + << "\t" + << "chi2" + << "\t" + << "quality" + << "\t" + << "nLayers" + << "\t" + << "hitIndices off" << std::endl; + + for (int i = 0; i < 10; ++i) { + std::cout << tracks_h.view()[i].pt() << "\t" << tracks_h.view()[i].eta() << "\t" << tracks_h.view()[i].chi2() + << "\t" << (int)tracks_h.view()[i].quality() << "\t" << (int)tracks_h.view()[i].nLayers() << "\t" + << tracks_h.view().hitIndices().off[i] << std::endl; + } + } + + return 0; +} diff --git a/DataFormats/TrackSoA/test/alpaka/TrackSoAHeterogeneous_test.dev.cc b/DataFormats/TrackSoA/test/alpaka/TrackSoAHeterogeneous_test.dev.cc new file mode 100644 index 0000000000000..2b9807e3db054 --- /dev/null +++ b/DataFormats/TrackSoA/test/alpaka/TrackSoAHeterogeneous_test.dev.cc @@ -0,0 +1,74 @@ +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" + +using namespace reco; + +using Quality = pixelTrack::Quality; +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using namespace cms::alpakatools; + namespace testTrackSoA { + + // Kernel which fills the TrackSoAView with data + // to test writing to it + template + class TestFillKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, TrackSoAView tracks_view, int32_t nTracks) const { + if (cms::alpakatools::once_per_grid(acc)) { + tracks_view.nTracks() = nTracks; + } + + for (int32_t j : elements_with_stride(acc, nTracks)) { + tracks_view[j].pt() = (float)j; + tracks_view[j].eta() = (float)j; + tracks_view[j].chi2() = (float)j; + 
tracks_view[j].quality() = (Quality)(j % 256); + tracks_view[j].nLayers() = j % 128; + tracks_view.hitIndices().off[j] = j; + } + } + }; + + // Kernel which reads from the TrackSoAView to verify + // that it was written correctly from the fill kernel + template + class TestVerifyKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + TrackSoAConstView tracks_view, + int32_t nTracks) const { + if (cms::alpakatools::once_per_grid(acc)) { + ALPAKA_ASSERT(tracks_view.nTracks() == nTracks); + } + for (int32_t j : elements_with_stride(acc, tracks_view.nTracks())) { + ALPAKA_ASSERT(abs(tracks_view[j].pt() - (float)j) < .0001); + ALPAKA_ASSERT(abs(tracks_view[j].eta() - (float)j) < .0001); + ALPAKA_ASSERT(abs(tracks_view[j].chi2() - (float)j) < .0001); + ALPAKA_ASSERT(tracks_view[j].quality() == (Quality)(j % 256)); + ALPAKA_ASSERT(tracks_view[j].nLayers() == j % 128); + ALPAKA_ASSERT(tracks_view.hitIndices().off[j] == uint32_t(j)); + } + } + }; + + // Host function which invokes the two kernels above + template + void runKernels(TrackSoAView tracks_view, Queue& queue) { + int32_t tracks = 420; + uint32_t items = 64; + uint32_t groups = divide_up_by(tracks, items); + auto workDiv = make_workdiv(groups, items); + alpaka::exec(queue, workDiv, TestFillKernel{}, tracks_view, tracks); + alpaka::exec(queue, workDiv, TestVerifyKernel{}, tracks_view, tracks); + } + + template void runKernels(TrackSoAView tracks_view, Queue& queue); + template void runKernels(TrackSoAView tracks_view, Queue& queue); + + } // namespace testTrackSoA +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/DataFormats/TrackingRecHitSoA/BuildFile.xml b/DataFormats/TrackingRecHitSoA/BuildFile.xml new file mode 100644 index 0000000000000..a7c80171ef4df --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/BuildFile.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h 
b/DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h new file mode 100644 index 0000000000000..06205906d8d2f --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h @@ -0,0 +1,20 @@ +#ifndef DataFormats_TrackingRecHitSoA_SiPixelHitStatus_H +#define DataFormats_TrackingRecHitSoA_SiPixelHitStatus_H + +#include + +// more information on bit fields : https://en.cppreference.com/w/cpp/language/bit_field +struct SiPixelHitStatus { + bool isBigX : 1; // ∈[0,1] + bool isOneX : 1; // ∈[0,1] + bool isBigY : 1; // ∈[0,1] + bool isOneY : 1; // ∈[0,1] + uint8_t qBin : 3; // ∈[0,1,...,7] +}; + +struct SiPixelHitStatusAndCharge { + SiPixelHitStatus status; + uint32_t charge : 24; +}; + +#endif diff --git a/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h b/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h new file mode 100644 index 0000000000000..c0fc252729df7 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h @@ -0,0 +1,44 @@ +#ifndef DataFormats_TrackingRecHitSoA_interface_TrackingRecHitSoADevice_h +#define DataFormats_TrackingRecHitSoA_interface_TrackingRecHitSoADevice_h + +#include + +#include + +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +template +class TrackingRecHitDevice : public PortableDeviceCollection, TDev> { +public: + using hitSoA = TrackingRecHitSoA; + //Need to decorate the class with the inherited portable accessors being now a template + using PortableDeviceCollection, TDev>::view; + using PortableDeviceCollection, TDev>::const_view; + using PortableDeviceCollection, TDev>::buffer; + + TrackingRecHitDevice() = default; + + // Constructor which specifies the SoA size + template + explicit TrackingRecHitDevice(uint32_t nHits, int32_t 
offsetBPIX2, uint32_t const* hitsModuleStart, TQueue queue) + : PortableDeviceCollection, TDev>(nHits, queue) { + const auto device = alpaka::getDev(queue); + + auto start_h = cms::alpakatools::make_host_view(hitsModuleStart, TrackerTraits::numberOfModules + 1); + auto start_d = + cms::alpakatools::make_device_view(device, view().hitsModuleStart().data(), TrackerTraits::numberOfModules + 1); + alpaka::memcpy(queue, start_d, start_h); + + auto off_h = cms::alpakatools::make_host_view(offsetBPIX2); + auto off_d = cms::alpakatools::make_device_view(device, view().offsetBPIX2()); + alpaka::memcpy(queue, off_d, off_h); + alpaka::wait(queue); + } + + uint32_t nHits() const { return view().metadata().size(); } + uint32_t const* hitsModuleStart() const { return view().hitsModuleStart().data(); } +}; +#endif // DataFormats_RecHits_interface_TrackingRecHitSoADevice_h diff --git a/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h b/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h new file mode 100644 index 0000000000000..ce3f57232ac93 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h @@ -0,0 +1,43 @@ +#ifndef DataFormats_TrackingRecHitSoA_interface_TrackingRecHitsHost_h +#define DataFormats_TrackingRecHitSoA_interface_TrackingRecHitsHost_h + +#include + +#include + +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +template +class TrackingRecHitHost : public PortableHostCollection> { +public: + using hitSoA = TrackingRecHitSoA; + //Need to decorate the class with the inherited portable accessors being now a template + using PortableHostCollection>::view; + using PortableHostCollection>::const_view; + using PortableHostCollection>::buffer; + + TrackingRecHitHost() = default; + + template + explicit TrackingRecHitHost(uint32_t nHits, TQueue queue) + : 
PortableHostCollection>(nHits, queue) {} + + // Constructor which specifies the SoA size + template + explicit TrackingRecHitHost(uint32_t nHits, int32_t offsetBPIX2, uint32_t const* hitsModuleStart, TQueue queue) + : PortableHostCollection>(nHits, queue) { + std::copy(hitsModuleStart, hitsModuleStart + TrackerTraits::numberOfModules + 1, view().hitsModuleStart().data()); + view().offsetBPIX2() = offsetBPIX2; + } + + uint32_t nHits() const { return view().metadata().size(); } + uint32_t const* hitsModuleStart() const { return view().hitsModuleStart().data(); } +}; + +using TrackingRecHitHostPhase1 = TrackingRecHitHost; +using TrackingRecHitHostPhase2 = TrackingRecHitHost; +using TrackingRecHitHostHIonPhase1 = TrackingRecHitHost; + +#endif // DataFormats_TrackingRecHitSoA_interface_TrackingRecHitsHost_h diff --git a/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h b/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h new file mode 100644 index 0000000000000..7e45a75043951 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h @@ -0,0 +1,55 @@ +#ifndef DataFormats_TrackingRecHitSoA_interface_TrackingRecHitsSoA_h +#define DataFormats_TrackingRecHitSoA_interface_TrackingRecHitsSoA_h + +#include + +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +template +struct TrackingRecHitSoA { + using hindex_type = typename TrackerTraits::hindex_type; + using PhiBinner = cms::alpakatools::HistoContainer; //28 for phase2 geometry + using PhiBinnerView = typename PhiBinner::View; + using PhiBinnerStorageType = typename PhiBinner::index_type; + using AverageGeometry = pixelTopology::AverageGeometryT; + using HitLayerStartArray = std::array; + using HitModuleStartArray = std::array; + + GENERATE_SOA_LAYOUT(Layout, 
+ SOA_COLUMN(float, xLocal), + SOA_COLUMN(float, yLocal), + SOA_COLUMN(float, xerrLocal), + SOA_COLUMN(float, yerrLocal), + SOA_COLUMN(float, xGlobal), + SOA_COLUMN(float, yGlobal), + SOA_COLUMN(float, zGlobal), + SOA_COLUMN(float, rGlobal), + SOA_COLUMN(int16_t, iphi), + SOA_COLUMN(SiPixelHitStatusAndCharge, chargeAndStatus), + SOA_COLUMN(int16_t, clusterSizeX), + SOA_COLUMN(int16_t, clusterSizeY), + SOA_COLUMN(uint16_t, detectorIndex), + SOA_SCALAR(int32_t, offsetBPIX2), + SOA_COLUMN(PhiBinnerStorageType, phiBinnerStorage), + SOA_SCALAR(HitModuleStartArray, hitsModuleStart), + SOA_SCALAR(HitLayerStartArray, hitsLayerStart), + SOA_SCALAR(AverageGeometry, averageGeometry), + SOA_SCALAR(PhiBinner, phiBinner)); +}; + +template +using TrackingRecHitLayout = typename TrackingRecHitSoA::template Layout<>; +template +using TrackingRecHitSoAView = typename TrackingRecHitSoA::template Layout<>::View; +template +using TrackingRecHitSoAConstView = typename TrackingRecHitSoA::template Layout<>::ConstView; + +#endif diff --git a/DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h b/DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h new file mode 100644 index 0000000000000..0e0e848afcfd9 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h @@ -0,0 +1,46 @@ +#ifndef DataFormats_RecHits_interface_alpakaTrackingRecHitsSoACollection +#define DataFormats_RecHits_interface_alpakaTrackingRecHitsSoACollection + +#include +#include +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" + +namespace 
ALPAKA_ACCELERATOR_NAMESPACE { + + template + using TrackingRecHitsSoACollection = std::conditional_t, + TrackingRecHitHost, + TrackingRecHitDevice>; + + //Classes definition for Phase1/Phase2, to make the classes_def lighter. Not actually used in the code. + using TrackingRecHitSoAPhase1 = TrackingRecHitsSoACollection; + using TrackingRecHitSoAPhase2 = TrackingRecHitsSoACollection; + using TrackingRecHitSoAHIonPhase1 = TrackingRecHitsSoACollection; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue& queue, TrackingRecHitDevice const& deviceData) { + TrackingRecHitHost hostData(deviceData.view().metadata().size(), queue); + alpaka::memcpy(queue, hostData.buffer(), deviceData.buffer()); +#ifdef GPU_DEBUG + printf("TrackingRecHitsSoACollection: I'm copying to host.\n"); +#endif + return hostData; + } + }; +} // namespace cms::alpakatools + +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(TrackingRecHitSoAPhase1, TrackingRecHitHostPhase1); +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(TrackingRecHitSoAPhase2, TrackingRecHitHostPhase2); +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(TrackingRecHitSoAHIonPhase1, TrackingRecHitHostHIonPhase1); + +#endif // DataFormats_RecHits_interface_alpakaTrackingRecHitsSoACollection \ No newline at end of file diff --git a/DataFormats/TrackingRecHitSoA/src/alpaka/classes_cuda.h b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_cuda.h new file mode 100644 index 0000000000000..402be81b7081e --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_cuda.h @@ -0,0 +1,12 @@ +#ifndef DataFormats_TrackingRecHitSoA_src_alpaka_classes_cuda_h +#define DataFormats_TrackingRecHitSoA_src_alpaka_classes_cuda_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include 
"DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" + +using namespace pixelTopology; + +#endif // DataFormats_TrackingRecHitSoA_src_alpaka_classes_cuda_h diff --git a/DataFormats/TrackingRecHitSoA/src/alpaka/classes_cuda_def.xml b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_cuda_def.xml new file mode 100644 index 0000000000000..80c267b57d585 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_cuda_def.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/DataFormats/TrackingRecHitSoA/src/alpaka/classes_rocm.h b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_rocm.h new file mode 100644 index 0000000000000..6af162021dd47 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_rocm.h @@ -0,0 +1,12 @@ +#ifndef DataFormats_TrackingRecHitSoA_src_alpaka_classes_rocm_h +#define DataFormats_TrackingRecHitSoA_src_alpaka_classes_rocm_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" + +using namespace pixelTopology; + +#endif // DataFormats_TrackingRecHitSoA_src_alpaka_classes_rocm_h diff --git a/DataFormats/TrackingRecHitSoA/src/alpaka/classes_rocm_def.xml b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_rocm_def.xml new file mode 100644 index 0000000000000..bc4c969137121 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/alpaka/classes_rocm_def.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/DataFormats/TrackingRecHitSoA/src/classes.cc b/DataFormats/TrackingRecHitSoA/src/classes.cc new file mode 100644 index 0000000000000..58167c21cef4f --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/classes.cc @@ 
-0,0 +1,7 @@ +#include "DataFormats/Portable/interface/PortableHostCollectionReadRules.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection>); +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection>); +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection>); \ No newline at end of file diff --git a/DataFormats/TrackingRecHitSoA/src/classes.h b/DataFormats/TrackingRecHitSoA/src/classes.h new file mode 100644 index 0000000000000..d405a88ed6ace --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/classes.h @@ -0,0 +1,11 @@ +#ifndef DataFormats_TrackingRecHitSoA_src_classes_h +#define DataFormats_TrackingRecHitSoA_src_classes_h + +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +using namespace pixelTopology; + +#endif // DataFormats_TrackingRecHitSoA_src_classes_h diff --git a/DataFormats/TrackingRecHitSoA/src/classes_def.xml b/DataFormats/TrackingRecHitSoA/src/classes_def.xml new file mode 100644 index 0000000000000..f3107e8587327 --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/src/classes_def.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/DataFormats/TrackingRecHitSoA/test/BuildFile.xml b/DataFormats/TrackingRecHitSoA/test/BuildFile.xml new file mode 100644 index 0000000000000..5b61a3460fb7d --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/test/BuildFile.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/TrackingRecHitSoA/test/alpaka/Hits_test.cc b/DataFormats/TrackingRecHitSoA/test/alpaka/Hits_test.cc new file mode 100644 index 0000000000000..378bb95db7b30 --- /dev/null +++ 
b/DataFormats/TrackingRecHitSoA/test/alpaka/Hits_test.cc @@ -0,0 +1,47 @@ +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" + +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +#include +#include + +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace testTrackingRecHitSoA { + + template + void runKernels(TrackingRecHitSoAView& hits, Queue& queue); + + } +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + // inner scope to deallocate memory before destroying the queue + { + uint32_t nHits = 2000; + int32_t offset = 100; + uint32_t moduleStart[pixelTopology::Phase1::numberOfModules + 1]; + + for (size_t i = 0; i < pixelTopology::Phase1::numberOfModules + 1; i++) { + moduleStart[i] = i * 2; + } + TrackingRecHitsSoACollection tkhit(nHits, offset, &moduleStart[0], queue); + + testTrackingRecHitSoA::runKernels(tkhit.view(), queue); + alpaka::wait(queue); + } + return 0; +} diff --git a/DataFormats/TrackingRecHitSoA/test/alpaka/Hits_test.dev.cc b/DataFormats/TrackingRecHitSoA/test/alpaka/Hits_test.dev.cc new file mode 100644 index 0000000000000..79d8bd69cbc3a --- /dev/null +++ b/DataFormats/TrackingRecHitSoA/test/alpaka/Hits_test.dev.cc @@ -0,0 +1,65 @@ +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include 
"DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" + +using namespace alpaka; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + namespace testTrackingRecHitSoA { + + template + class TestFillKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, TrackingRecHitSoAView soa) const { + const uint32_t i(alpaka::getIdx(acc)[0u]); + const uint32_t j(alpaka::getIdx(acc)[0u]); + + if (i == 0 and j == 0) { + soa.offsetBPIX2() = 22; + soa[10].xLocal() = 1.11; + } + + soa[i].iphi() = i % 10; + soa.hitsLayerStart()[j] = j; + } + }; + + template + class ShowKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, TrackingRecHitSoAConstView soa) const { + const uint32_t i(alpaka::getIdx(acc)[0u]); + const uint32_t j(alpaka::getIdx(acc)[0u]); + + if (i == 0 and j == 0) { + printf("nbins = %d \n", soa.phiBinner().nbins()); + printf("offsetBPIX %d ->%d \n", i, soa.offsetBPIX2()); + printf("nHits %d ->%d \n", i, soa.metadata().size()); + //printf("hitsModuleStart %d ->%d \n", i, soa.hitsModuleStart().at(28)); + } + + if (i < 10) // can be increased to soa.nHits() for debugging + printf("iPhi %d ->%d \n", i, soa[i].iphi()); + } + }; + + template + void runKernels(TrackingRecHitSoAView& view, Queue& queue) { + uint32_t items = 64; + uint32_t groups = divide_up_by(view.metadata().size(), items); + auto workDiv = make_workdiv(groups, items); + alpaka::exec(queue, workDiv, TestFillKernel{}, view); + alpaka::exec(queue, workDiv, ShowKernel{}, view); + } + + template void runKernels(TrackingRecHitSoAView& view, Queue& queue); + template void runKernels(TrackingRecHitSoAView& view, Queue& queue); + + } // namespace testTrackingRecHitSoA +} // namespace 
ALPAKA_ACCELERATOR_NAMESPACE diff --git a/DataFormats/VertexReco/interface/Vertex.h b/DataFormats/VertexReco/interface/Vertex.h index 7d7a119961eac..cec37f5b52205 100644 --- a/DataFormats/VertexReco/interface/Vertex.h +++ b/DataFormats/VertexReco/interface/Vertex.h @@ -39,7 +39,8 @@ namespace reco { /// point in the space typedef math::XYZPoint Point; /// error matrix dimension - enum { dimension = 3, dimension4D = 4 }; + constexpr static int dimension = 3; + constexpr static int dimension4D = 4; /// covariance error matrix (3x3) typedef math::Error::type Error; /// covariance error matrix (3x3) @@ -49,7 +50,7 @@ namespace reco { /// covariance error matrix (4x4) typedef math::Error::type CovarianceMatrix4D; /// matix size - enum { size = dimension * (dimension + 1) / 2, size4D = (dimension4D) * (dimension4D + 1) / 2 }; + constexpr static int size = dimension * (dimension + 1) / 2, size4D = (dimension4D) * (dimension4D + 1) / 2; /// index type typedef unsigned int index; /// default constructor - The vertex will not be valid. Position, error, diff --git a/DataFormats/VertexSoA/BuildFile.xml b/DataFormats/VertexSoA/BuildFile.xml new file mode 100644 index 0000000000000..af53fc68f5a45 --- /dev/null +++ b/DataFormats/VertexSoA/BuildFile.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/DataFormats/VertexSoA/README.md b/DataFormats/VertexSoA/README.md new file mode 100644 index 0000000000000..54172eda14281 --- /dev/null +++ b/DataFormats/VertexSoA/README.md @@ -0,0 +1,45 @@ +# Vertex Portable Data Formats + +`DataFormat`s meant to be used on Host (CPU) or Device (GPU) for +storing information about vertices created during the Pixel-local Reconstruction +chain. It stores data in an SoA manner. It contains the data that was previously +contained in the deprecated `ZVertexSoA` class. 
+ +The host format is inheriting from `DataFormats/Common/interface/PortableHostCollection.h`, +while the device format is inheriting from `DataFormats/Common/interface/PortableDeviceCollection.h` + +Both formats use the same SoA Layout (`ZVertexLayout`) which is generated +via the `GENERATE_SOA_LAYOUT` macro in the `ZVertexUtilities.h` file. + +## Notes + +- Initially, `ZVertexSoA` had distinct array sizes for each attribute (e.g. `zv` was `MAXVTX` elements +long, `ndof` was `MAXTRACKS` elements long). All columns are now of uniform `MAXTRACKS` size, +meaning that there will be some wasted space (appx. 190kB). +- Host and Device classes should **not** be created via inheritance, as they're done here, +but via composition. See [this discussion](https://github.com/cms-sw/cmssw/pull/40465#discussion_r1066039309). + +## ZVertexHeterogeneousHost + +The version of the data format to be used for storing vertex data on the CPU. +Instances of this class are to be used for: + +- Having a place to copy data to host from device, via `cudaMemcpy`, or +- Running host-side algorithms using data stored in an SoA manner. + +## ZVertexHeterogeneousDevice + +The version of the data format to be used for storing vertex data on the GPU. + +Instances of `ZVertexHeterogeneousDevice` are to be created on host and be +used on device only. To do so, the instance's `view()` method is to be called +to pass a `View` to any kernel launched. Accessing data from the `view()` is not +possible on the host side. + +## Utilities + +Apart from `ZVertexLayout`, `ZVertexUtilities.h` also contains +a collection of methods which were originally +defined as class methods inside the `ZVertexSoA` class +which have been adapted to operate on `View` instances, so that they are callable +from within `__global__` kernels, on both CPU and GPU. 
diff --git a/DataFormats/VertexSoA/interface/ZVertexDefinitions.h b/DataFormats/VertexSoA/interface/ZVertexDefinitions.h new file mode 100644 index 0000000000000..028668d1ff52a --- /dev/null +++ b/DataFormats/VertexSoA/interface/ZVertexDefinitions.h @@ -0,0 +1,13 @@ +#ifndef DataFormats_VertexSoA_ZVertexDefinitions_h +#define DataFormats_VertexSoA_ZVertexDefinitions_h + +#include + +namespace zVertex { + + constexpr uint32_t MAXTRACKS = 32 * 1024; + constexpr uint32_t MAXVTX = 1024; + +} // namespace zVertex + +#endif diff --git a/DataFormats/VertexSoA/interface/ZVertexDevice.h b/DataFormats/VertexSoA/interface/ZVertexDevice.h new file mode 100644 index 0000000000000..8d120ae190f3c --- /dev/null +++ b/DataFormats/VertexSoA/interface/ZVertexDevice.h @@ -0,0 +1,26 @@ +#ifndef DataFormats_VertexSoA_interface_ZVertexDevice_h +#define DataFormats_VertexSoA_interface_ZVertexDevice_h + +#include + +#include +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "DataFormats/VertexSoA/interface/ZVertexDefinitions.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/Portable/interface/PortableDeviceCollection.h" + +template +class ZVertexDeviceSoA : public PortableDeviceCollection, TDev> { +public: + ZVertexDeviceSoA() = default; // necessary for ROOT dictionaries + + // Constructor which specifies the SoA size + template + explicit ZVertexDeviceSoA(TQueue queue) : PortableDeviceCollection, TDev>(S, queue) {} +}; + +using namespace ::zVertex; +template +using ZVertexDevice = ZVertexDeviceSoA; + +#endif // DataFormats_VertexSoA_interface_ZVertexDevice_h diff --git a/DataFormats/VertexSoA/interface/ZVertexHost.h b/DataFormats/VertexSoA/interface/ZVertexHost.h new file mode 100644 index 0000000000000..2d72b83bfe385 --- /dev/null +++ b/DataFormats/VertexSoA/interface/ZVertexHost.h @@ -0,0 +1,29 @@ +#ifndef DataFormats_VertexSoA_ZVertexHost_H +#define DataFormats_VertexSoA_ZVertexHost_H + +#include + +#include + +#include 
"HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "DataFormats/VertexSoA/interface/ZVertexDefinitions.h" +#include "DataFormats/Portable/interface/PortableHostCollection.h" + +template +class ZVertexHostSoA : public PortableHostCollection { +public: + ZVertexHostSoA() = default; + + // Constructor which specifies the queue + template + explicit ZVertexHostSoA(TQueue queue) : PortableHostCollection(S, queue) {} + + // Constructor which specifies the DevHost + explicit ZVertexHostSoA(alpaka_common::DevHost const& host) : PortableHostCollection(S, host) {} +}; + +//using namespace ::zVertex; +using ZVertexHost = ZVertexHostSoA; + +#endif // DataFormats_VertexSoA_ZVertexHost_H diff --git a/DataFormats/VertexSoA/interface/ZVertexSoA.h b/DataFormats/VertexSoA/interface/ZVertexSoA.h new file mode 100644 index 0000000000000..045603618acd7 --- /dev/null +++ b/DataFormats/VertexSoA/interface/ZVertexSoA.h @@ -0,0 +1,31 @@ +#ifndef DataFormats_VertexSoA_interface_ZVertexSoA_h +#define DataFormats_VertexSoA_interface_ZVertexSoA_h + +#include + +#include + +#include "DataFormats/SoATemplate/interface/SoALayout.h" + +namespace reco { + + GENERATE_SOA_LAYOUT(ZVertexLayout, + SOA_COLUMN(int16_t, idv), + SOA_COLUMN(float, zv), + SOA_COLUMN(float, wv), + SOA_COLUMN(float, chi2), + SOA_COLUMN(float, ptv2), + SOA_COLUMN(int32_t, ndof), + SOA_COLUMN(uint16_t, sortInd), + SOA_SCALAR(uint32_t, nvFinal)) + + // Common types for both Host and Device code + using ZVertexSoA = ZVertexLayout<>; + using ZVertexSoAView = ZVertexSoA::View; + using ZVertexSoAConstView = ZVertexSoA::ConstView; + + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE void init(ZVertexSoAView &vertices) { vertices.nvFinal() = 0; } + +} // namespace reco + +#endif // DataFormats_VertexSoA_interface_ZVertexSoA_h diff --git a/DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h b/DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h new file mode 
100644 index 0000000000000..636a07e2bd978 --- /dev/null +++ b/DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h @@ -0,0 +1,39 @@ +#ifndef DataFormats_VertexSoA_interface_ZVertexSoACollection_h +#define DataFormats_VertexSoA_interface_ZVertexSoACollection_h + +#include + +#include +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "DataFormats/VertexSoA/interface/ZVertexDefinitions.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/VertexSoA/interface/ZVertexDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToHost.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using ZVertexSoACollection = + std::conditional_t, ZVertexHost, ZVertexDevice>; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToHost> { + template + static auto copyAsync(TQueue& queue, ZVertexDevice const& deviceData) { + ZVertexHost hostData(queue); + alpaka::memcpy(queue, hostData.buffer(), deviceData.buffer()); +#ifdef GPU_DEBUG + printf("ZVertexSoACollection: I'm copying to host.\n"); +#endif + return hostData; + } + }; +} // namespace cms::alpakatools + +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(ZVertexSoACollection, ZVertexHost); + +#endif // DataFormats_VertexSoA_interface_ZVertexSoACollection_h diff --git a/DataFormats/VertexSoA/src/alpaka/classes_cuda.h b/DataFormats/VertexSoA/src/alpaka/classes_cuda.h new file mode 100644 index 0000000000000..e76f6ca1365c1 --- /dev/null +++ b/DataFormats/VertexSoA/src/alpaka/classes_cuda.h @@ -0,0 +1,10 @@ +#ifndef DataFormats_VertexSoA_src_alpaka_classes_cuda_h +#define DataFormats_VertexSoA_src_alpaka_classes_cuda_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include 
"DataFormats/VertexSoA/interface//ZVertexDevice.h" +#include "DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" + +#endif // DataFormats_VertexSoA_src_alpaka_classes_cuda_h diff --git a/DataFormats/VertexSoA/src/alpaka/classes_cuda_def.xml b/DataFormats/VertexSoA/src/alpaka/classes_cuda_def.xml new file mode 100644 index 0000000000000..606937a5bd3e5 --- /dev/null +++ b/DataFormats/VertexSoA/src/alpaka/classes_cuda_def.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/VertexSoA/src/alpaka/classes_rocm.h b/DataFormats/VertexSoA/src/alpaka/classes_rocm.h new file mode 100644 index 0000000000000..f5ea845c028b1 --- /dev/null +++ b/DataFormats/VertexSoA/src/alpaka/classes_rocm.h @@ -0,0 +1,9 @@ +#ifndef DataFormats_VertexSoA_src_alpaka_classes_rocm_h +#define DataFormats_VertexSoA_src_alpaka_classes_rocm_h + +#include "DataFormats/Common/interface/DeviceProduct.h" +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "DataFormats/VertexSoA/interface//ZVertexDevice.h" +#include "DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" +#endif // DataFormats_VertexSoA_src_alpaka_classes_rocm_h diff --git a/DataFormats/VertexSoA/src/alpaka/classes_rocm_def.xml b/DataFormats/VertexSoA/src/alpaka/classes_rocm_def.xml new file mode 100644 index 0000000000000..94deb6fff7d61 --- /dev/null +++ b/DataFormats/VertexSoA/src/alpaka/classes_rocm_def.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/VertexSoA/src/classes.cc b/DataFormats/VertexSoA/src/classes.cc new file mode 100644 index 0000000000000..edffb6e08a9e5 --- /dev/null +++ b/DataFormats/VertexSoA/src/classes.cc @@ -0,0 +1,4 @@ +#include "DataFormats/Portable/interface/PortableHostCollectionReadRules.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" + +SET_PORTABLEHOSTCOLLECTION_READ_RULES(PortableHostCollection); diff --git a/DataFormats/VertexSoA/src/classes.h b/DataFormats/VertexSoA/src/classes.h new file mode 100644 
index 0000000000000..883182c01dcf9 --- /dev/null +++ b/DataFormats/VertexSoA/src/classes.h @@ -0,0 +1,8 @@ +#ifndef DataFormats_VertexSoA_src_classes_h +#define DataFormats_VertexSoA_src_classes_h + +#include "DataFormats/Common/interface/Wrapper.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" + +#endif // DataFormats_VertexSoA_src_classes_h diff --git a/DataFormats/VertexSoA/src/classes_def.xml b/DataFormats/VertexSoA/src/classes_def.xml new file mode 100644 index 0000000000000..820d28ecc3493 --- /dev/null +++ b/DataFormats/VertexSoA/src/classes_def.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/DataFormats/VertexSoA/test/BuildFile.xml b/DataFormats/VertexSoA/test/BuildFile.xml new file mode 100644 index 0000000000000..49dee4babd8a1 --- /dev/null +++ b/DataFormats/VertexSoA/test/BuildFile.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/DataFormats/VertexSoA/test/alpaka/ZVertexSoA_test.cc b/DataFormats/VertexSoA/test/alpaka/ZVertexSoA_test.cc new file mode 100644 index 0000000000000..0c0c8e8591df9 --- /dev/null +++ b/DataFormats/VertexSoA/test/alpaka/ZVertexSoA_test.cc @@ -0,0 +1,82 @@ +/** + Simple test for the reco::ZVertexSoA data structure + which inherits from Portable{Host}Collection. + + Creates an instance of the class (automatically allocates + memory on device), passes the view of the SoA data to + the kernels which: + - Fill the SoA with data. + - Verify that the data written is correct. + + Then, the SoA data are copied back to Host, where + a temporary host-side view (tmp_view) is created using + the same Layout to access the data on host and print it. 
+ */ + +#include +#include +#include "DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexDevice.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace std; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; +using namespace reco; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace testZVertexSoAT { + void runKernels(ZVertexSoAView zvertex_view, Queue& queue); + } +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + // Inner scope to deallocate memory before destroying the stream + { + // Instantiate vertices on device. PortableCollection allocates + // SoA on device automatically. + ZVertexSoACollection zvertex_d(queue); + testZVertexSoAT::runKernels(zvertex_d.view(), queue); + + // Instantiate vertices on host. This is where the data will be + // copied to from device. 
+ ZVertexHost zvertex_h(queue); + std::cout << zvertex_h.view().metadata().size() << std::endl; + alpaka::memcpy(queue, zvertex_h.buffer(), zvertex_d.const_buffer()); + alpaka::wait(queue); + + // Print results + std::cout << "idv" + << "\t" + << "zv" + << "\t" + << "wv" + << "\t" + << "chi2" + << "\t" + << "ptv2" + << "\t" + << "ndof" + << "\t" + << "sortInd" + << "\t" + << "nvFinal" << std::endl; + + for (int i = 0; i < 10; ++i) { + std::cout << (int)zvertex_h.view()[i].idv() << "\t" << zvertex_h.view()[i].zv() << "\t" + << zvertex_h.view()[i].wv() << "\t" << zvertex_h.view()[i].chi2() << "\t" << zvertex_h.view()[i].ptv2() + << "\t" << (int)zvertex_h.view()[i].ndof() << "\t" << (int)zvertex_h.view()[i].sortInd() << "\t" + << (int)zvertex_h.view().nvFinal() << std::endl; + } + } + + return 0; +} diff --git a/DataFormats/VertexSoA/test/alpaka/ZVertexSoA_test.dev.cc b/DataFormats/VertexSoA/test/alpaka/ZVertexSoA_test.dev.cc new file mode 100644 index 0000000000000..1b22159a53b88 --- /dev/null +++ b/DataFormats/VertexSoA/test/alpaka/ZVertexSoA_test.dev.cc @@ -0,0 +1,62 @@ +#include "DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexDevice.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" // Check if this is really needed; code doesn't compile without it + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using namespace alpaka; + using namespace cms::alpakatools; + + namespace testZVertexSoAT { + + class TestFillKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, reco::ZVertexSoAView zvertex_view) const { + if (cms::alpakatools::once_per_grid(acc)) { + zvertex_view.nvFinal() = 420; + } + + for (int32_t j : elements_with_stride(acc, zvertex_view.metadata().size())) { + zvertex_view[j].idv() = (int16_t)j; + zvertex_view[j].zv() = (float)j; + zvertex_view[j].wv() = (float)j; + zvertex_view[j].chi2() = 
(float)j; + zvertex_view[j].ptv2() = (float)j; + zvertex_view[j].ndof() = (int32_t)j; + zvertex_view[j].sortInd() = (uint16_t)j; + } + } + }; + + class TestVerifyKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, reco::ZVertexSoAView zvertex_view) const { + if (cms::alpakatools::once_per_grid(acc)) { + ALPAKA_ASSERT_OFFLOAD(zvertex_view.nvFinal() == 420); + } + + for (int32_t j : elements_with_stride(acc, zvertex_view.nvFinal())) { + assert(zvertex_view[j].idv() == j); + assert(zvertex_view[j].zv() - (float)j < 0.0001); + assert(zvertex_view[j].wv() - (float)j < 0.0001); + assert(zvertex_view[j].chi2() - (float)j < 0.0001); + assert(zvertex_view[j].ptv2() - (float)j < 0.0001); + assert(zvertex_view[j].ndof() == j); + assert(zvertex_view[j].sortInd() == uint32_t(j)); + } + } + }; + + void runKernels(reco::ZVertexSoAView zvertex_view, Queue& queue) { + uint32_t items = 64; + uint32_t groups = divide_up_by(zvertex_view.metadata().size(), items); + auto workDiv = make_workdiv(groups, items); + alpaka::exec(queue, workDiv, TestFillKernel{}, zvertex_view); + alpaka::exec(queue, workDiv, TestVerifyKernel{}, zvertex_view); + } + + } // namespace testZVertexSoAT + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/DetectorDescription/DDCMS/plugins/dd4hep/DDDefinitions2Objects.cc b/DetectorDescription/DDCMS/plugins/dd4hep/DDDefinitions2Objects.cc index 8343cc8651eca..11db847c664d8 100644 --- a/DetectorDescription/DDCMS/plugins/dd4hep/DDDefinitions2Objects.cc +++ b/DetectorDescription/DDCMS/plugins/dd4hep/DDDefinitions2Objects.cc @@ -2314,7 +2314,7 @@ static long load_dddefinition(Detector& det, xml_h element) { wv.placeVolume(mfv1, 1); // Can not deal with reflections without closed geometry - det.manager().CloseGeometry(); + det.manager().CloseGeometry("nv"); det.endDocument(); } diff --git a/EgammaAnalysis/ElectronTools/plugins/EGammaCutBasedEleIdAnalyzer.cc b/EgammaAnalysis/ElectronTools/plugins/EGammaCutBasedEleIdAnalyzer.cc index 
916893465785a..4279f8ca01167 100644 --- a/EgammaAnalysis/ElectronTools/plugins/EGammaCutBasedEleIdAnalyzer.cc +++ b/EgammaAnalysis/ElectronTools/plugins/EGammaCutBasedEleIdAnalyzer.cc @@ -51,7 +51,6 @@ class EGammaCutBasedEleIdAnalyzer : public edm::one::EDAnalyzer<> { typedef std::vector > > IsoDepositVals; explicit EGammaCutBasedEleIdAnalyzer(const edm::ParameterSet &); - ~EGammaCutBasedEleIdAnalyzer() override; static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); ElectronEffectiveArea::ElectronEffectiveAreaTarget EAtarget; @@ -59,14 +58,7 @@ class EGammaCutBasedEleIdAnalyzer : public edm::one::EDAnalyzer<> { private: void beginJob() override; void analyze(const edm::Event &, const edm::EventSetup &) override; - void endJob() override; - - /* - void beginRun(edm::Run const &, edm::EventSetup const &) override; - void endRun(edm::Run const &, edm::EventSetup const &) override; - void beginLuminosityBlock(edm::LuminosityBlock const &, edm::EventSetup const &) override; - void endLuminosityBlock(edm::LuminosityBlock const &, edm::EventSetup const &) override; - */ + // ----------member data --------------------------- // input tags @@ -126,11 +118,6 @@ EGammaCutBasedEleIdAnalyzer::EGammaCutBasedEleIdAnalyzer(const edm::ParameterSet h1_pt_fbremeopin_ = fs->make("h1_pt_fbremeopin", "pt (fbremeopin)", 100, 0.0, 100.0); } -EGammaCutBasedEleIdAnalyzer::~EGammaCutBasedEleIdAnalyzer() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -253,21 +240,6 @@ void EGammaCutBasedEleIdAnalyzer::beginJob() { EAtarget = ElectronEffectiveArea::kEleEAData2012; } -// ------------ method called once each job just after ending the event loop ------------ -void EGammaCutBasedEleIdAnalyzer::endJob() {} -/* -// ------------ method called when starting to processes a run ------------ -void EGammaCutBasedEleIdAnalyzer::beginRun(edm::Run const &, edm::EventSetup const &) {} - -// ------------ method called when ending the processing of a run ------------ -void EGammaCutBasedEleIdAnalyzer::endRun(edm::Run const &, edm::EventSetup const &) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void EGammaCutBasedEleIdAnalyzer::beginLuminosityBlock(edm::LuminosityBlock const &, edm::EventSetup const &) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void EGammaCutBasedEleIdAnalyzer::endLuminosityBlock(edm::LuminosityBlock const &, edm::EventSetup const &) {} -*/ // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void EGammaCutBasedEleIdAnalyzer::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/EgammaAnalysis/ElectronTools/test/MiniAODElectronIDValidationAnalyzer.cc b/EgammaAnalysis/ElectronTools/test/MiniAODElectronIDValidationAnalyzer.cc index 308d0f608e69e..dc3c750188dff 100644 --- a/EgammaAnalysis/ElectronTools/test/MiniAODElectronIDValidationAnalyzer.cc +++ b/EgammaAnalysis/ElectronTools/test/MiniAODElectronIDValidationAnalyzer.cc @@ -60,7 +60,6 @@ class MiniAODElectronIDValidationAnalyzer : public edm::one::EDAnalyzer { public: explicit MiniAODElectronIDValidationAnalyzer(const edm::ParameterSet &); - ~MiniAODElectronIDValidationAnalyzer() override; static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); @@ 
-72,14 +71,7 @@ class MiniAODElectronIDValidationAnalyzer : public edm::one::EDAnalyzer> &genParticles); void findFirstNonElectronMother(const reco::Candidate *particle, int &ancestorPID, int &ancestorStatus); @@ -159,11 +151,6 @@ MiniAODElectronIDValidationAnalyzer::MiniAODElectronIDValidationAnalyzer(const e electronTree_->Branch("isPass", &isPass_, "isPass/I"); } -MiniAODElectronIDValidationAnalyzer::~MiniAODElectronIDValidationAnalyzer() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -246,44 +233,6 @@ void MiniAODElectronIDValidationAnalyzer::analyze(const edm::Event &iEvent, cons } } -// ------------ method called once each job just before starting event loop ------------ -void MiniAODElectronIDValidationAnalyzer::beginJob() {} - -// ------------ method called once each job just after ending the event loop ------------ -void MiniAODElectronIDValidationAnalyzer::endJob() {} - -// ------------ method called when starting to processes a run ------------ -/* -void -MiniAODElectronIDValidationAnalyzer::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -MiniAODElectronIDValidationAnalyzer::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -MiniAODElectronIDValidationAnalyzer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -MiniAODElectronIDValidationAnalyzer::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void 
MiniAODElectronIDValidationAnalyzer::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/EventFilter/EcalRawToDigi/plugins/BuildFile.xml b/EventFilter/EcalRawToDigi/plugins/BuildFile.xml index 02b8be67a6522..ae261fc4de1df 100644 --- a/EventFilter/EcalRawToDigi/plugins/BuildFile.xml +++ b/EventFilter/EcalRawToDigi/plugins/BuildFile.xml @@ -1,13 +1,11 @@ - + - - @@ -16,5 +14,17 @@ + + + + + + + + + + + + diff --git a/EventFilter/EcalRawToDigi/plugins/EcalDigisFromPortableProducer.cc b/EventFilter/EcalRawToDigi/plugins/EcalDigisFromPortableProducer.cc new file mode 100644 index 0000000000000..d2c450f1ac2ed --- /dev/null +++ b/EventFilter/EcalRawToDigi/plugins/EcalDigisFromPortableProducer.cc @@ -0,0 +1,210 @@ +#include + +#include "DataFormats/EcalDetId/interface/EcalDetIdCollections.h" +#include "DataFormats/EcalDigi/interface/EcalConstants.h" +#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" +#include "DataFormats/EcalDigi/interface/EcalDigiHostCollection.h" +#include "DataFormats/EcalRawData/interface/EcalRawDataCollections.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" + +class EcalDigisFromPortableProducer : public edm::stream::EDProducer<> { +public: + explicit EcalDigisFromPortableProducer(edm::ParameterSet const& ps); + ~EcalDigisFromPortableProducer() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions&); + +private: + void produce(edm::Event&, edm::EventSetup const&) override; + + template + edm::EDPutTokenT dummyProduces(ARGS&&... args) { + return (produceDummyIntegrityCollections_) ? produces(std::forward(args)...) 
+ : edm::EDPutTokenT{}; + } + +private: + // input digi collections on host in SoA format + using InputProduct = EcalDigiHostCollection; + edm::EDGetTokenT digisInEBToken_; + edm::EDGetTokenT digisInEEToken_; + + // output digi collections in legacy format + edm::EDPutTokenT digisOutEBToken_; + edm::EDPutTokenT digisOutEEToken_; + + // whether to produce dummy integrity collections + bool produceDummyIntegrityCollections_; + + // dummy producer collections + edm::EDPutTokenT ebSrFlagToken_; + edm::EDPutTokenT eeSrFlagToken_; + + // dummy ECAL raw data collection + edm::EDPutTokenT ecalRawDataToken_; + + // dummy integrity for xtal data + edm::EDPutTokenT ebIntegrityGainErrorsToken_; + edm::EDPutTokenT ebIntegrityGainSwitchErrorsToken_; + edm::EDPutTokenT ebIntegrityChIdErrorsToken_; + + // dummy integrity for xtal data - EE specific (to be rivisited towards EB+EE common collection) + edm::EDPutTokenT eeIntegrityGainErrorsToken_; + edm::EDPutTokenT eeIntegrityGainSwitchErrorsToken_; + edm::EDPutTokenT eeIntegrityChIdErrorsToken_; + + // dummy integrity errors + edm::EDPutTokenT integrityTTIdErrorsToken_; + edm::EDPutTokenT integrityZSXtalIdErrorsToken_; + edm::EDPutTokenT integrityBlockSizeErrorsToken_; + + edm::EDPutTokenT pnDiodeDigisToken_; + + // dummy TCC collections + edm::EDPutTokenT ecalTriggerPrimitivesToken_; + edm::EDPutTokenT ecalPseudoStripInputsToken_; + + // dummy mem integrity collections + edm::EDPutTokenT ecalIntegrityMemTtIdErrorsToken_; + edm::EDPutTokenT ecalIntegrityMemBlockSizeErrorsToken_; + edm::EDPutTokenT ecalIntegrityMemChIdErrorsToken_; + edm::EDPutTokenT ecalIntegrityMemGainErrorsToken_; +}; + +void EcalDigisFromPortableProducer::fillDescriptions(edm::ConfigurationDescriptions& confDesc) { + edm::ParameterSetDescription desc; + + desc.add("digisInLabelEB", edm::InputTag{"ecalRawToDigiPortable", "ebDigis"}); + desc.add("digisInLabelEE", edm::InputTag{"ecalRawToDigiPortable", "eeDigis"}); + desc.add("digisOutLabelEB", "ebDigis"); + 
desc.add("digisOutLabelEE", "eeDigis"); + desc.add("produceDummyIntegrityCollections", false); + + confDesc.add("ecalDigisFromPortableProducer", desc); +} + +EcalDigisFromPortableProducer::EcalDigisFromPortableProducer(const edm::ParameterSet& ps) + : // input digi collections on host in SoA format + digisInEBToken_{consumes(ps.getParameter("digisInLabelEB"))}, + digisInEEToken_{consumes(ps.getParameter("digisInLabelEE"))}, + + // output digi collections in legacy format + digisOutEBToken_{produces(ps.getParameter("digisOutLabelEB"))}, + digisOutEEToken_{produces(ps.getParameter("digisOutLabelEE"))}, + + // whether to produce dummy integrity collections + produceDummyIntegrityCollections_{ps.getParameter("produceDummyIntegrityCollections")}, + + // dummy collections + ebSrFlagToken_{dummyProduces()}, + eeSrFlagToken_{dummyProduces()}, + + // dummy ECAL raw data collection + ecalRawDataToken_{dummyProduces()}, + + // dummy integrity for xtal data + ebIntegrityGainErrorsToken_{dummyProduces("EcalIntegrityGainErrors")}, + ebIntegrityGainSwitchErrorsToken_{dummyProduces("EcalIntegrityGainSwitchErrors")}, + ebIntegrityChIdErrorsToken_{dummyProduces("EcalIntegrityChIdErrors")}, + + // dummy integrity for xtal data - EE specific (to be rivisited towards EB+EE common collection) + eeIntegrityGainErrorsToken_{dummyProduces("EcalIntegrityGainErrors")}, + eeIntegrityGainSwitchErrorsToken_{dummyProduces("EcalIntegrityGainSwitchErrors")}, + eeIntegrityChIdErrorsToken_{dummyProduces("EcalIntegrityChIdErrors")}, + + // dummy integrity errors + integrityTTIdErrorsToken_{dummyProduces("EcalIntegrityTTIdErrors")}, + integrityZSXtalIdErrorsToken_{dummyProduces("EcalIntegrityZSXtalIdErrors")}, + integrityBlockSizeErrorsToken_{dummyProduces("EcalIntegrityBlockSizeErrors")}, + + // + pnDiodeDigisToken_{dummyProduces()}, + + // dummy TCC collections + ecalTriggerPrimitivesToken_{dummyProduces("EcalTriggerPrimitives")}, + 
ecalPseudoStripInputsToken_{dummyProduces("EcalPseudoStripInputs")}, + + // dummy mem integrity collections + ecalIntegrityMemTtIdErrorsToken_{dummyProduces("EcalIntegrityMemTtIdErrors")}, + ecalIntegrityMemBlockSizeErrorsToken_{ + dummyProduces("EcalIntegrityMemBlockSizeErrors")}, + ecalIntegrityMemChIdErrorsToken_{dummyProduces("EcalIntegrityMemChIdErrors")}, + ecalIntegrityMemGainErrorsToken_{dummyProduces("EcalIntegrityMemGainErrors")} {} + +void EcalDigisFromPortableProducer::produce(edm::Event& event, edm::EventSetup const& setup) { + // output collections + auto digisEB = std::make_unique(); + auto digisEE = std::make_unique(); + + auto const& digisEBSoAHostColl = event.get(digisInEBToken_); + auto const& digisEESoAHostColl = event.get(digisInEEToken_); + auto& digisEBSoAView = digisEBSoAHostColl.view(); + auto& digisEESoAView = digisEESoAHostColl.view(); + + auto const digisEBSize = digisEBSoAView.size(); + auto const digisEESize = digisEESoAView.size(); + auto const digisEBDataSize = digisEBSize * ecalPh1::sampleSize; + auto const digisEEDataSize = digisEESize * ecalPh1::sampleSize; + + // Intermediate containers because the DigiCollection containers are accessible only as const + EBDigiCollection::IdContainer digisIdsEB; + EEDigiCollection::IdContainer digisIdsEE; + EBDigiCollection::DataContainer digisDataEB; + EEDigiCollection::DataContainer digisDataEE; + + digisIdsEB.resize(digisEBSize); + digisIdsEE.resize(digisEESize); + digisDataEB.resize(digisEBDataSize); + digisDataEE.resize(digisEEDataSize); + + // copy data + std::memcpy(digisIdsEB.data(), digisEBSoAView.id(), digisEBSize * sizeof(uint32_t)); + std::memcpy(digisIdsEE.data(), digisEESoAView.id(), digisEESize * sizeof(uint32_t)); + std::memcpy(digisDataEB.data(), digisEBSoAView.data()->data(), digisEBDataSize * sizeof(uint16_t)); + std::memcpy(digisDataEE.data(), digisEESoAView.data()->data(), digisEEDataSize * sizeof(uint16_t)); + + digisEB->swap(digisIdsEB, digisDataEB); + 
digisEE->swap(digisIdsEE, digisDataEE); + + digisEB->sort(); + digisEE->sort(); + + event.put(digisOutEBToken_, std::move(digisEB)); + event.put(digisOutEEToken_, std::move(digisEE)); + + if (produceDummyIntegrityCollections_) { + // dummy collections + event.emplace(ebSrFlagToken_); + event.emplace(eeSrFlagToken_); + // dummy ECAL raw data collection + event.emplace(ecalRawDataToken_); + // dummy integrity for xtal data + event.emplace(ebIntegrityGainErrorsToken_); + event.emplace(ebIntegrityGainSwitchErrorsToken_); + event.emplace(ebIntegrityChIdErrorsToken_); + // dummy integrity for xtal data - EE specific (to be rivisited towards EB+EE common collection) + event.emplace(eeIntegrityGainErrorsToken_); + event.emplace(eeIntegrityGainSwitchErrorsToken_); + event.emplace(eeIntegrityChIdErrorsToken_); + // dummy integrity errors + event.emplace(integrityTTIdErrorsToken_); + event.emplace(integrityZSXtalIdErrorsToken_); + event.emplace(integrityBlockSizeErrorsToken_); + // + event.emplace(pnDiodeDigisToken_); + // dummy TCC collections + event.emplace(ecalTriggerPrimitivesToken_); + event.emplace(ecalPseudoStripInputsToken_); + // dummy mem integrity collections + event.emplace(ecalIntegrityMemTtIdErrorsToken_); + event.emplace(ecalIntegrityMemBlockSizeErrorsToken_); + event.emplace(ecalIntegrityMemChIdErrorsToken_); + event.emplace(ecalIntegrityMemGainErrorsToken_); + } +} + +DEFINE_FWK_MODULE(EcalDigisFromPortableProducer); diff --git a/EventFilter/EcalRawToDigi/plugins/alpaka/DeclsForKernels.h b/EventFilter/EcalRawToDigi/plugins/alpaka/DeclsForKernels.h new file mode 100644 index 0000000000000..c91bad61e2dce --- /dev/null +++ b/EventFilter/EcalRawToDigi/plugins/alpaka/DeclsForKernels.h @@ -0,0 +1,43 @@ +#ifndef EventFilter_EcalRawToDigi_plugins_alpaka_DeclsForKernels_h +#define EventFilter_EcalRawToDigi_plugins_alpaka_DeclsForKernels_h + +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include 
"HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::raw { + + struct InputDataHost { + // delete the default constructor because alpaka buffers do not have a default constructor + InputDataHost() = delete; + + explicit InputDataHost(const Queue& queue, size_t size, size_t nFeds) + : data{cms::alpakatools::make_host_buffer(queue, size)}, + offsets{cms::alpakatools::make_host_buffer(queue, nFeds)}, + feds{cms::alpakatools::make_host_buffer(queue, nFeds)} {}; + + cms::alpakatools::host_buffer data; + cms::alpakatools::host_buffer offsets; + cms::alpakatools::host_buffer feds; + }; + + struct ConfigurationParameters { + uint32_t maxChannelsEE; + uint32_t maxChannelsEB; + }; + + struct InputDataDevice { + InputDataDevice() = delete; + + explicit InputDataDevice(const Queue& queue, size_t size, size_t nFeds) + : data{cms::alpakatools::make_device_buffer(queue, size)}, + offsets{cms::alpakatools::make_device_buffer(queue, nFeds)}, + feds{cms::alpakatools::make_device_buffer(queue, nFeds)} {}; + + cms::alpakatools::device_buffer data; + cms::alpakatools::device_buffer offsets; + cms::alpakatools::device_buffer feds; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::raw + +#endif // EventFilter_EcalRawToDigi_plugins_alpaka_DeclsForKernels_h diff --git a/EventFilter/EcalRawToDigi/plugins/alpaka/EcalElectronicsMappingHostESProducer.cc b/EventFilter/EcalRawToDigi/plugins/alpaka/EcalElectronicsMappingHostESProducer.cc new file mode 100644 index 0000000000000..32708b201ef2d --- /dev/null +++ b/EventFilter/EcalRawToDigi/plugins/alpaka/EcalElectronicsMappingHostESProducer.cc @@ -0,0 +1,58 @@ +#include "FWCore/Framework/interface/ESTransientHandle.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "CondFormats/DataRecord/interface/EcalMappingElectronicsRcd.h" +#include "CondFormats/EcalObjects/interface/EcalMappingElectronics.h" +#include 
"CondFormats/EcalObjects/interface/alpaka/EcalElectronicsMappingDevice.h" +#include "DataFormats/EcalDetId/interface/EcalElectronicsId.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + class EcalElectronicsMappingHostESProducer : public ESProducer { + public: + EcalElectronicsMappingHostESProducer(edm::ParameterSet const& iConfig) : ESProducer(iConfig) { + auto cc = setWhatProduced(this); + token_ = cc.consumes(); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + descriptions.addWithDefaultLabel(desc); + } + + std::unique_ptr produce(EcalMappingElectronicsRcd const& iRecord) { + auto const& mapping = iRecord.get(token_); + + // TODO: 0x3FFFFF * 4B ~= 16MB + // tmp solution for linear mapping of eid -> did + int const size = 0x3FFFFF; + auto product = std::make_unique(size, cms::alpakatools::host()); + + // fill in eb + auto const& barrelValues = mapping.barrelItems(); + for (unsigned int i = 0; i < barrelValues.size(); ++i) { + EcalElectronicsId eid{barrelValues[i].electronicsid}; + EBDetId did{EBDetId::unhashIndex(i)}; + product->view()[eid.linearIndex()].rawid() = did.rawId(); + } + + // fill in ee + auto const& endcapValues = mapping.endcapItems(); + for (unsigned int i = 0; i < endcapValues.size(); ++i) { + EcalElectronicsId eid{endcapValues[i].electronicsid}; + EEDetId did{EEDetId::unhashIndex(i)}; + product->view()[eid.linearIndex()].rawid() = did.rawId(); + } + return product; + } + + private: + edm::ESGetToken token_; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + 
+DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(EcalElectronicsMappingHostESProducer); diff --git a/EventFilter/EcalRawToDigi/plugins/alpaka/EcalRawToDigiPortable.cc b/EventFilter/EcalRawToDigi/plugins/alpaka/EcalRawToDigiPortable.cc new file mode 100644 index 0000000000000..7739cf15c0ab3 --- /dev/null +++ b/EventFilter/EcalRawToDigi/plugins/alpaka/EcalRawToDigiPortable.cc @@ -0,0 +1,142 @@ +#include "CondFormats/DataRecord/interface/EcalMappingElectronicsRcd.h" +#include "CondFormats/EcalObjects/interface/alpaka/EcalElectronicsMappingDevice.h" +#include "DataFormats/EcalDigi/interface/alpaka/EcalDigiDeviceCollection.h" +#include "DataFormats/FEDRawData/interface/FEDRawDataCollection.h" +#include "EventFilter/EcalRawToDigi/interface/DCCRawDataDefinitions.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EventSetup.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/stream/EDProducer.h" + +#include + +#include "DeclsForKernels.h" +#include "UnpackPortable.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + class EcalRawToDigiPortable : public stream::EDProducer<> { + public: + explicit EcalRawToDigiPortable(edm::ParameterSet const& ps); + ~EcalRawToDigiPortable() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions&); + + void produce(device::Event&, device::EventSetup const&) override; + + private: + edm::EDGetTokenT rawDataToken_; + using OutputProduct = EcalDigiDeviceCollection; + device::EDPutToken digisDevEBToken_; + device::EDPutToken digisDevEEToken_; + device::ESGetToken 
eMappingToken_; + + std::vector fedsToUnpack_; + + ecal::raw::ConfigurationParameters config_; + }; + + void EcalRawToDigiPortable::fillDescriptions(edm::ConfigurationDescriptions& confDesc) { + edm::ParameterSetDescription desc; + + desc.add("InputLabel", edm::InputTag("rawDataCollector")); + std::vector feds(54); + for (uint32_t i = 0; i < 54; ++i) + feds[i] = i + 601; + desc.add>("FEDs", feds); + desc.add("maxChannelsEB", 61200); + desc.add("maxChannelsEE", 14648); + desc.add("digisLabelEB", "ebDigis"); + desc.add("digisLabelEE", "eeDigis"); + + confDesc.addWithDefaultLabel(desc); + } + + EcalRawToDigiPortable::EcalRawToDigiPortable(const edm::ParameterSet& ps) + : rawDataToken_{consumes(ps.getParameter("InputLabel"))}, + digisDevEBToken_{produces(ps.getParameter("digisLabelEB"))}, + digisDevEEToken_{produces(ps.getParameter("digisLabelEE"))}, + eMappingToken_{esConsumes()}, + fedsToUnpack_{ps.getParameter>("FEDs")} { + config_.maxChannelsEB = ps.getParameter("maxChannelsEB"); + config_.maxChannelsEE = ps.getParameter("maxChannelsEE"); + } + + void EcalRawToDigiPortable::produce(device::Event& event, device::EventSetup const& setup) { + // conditions + auto const& eMappingProduct = setup.getData(eMappingToken_); + + // event data + const auto rawDataHandle = event.getHandle(rawDataToken_); + + // make a first iteration over the FEDs to compute the total buffer size + uint32_t size = 0; + uint32_t feds = 0; + for (auto const& fed : fedsToUnpack_) { + auto const& data = rawDataHandle->FEDData(fed); + auto const nbytes = data.size(); + + // skip empty FEDs + if (nbytes < globalFieds::EMPTYEVENTSIZE) + continue; + + size += nbytes; + ++feds; + } + + auto& queue = event.queue(); + + // input host buffers + ecal::raw::InputDataHost inputHost(queue, size, feds); + + // output device collections + OutputProduct digisDevEB{static_cast(config_.maxChannelsEB), queue}; + OutputProduct digisDevEE{static_cast(config_.maxChannelsEE), queue}; + // reset the size scalar of the 
SoA + // memset takes an alpaka view that is created from the scalar in a view to the device collection + auto digiViewEB = cms::alpakatools::make_device_view(alpaka::getDev(queue), digisDevEB.view().size()); + auto digiViewEE = cms::alpakatools::make_device_view(alpaka::getDev(queue), digisDevEE.view().size()); + alpaka::memset(queue, digiViewEB, 0); + alpaka::memset(queue, digiViewEE, 0); + + // iterate over FEDs to fill the host buffer + uint32_t currentCummOffset = 0; + uint32_t fedCounter = 0; + for (auto const& fed : fedsToUnpack_) { + auto const& data = rawDataHandle->FEDData(fed); + auto const nbytes = data.size(); + + // skip empty FEDs + if (nbytes < globalFieds::EMPTYEVENTSIZE) + continue; + + // copy raw data into host buffer + std::memcpy(inputHost.data.data() + currentCummOffset, data.data(), nbytes); + // set the offset in bytes from the start + inputHost.offsets[fedCounter] = currentCummOffset; + inputHost.feds[fedCounter] = fed; + + // this is the current offset into the buffer + currentCummOffset += nbytes; + ++fedCounter; + } + assert(currentCummOffset == size); + assert(fedCounter == feds); + + // unpack if at least one FED has data + if (fedCounter > 0) { + ecal::raw::unpackRaw(queue, inputHost, digisDevEB, digisDevEE, eMappingProduct, fedCounter, currentCummOffset); + } + + event.emplace(digisDevEBToken_, std::move(digisDevEB)); + event.emplace(digisDevEEToken_, std::move(digisDevEE)); + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(EcalRawToDigiPortable); diff --git a/EventFilter/EcalRawToDigi/plugins/alpaka/UnpackPortable.dev.cc b/EventFilter/EcalRawToDigi/plugins/alpaka/UnpackPortable.dev.cc new file mode 100644 index 0000000000000..374a5a9c2c87f --- /dev/null +++ b/EventFilter/EcalRawToDigi/plugins/alpaka/UnpackPortable.dev.cc @@ -0,0 +1,441 @@ +#include + +#include "DataFormats/DetId/interface/DetId.h" +#include 
"DataFormats/EcalDigi/interface/EcalConstants.h" +#include "EventFilter/EcalRawToDigi/interface/ElectronicsIdGPU.h" +#include "EventFilter/EcalRawToDigi/interface/DCCRawDataDefinitions.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +#include "UnpackPortable.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::raw { + + using namespace ::ecal::raw; + using namespace cms::alpakatools; + + class Kernel_unpack { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + unsigned char const* __restrict__ data, + uint32_t const* __restrict__ offsets, + int const* __restrict__ feds, + EcalDigiDeviceCollection::View digisDevEB, + EcalDigiDeviceCollection::View digisDevEE, + EcalElectronicsMappingDevice::ConstView eid2did, + uint32_t const nbytesTotal) const { + constexpr auto kSampleSize = ecalPh1::sampleSize; + + // indices + auto const ifed = alpaka::getIdx(acc)[0u]; + auto const threadIdx = alpaka::getIdx(acc)[0u]; + + // offset in bytes + auto const offset = offsets[ifed]; + // fed id + auto const fed = feds[ifed]; + auto const isBarrel = is_barrel(static_cast(fed - 600)); + // size + auto const gridDim = alpaka::getWorkDiv(acc)[0u]; + auto const size = ifed == gridDim - 1 ? nbytesTotal - offset : offsets[ifed + 1] - offset; + auto* samples = isBarrel ? digisDevEB.data()->data() : digisDevEE.data()->data(); + auto* ids = isBarrel ? digisDevEB.id() : digisDevEE.id(); + auto* pChannelsCounter = isBarrel ? 
&digisDevEB.size() : &digisDevEE.size(); + + // offset to the right raw buffer + uint64_t const* buffer = reinterpret_cast(data + offset); + + // dump first 3 bits for each 64-bit word + //print_first3bits(buffer, size / 8); + + // + // fed header + // + auto const fed_header = buffer[0]; + uint32_t bx = (fed_header >> H_BX_B) & H_BX_MASK; + uint32_t lv1 = (fed_header >> H_L1_B) & H_L1_MASK; + uint32_t triggerType = (fed_header >> H_TTYPE_B) & H_TTYPE_MASK; + + // determine the number of FE channels from the trigger type + uint32_t numbChannels(0); + if (triggerType == PHYSICTRIGGER) { + numbChannels = NUMB_FE; + } else if (triggerType == CALIBRATIONTRIGGER) { + numbChannels = NUMB_FE + 2; // FE + 2 MEM blocks + } else { + // unsupported trigger type + return; + } + + // 9 for fed + dcc header + // 36 for 4 EE TCC blocks or 18 for 1 EB TCC block + // 6 for SR block size + + // dcc header w2 + auto const w2 = buffer[2]; + uint8_t const fov = (w2 >> H_FOV_B) & H_FOV_MASK; + + // make a list of channels with data from DCC header channels status + // this could be done for each block instead of each thread since it defined per FED + uint8_t exp_ttids[NUMB_FE + 2]; // FE + 2 MEM blocks + uint8_t ch = 1; + uint8_t nCh = 0; + for (uint8_t i = 4; i < 9; ++i) { // data words with channel status info + for (uint8_t j = 0; j < 14; ++j, ++ch) { // channel status fields in one data word + const uint8_t shift = j * 4; //each channel has 4 bits + const int chStatus = (buffer[i] >> shift) & H_CHSTATUS_MASK; + const bool regular = (chStatus == CH_DISABLED || chStatus == CH_SUPPRESS); + const bool problematic = + (chStatus == CH_TIMEOUT || chStatus == CH_HEADERERR || chStatus == CH_LINKERR || + chStatus == CH_LENGTHERR || chStatus == CH_IFIFOFULL || chStatus == CH_L1AIFIFOFULL); + if (!(regular || problematic)) { + exp_ttids[nCh] = ch; + ++nCh; + } + } + } + + // + // print Tower block headers + // + uint8_t ntccblockwords = isBarrel ? 
18 : 36; + auto const* tower_blocks_start = buffer + 9 + ntccblockwords + 6; + auto const* trailer = buffer + (size / 8 - 1); + auto const* current_tower_block = tower_blocks_start; + uint8_t iCh = 0; + uint8_t next_tower_id = exp_ttids[iCh]; + while (current_tower_block < trailer && iCh < numbChannels) { + auto const w = *current_tower_block; + uint8_t ttid = w & TOWER_ID_MASK; + uint16_t bxlocal = (w >> TOWER_BX_B) & TOWER_BX_MASK; + uint16_t lv1local = (w >> TOWER_L1_B) & TOWER_L1_MASK; + uint16_t block_length = (w >> TOWER_LENGTH_B) & TOWER_LENGTH_MASK; + + // fast forward to the next good tower id (in case of recovery from an earlier header corruption) + while (exp_ttids[iCh] < next_tower_id) { + ++iCh; + } + ++iCh; + + // check if the tower id in the tower header is the one expected + // if not try to find the next good header, point the current_tower_block to it, and extract its tower id + // or break if there is none + if (ttid != next_tower_id) { + next_tower_id = find_next_tower_block(current_tower_block, trailer, bx, lv1); + if (next_tower_id < TOWER_ID_MASK) { + continue; + } else { + break; + } + } + + // prepare for the next iteration + next_tower_id = exp_ttids[iCh]; + + uint16_t const dccbx = bx & 0xfff; + uint16_t const dccl1 = lv1 & 0xfff; + // fov>=1 is required to support simulated data for which bx==bxlocal==0 + if (fov >= 1 && !is_synced_towerblock(dccbx, bxlocal, dccl1, lv1local)) { + current_tower_block += block_length; + continue; + } + + // go through all the channels + // get the next channel coordinates + uint32_t const nchannels = (block_length - 1) / 3; + + bool bad_block = false; + auto& ch_with_bad_block = alpaka::declareSharedVar(acc); + if (once_per_block(acc)) { + ch_with_bad_block = std::numeric_limits::max(); + } + // make sure the shared memory is initialised for all threads + alpaka::syncBlockThreads(acc); + + auto const threadsPerBlock = alpaka::getWorkDiv(acc)[0u]; + // 1 threads per channel in this block + // All threads 
enter the loop regardless if they will treat channel indices channel >= nchannels. + // The threads with excess indices perform no operations but also reach the syncBlockThreads() inside the loop. + for (uint32_t i = 0; i < nchannels; i += threadsPerBlock) { + auto const channel = i + threadIdx; + + uint64_t wdata; + uint8_t stripid; + uint8_t xtalid; + + // threads must be inside the range (no break here because of syncBlockThreads() afterwards) + if (channel < nchannels && channel < ch_with_bad_block) { + // inc the channel's counter and get the pos where to store + wdata = current_tower_block[1 + channel * 3]; + stripid = wdata & 0x7; + xtalid = (wdata >> 4) & 0x7; + + // check if the stripid and xtalid are in the allowed range and if not skip the rest of the block + if (stripid < ElectronicsIdGPU::MIN_STRIPID || stripid > ElectronicsIdGPU::MAX_STRIPID || + xtalid < ElectronicsIdGPU::MIN_XTALID || xtalid > ElectronicsIdGPU::MAX_XTALID) { + bad_block = true; + } + if (channel > 0) { + // check if the stripid has increased or that the xtalid has increased from the previous data word. If not something is wrong and the rest of the block is skipped. 
+ auto const prev_channel = channel - 1; + auto const prevwdata = current_tower_block[1 + prev_channel * 3]; + uint8_t const laststripid = prevwdata & 0x7; + uint8_t const lastxtalid = (prevwdata >> 4) & 0x7; + if ((stripid == laststripid && xtalid <= lastxtalid) || (stripid < laststripid)) { + bad_block = true; + } + } + } + + // check if this thread has the lowest bad block + if (bad_block && channel < ch_with_bad_block) { + alpaka::atomicMin(acc, &ch_with_bad_block, channel, alpaka::hierarchy::Threads{}); + } + + // make sure that all threads that have to have set the ch_with_bad_block shared memory + alpaka::syncBlockThreads(acc); + + // threads outside of the range or bad block detected in this thread or one working on a lower block -> stop this loop iteration here + if (channel >= nchannels || channel >= ch_with_bad_block) { + continue; + } + + ElectronicsIdGPU eid{fed2dcc(fed), ttid, stripid, xtalid}; + auto const didraw = isBarrel ? compute_ebdetid(eid) : eid2did[eid.linearIndex()].rawid(); + // FIXME: what kind of channels are these guys + if (didraw == 0) + continue; + + // get samples + uint16_t sampleValues[kSampleSize]; + sampleValues[0] = (wdata >> 16) & 0x3fff; + sampleValues[1] = (wdata >> 32) & 0x3fff; + sampleValues[2] = (wdata >> 48) & 0x3fff; + auto const wdata1 = current_tower_block[2 + channel * 3]; + sampleValues[3] = wdata1 & 0x3fff; + sampleValues[4] = (wdata1 >> 16) & 0x3fff; + sampleValues[5] = (wdata1 >> 32) & 0x3fff; + sampleValues[6] = (wdata1 >> 48) & 0x3fff; + auto const wdata2 = current_tower_block[3 + channel * 3]; + sampleValues[7] = wdata2 & 0x3fff; + sampleValues[8] = (wdata2 >> 16) & 0x3fff; + sampleValues[9] = (wdata2 >> 32) & 0x3fff; + + // check gain + bool isSaturation = true; + short firstGainZeroSampID{-1}, firstGainZeroSampADC{-1}; + for (uint32_t si = 0; si < kSampleSize; ++si) { + if (gainId(sampleValues[si]) == 0) { + firstGainZeroSampID = si; + firstGainZeroSampADC = adc(sampleValues[si]); + break; + } + } + if 
(firstGainZeroSampID != -1) { + unsigned int plateauEnd = std::min(kSampleSize, (unsigned int)(firstGainZeroSampID + 5)); + for (unsigned int s = firstGainZeroSampID; s < plateauEnd; s++) { + if (!(gainId(sampleValues[s]) == 0 && adc(sampleValues[s]) == firstGainZeroSampADC)) { + isSaturation = false; + break; + } //it's not saturation + } + // get rid of channels which are stuck in gain0 + if (firstGainZeroSampID < 3) { + isSaturation = false; + } + if (!isSaturation) + continue; + } else { // there is no zero gainId sample + // gain switch check + short numGain = 1; + bool gainSwitchError = false; + for (unsigned int si = 1; si < kSampleSize; ++si) { + if ((gainId(sampleValues[si - 1]) > gainId(sampleValues[si])) && numGain < 5) + gainSwitchError = true; + if (gainId(sampleValues[si - 1]) == gainId(sampleValues[si])) + numGain++; + else + numGain = 1; + } + if (gainSwitchError) + continue; + } + + auto const pos = alpaka::atomicAdd(acc, pChannelsCounter, 1u, alpaka::hierarchy::Threads{}); + + // store to global + ids[pos] = didraw; + std::memcpy(&samples[pos * kSampleSize], sampleValues, kSampleSize * sizeof(uint16_t)); + } + + current_tower_block += block_length; + } + } + + private: + ALPAKA_FN_INLINE ALPAKA_FN_ACC void print_raw_buffer(uint8_t const* const buffer, + uint32_t const nbytes, + uint32_t const nbytes_per_row = 20) const { + for (uint32_t i = 0; i < nbytes; ++i) { + if (i % nbytes_per_row == 0 && i > 0) + printf("\n"); + printf("%02X ", buffer[i]); + } + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC void print_first3bits(uint64_t const* buffer, uint32_t size) const { + for (uint32_t i = 0; i < size; ++i) { + uint8_t const b61 = (buffer[i] >> 61) & 0x1; + uint8_t const b62 = (buffer[i] >> 62) & 0x1; + uint8_t const b63 = (buffer[i] >> 63) & 0x1; + printf("[word: %u] %u%u%u\n", i, b63, b62, b61); + } + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC bool is_barrel(uint8_t dccid) const { + return dccid >= ElectronicsIdGPU::MIN_DCCID_EBM && dccid <= 
ElectronicsIdGPU::MAX_DCCID_EBP; + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC uint8_t fed2dcc(int fed) const { return static_cast(fed - 600); } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC int zside_for_eb(ElectronicsIdGPU const& eid) const { + int dcc = eid.dccId(); + return ((dcc >= ElectronicsIdGPU::MIN_DCCID_EBM && dcc <= ElectronicsIdGPU::MAX_DCCID_EBM)) ? -1 : 1; + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC uint8_t find_next_tower_block(uint64_t const*& current_tower_block, + uint64_t const* trailer, + uint32_t const bx, + uint32_t const lv1) const { + const auto* next_tower_block = current_tower_block + 1; // move forward to skip the broken header + + // expected LV1, BX, #TS + const uint64_t lv1local = ((lv1 - 1) & TOWER_L1_MASK); + const uint64_t bxlocal = (bx != 3564) ? bx : 0; + // The CPU unpacker also checks the # time samples expected in the header + // but those are currently not available here + + // construct tower header and mask + const uint64_t sign = 0xC0000000C0000000 + (lv1local << TOWER_L1_B) + (bxlocal << TOWER_BX_B); + const uint64_t mask = + 0xC0001000D0000000 + (uint64_t(TOWER_L1_MASK) << TOWER_L1_B) + (uint64_t(TOWER_BX_MASK) << TOWER_BX_B); + + while (next_tower_block < trailer) { + if ((*next_tower_block & mask) == sign) { + current_tower_block = next_tower_block; + return uint8_t(*next_tower_block & TOWER_ID_MASK); + } else { + ++next_tower_block; + } + } + return TOWER_ID_MASK; // return the maximum value + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC bool is_synced_towerblock(uint16_t const dccbx, + uint16_t const bx, + uint16_t const dccl1, + uint16_t const l1) const { + bool const bxsync = (bx == 0 && dccbx == 3564) || (bx == dccbx && dccbx != 3564); + bool const l1sync = (l1 == ((dccl1 - 1) & 0xfff)); + return bxsync && l1sync; + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC bool right_tower_for_eb(int tower) const { + // for EB, two types of tower (LVRB top/bottom) + return (tower > 12 && tower < 21) || (tower > 28 && tower < 37) || (tower > 44 && tower < 53) || + 
(tower > 60 && tower < 69); + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC uint32_t compute_ebdetid(ElectronicsIdGPU const& eid) const { + // as in Geometry/EcalMaping/.../EcalElectronicsMapping + auto const dcc = eid.dccId(); + auto const tower = eid.towerId(); + auto const strip = eid.stripId(); + auto const xtal = eid.xtalId(); + + int smid = 0; + int iphi = 0; + bool EBPlus = (zside_for_eb(eid) > 0); + bool EBMinus = !EBPlus; + + if (zside_for_eb(eid) < 0) { + smid = dcc + 19 - ElectronicsIdGPU::DCCID_PHI0_EBM; + iphi = (smid - 19) * ElectronicsIdGPU::kCrystalsInPhi; + iphi += 5 * ((tower - 1) % ElectronicsIdGPU::kTowersInPhi); + } else { + smid = dcc + 1 - ElectronicsIdGPU::DCCID_PHI0_EBP; + iphi = (smid - 1) * ElectronicsIdGPU::kCrystalsInPhi; + iphi += 5 * (ElectronicsIdGPU::kTowersInPhi - ((tower - 1) % ElectronicsIdGPU::kTowersInPhi) - 1); + } + + bool RightTower = right_tower_for_eb(tower); + int ieta = 5 * ((tower - 1) / ElectronicsIdGPU::kTowersInPhi) + 1; + if (RightTower) { + ieta += (strip - 1); + if (strip % 2 == 1) { + if (EBMinus) + iphi += (xtal - 1) + 1; + else + iphi += (4 - (xtal - 1)) + 1; + } else { + if (EBMinus) + iphi += (4 - (xtal - 1)) + 1; + else + iphi += (xtal - 1) + 1; + } + } else { + ieta += 4 - (strip - 1); + if (strip % 2 == 1) { + if (EBMinus) + iphi += (4 - (xtal - 1)) + 1; + else + iphi += (xtal - 1) + 1; + } else { + if (EBMinus) + iphi += (xtal - 1) + 1; + else + iphi += (4 - (xtal - 1)) + 1; + } + } + + if (zside_for_eb(eid) < 0) + ieta = -ieta; + + DetId did{DetId::Ecal, EcalBarrel}; + return did.rawId() | ((ieta > 0) ? 
(0x10000 | (ieta << 9)) : ((-ieta) << 9)) | (iphi & 0x1FF); + } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC int adc(uint16_t sample) const { return sample & 0xfff; } + + ALPAKA_FN_INLINE ALPAKA_FN_ACC int gainId(uint16_t sample) const { return (sample >> 12) & 0x3; } + }; + + void unpackRaw(Queue& queue, + InputDataHost const& inputHost, + EcalDigiDeviceCollection& digisDevEB, + EcalDigiDeviceCollection& digisDevEE, + EcalElectronicsMappingDevice const& mapping, + uint32_t const nfedsWithData, + uint32_t const nbytesTotal) { + // input device buffers + ecal::raw::InputDataDevice inputDevice(queue, nbytesTotal, nfedsWithData); + + // transfer the raw data + alpaka::memcpy(queue, inputDevice.data, inputHost.data); + alpaka::memcpy(queue, inputDevice.offsets, inputHost.offsets); + alpaka::memcpy(queue, inputDevice.feds, inputHost.feds); + + auto workDiv = cms::alpakatools::make_workdiv(nfedsWithData, 32); // 32 channels per block + alpaka::exec(queue, + workDiv, + Kernel_unpack{}, + inputDevice.data.data(), + inputDevice.offsets.data(), + inputDevice.feds.data(), + digisDevEB.view(), + digisDevEE.view(), + mapping.const_view(), + nbytesTotal); + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::raw diff --git a/EventFilter/EcalRawToDigi/plugins/alpaka/UnpackPortable.h b/EventFilter/EcalRawToDigi/plugins/alpaka/UnpackPortable.h new file mode 100644 index 0000000000000..9204d2ff71965 --- /dev/null +++ b/EventFilter/EcalRawToDigi/plugins/alpaka/UnpackPortable.h @@ -0,0 +1,22 @@ +#ifndef EventFilter_EcalRawToDigi_plugins_alpaka_UnpackPortable_h +#define EventFilter_EcalRawToDigi_plugins_alpaka_UnpackPortable_h + +#include "CondFormats/EcalObjects/interface/alpaka/EcalElectronicsMappingDevice.h" +#include "DataFormats/EcalDigi/interface/alpaka/EcalDigiDeviceCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "DeclsForKernels.h" + +namespace 
ALPAKA_ACCELERATOR_NAMESPACE::ecal::raw { + + void unpackRaw(Queue& queue, + InputDataHost const& inputHost, + EcalDigiDeviceCollection& digisDevEB, + EcalDigiDeviceCollection& digisDevEE, + EcalElectronicsMappingDevice const& mapping, + uint32_t const nfedsWithData, + uint32_t const nbytesTotal); + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::raw + +#endif // EventFilter_EcalRawToDigi_plugins_alpaka_UnpackPortable_h diff --git a/EventFilter/EcalRawToDigi/python/ecalDigis_cff.py b/EventFilter/EcalRawToDigi/python/ecalDigis_cff.py index cd202d520303a..0710a87569343 100644 --- a/EventFilter/EcalRawToDigi/python/ecalDigis_cff.py +++ b/EventFilter/EcalRawToDigi/python/ecalDigis_cff.py @@ -3,8 +3,10 @@ # ECAL unpacker running on CPU from EventFilter.EcalRawToDigi.EcalUnpackerData_cfi import ecalEBunpacker as _ecalEBunpacker +ecalDigisCPU = _ecalEBunpacker.clone() + ecalDigis = SwitchProducerCUDA( - cpu = _ecalEBunpacker.clone() + cpu = ecalDigisCPU ) ecalDigisTask = cms.Task( @@ -12,6 +14,8 @@ ecalDigis ) +from Configuration.StandardSequences.Accelerators_cff import * + # process modifier to run on GPUs from Configuration.ProcessModifiers.gpu_cff import gpu @@ -22,10 +26,10 @@ from EventFilter.EcalRawToDigi.ecalRawToDigiGPU_cfi import ecalRawToDigiGPU as _ecalRawToDigiGPU ecalDigisGPU = _ecalRawToDigiGPU.clone() -# extend the SwitchProducer to add a case to copy the ECAL digis from GPU to CPU and covert them from SoA to legacy format +# extend the SwitchProducer to add a case to copy the ECAL digis from GPU to CPU and convert them from SoA to legacy format from EventFilter.EcalRawToDigi.ecalCPUDigisProducer_cfi import ecalCPUDigisProducer as _ecalCPUDigisProducer gpu.toModify(ecalDigis, - # copy the ECAL digis from GPU to CPU and covert them from SoA to legacy format + # copy the ECAL digis from GPU to CPU and convert them from SoA to legacy format cuda = _ecalCPUDigisProducer.clone( digisInLabelEB = ('ecalDigisGPU', 'ebDigis'), digisInLabelEE = ('ecalDigisGPU', 
'eeDigis'), @@ -38,6 +42,37 @@ ecalElectronicsMappingGPUESProducer, # run the ECAL unpacker on GPU ecalDigisGPU, - # run the ECAL unpacker on CPU, or copy the ECAL digis from GPU to CPU and covert them from SoA to legacy format + # run the ECAL unpacker on CPU, or copy the ECAL digis from GPU to CPU and convert them from SoA to legacy format + ecalDigis +)) + +# process modifier to run alpaka implementation +from Configuration.ProcessModifiers.alpaka_cff import alpaka + +# ECAL conditions used by the portable unpacker +from EventFilter.EcalRawToDigi.ecalElectronicsMappingHostESProducer_cfi import ecalElectronicsMappingHostESProducer + +# alpaka ECAL unpacker +from EventFilter.EcalRawToDigi.ecalRawToDigiPortable_cfi import ecalRawToDigiPortable as _ecalRawToDigiPortable +ecalDigisPortable = _ecalRawToDigiPortable.clone() + +from EventFilter.EcalRawToDigi.ecalDigisFromPortableProducer_cfi import ecalDigisFromPortableProducer as _ecalDigisFromPortableProducer + +# replace the SwitchProducer branches with a module to copy the ECAL digis from the accelerator to CPU (if needed) and convert them from SoA to legacy format +_ecalDigisFromPortable = _ecalDigisFromPortableProducer.clone( + digisInLabelEB = 'ecalDigisPortable:ebDigis', + digisInLabelEE = 'ecalDigisPortable:eeDigis', + produceDummyIntegrityCollections = True +) +alpaka.toModify(ecalDigis, + cpu = _ecalDigisFromPortable.clone() +) + +alpaka.toReplaceWith(ecalDigisTask, cms.Task( + # ECAL conditions used by the portable unpacker + ecalElectronicsMappingHostESProducer, + # run the portable ECAL unpacker + ecalDigisPortable, + # copy the ECAL digis from GPU to CPU (if needed) and convert them from SoA to legacy format ecalDigis )) diff --git a/EventFilter/EcalRawToDigi/test/stubs/EcalMatacqHist2.cc b/EventFilter/EcalRawToDigi/test/stubs/EcalMatacqHist2.cc deleted file mode 100644 index 3b1174bca7d59..0000000000000 --- a/EventFilter/EcalRawToDigi/test/stubs/EcalMatacqHist2.cc +++ /dev/null @@ -1,140 +0,0 @@ 
-#include "EcalMatacqHist2.h" - -#include "TH1D.h" -#include "TProfile.h" -#include -#include -#include -#include "FWCore/MessageLogger/interface/MessageLogger.h" -#include -#include - - -EcalMatacqHist2::EcalMatacqHist2(const edm::ParameterSet& ps): - iEvent(0){ - outFileName= ps.getUntrackedParameter("outputRootFile", "matacqHist.root"); - nTimePlots = ps.getUntrackedParameter("nTimePlots", 10); - firstTimePlotEvent = ps.getUntrackedParameter("firstTimePlotEvent", - 1); - hTTrigMin = ps.getUntrackedParameter("hTTrigMin", 0.); - hTTrigMax = ps.getUntrackedParameter("hTTrigMax", 2000.); - matacqProducer_ = ps.getParameter("matacqProducer"); - TDirectory* dsave = gDirectory; - outFile = std::unique_ptr (new TFile(outFileName.c_str(), "RECREATE")); - if(outFile->IsZombie()){ - std::cout << "EcalMatacqHist2: Failed to create file " << outFileName - << " No histogram will be created.\n"; - } - - hTTrig = new TH1D("tTrig", "Trigger time in ns", - 100, - hTTrigMin, - hTTrigMax); - dsave->cd(); -} - -EcalMatacqHist2::~EcalMatacqHist2(){ - if(!outFile->IsZombie()){ - TDirectory* dsave = gDirectory; - outFile->cd(); - for(std::vector::iterator it = profiles.begin(); - it != profiles.end(); - ++it){ - it->Write(); - } - if(hTTrig!=0) hTTrig->Write(); - dsave->cd(); - } -} - -void -EcalMatacqHist2:: analyze( const edm::Event & e, const edm::EventSetup& c){ - ++iEvent; - if(outFile->IsZombie()) return; - TDirectory* dsave = gDirectory; - outFile->cd(); - - edm::TimeValue_t t = e.time().value(); - - time_t ts = t >>32; - time_t tus = t & 0xFFFFFFFF; - char buf[256]; - strftime(buf, sizeof(buf), "%F %R %S s", localtime(&ts)); - - std::cerr << std::flush; - std::cout << "---- > Event data: " << buf - << " " << tus << "us" << std::endl; - - // retrieving MATACQ digis: - edm::Handle digiColl; - if(e.getByLabel(matacqProducer_, "", digiColl)){ - unsigned iCh=0; - for(EcalMatacqDigiCollection::const_iterator it = digiColl->begin(); - it!=digiColl->end(); ++it, ++iCh){ - - const 
EcalMatacqDigi& digis = *it; - - std::cout << "Matacq digi size: " << digis.size() << std::endl; - - if(digis.size()==0) continue; - - if(iEvent >= firstTimePlotEvent - && iEvent < firstTimePlotEvent + nTimePlots){ - int nSamples = digis.size(); - std::stringstream title; - std::stringstream name; - name << "matacq" << digis.chId() << "_" - << std::setfill('0') << std::setw(4) << iEvent; - title << "Matacq channel " << digis.chId() << ", event " << iEvent - << ", Ts = " << digis.ts()*1.e9 << "ns"; - float tTrig_s = digis.tTrig(); - if(tTrig_s<999.){ - title << ", t_trig = " << tTrig_s * 1.e9 << "ns"; - } - TH1D h1(name.str().c_str(), title.str().c_str(), - nSamples, -.5, -.5+nSamples); - for(int i=0; i=profiles.size()){ //profile not yet allocated for this matacq ch. - std::stringstream profTitle; - profTitle << "Matacq channel " << digis.chId() - << " profile"; - std::stringstream profileName; - profileName << "matacq" << digis.chId(); - profiles.push_back(TProfile(profileName.str().c_str(), - profTitle.str().c_str(), - digis.size(), - -.5, - -.5+digis.size(), - "I")); - profiles.back().SetDirectory(0);//mem. management done by std::vector - profChId.push_back(digis.chId()); - } - - for(int i=0; iFill(digis.tTrig()*1.e9); - } - } - } else{ - edm::LogInfo("No matacq digi found"); - } - dsave->cd(); -} // analyze - - -DEFINE_FWK_MODULE(EcalMatacqHist2); diff --git a/EventFilter/EcalRawToDigi/test/stubs/EcalMatacqHist2.h b/EventFilter/EcalRawToDigi/test/stubs/EcalMatacqHist2.h deleted file mode 100644 index 2406ee868dcbf..0000000000000 --- a/EventFilter/EcalRawToDigi/test/stubs/EcalMatacqHist2.h +++ /dev/null @@ -1,53 +0,0 @@ -// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: t; tab-width: 8; -*- - -/** - * - * Test module for matacq data producing some histograms. - * - * Parameters: - *
    outputRootFile: untracked string, name of the root file to create for - * the histograms - *
  • nTimePlots: untracked int, number of events whose laser pulse is to - * be plotted. - *
  • firstTimePlotEvent: untracked int, first event for laser pulse time - * plot starting at 1. - *
- */ - -#include -#include -#include - -#include -#include - -class TProfile; -class TH1D; - -class EcalMatacqHist2: public edm::EDAnalyzer{ - public: - EcalMatacqHist2(const edm::ParameterSet& ps); - - virtual ~EcalMatacqHist2(); - - protected: - void - analyze( const edm::Event & e, const edm::EventSetup& c); - - -private: - std::string outFileName; - int nTimePlots; - int firstTimePlotEvent; - int iEvent; - double hTTrigMin; - double hTTrigMax; - std::unique_ptr outFile; - std::vector profiles; - //profile->MATACQ CH ID map - std::vector profChId; - TH1D* hTTrig; - std::string matacqProducer_; -}; - - diff --git a/EventFilter/GEMRawToDigi/plugins/GEMDigiToRawModule.cc b/EventFilter/GEMRawToDigi/plugins/GEMDigiToRawModule.cc index e3e5f84be98bb..60c07219719fb 100644 --- a/EventFilter/GEMRawToDigi/plugins/GEMDigiToRawModule.cc +++ b/EventFilter/GEMRawToDigi/plugins/GEMDigiToRawModule.cc @@ -39,10 +39,13 @@ class GEMDigiToRawModule : public edm::global::EDProducer digi_token; + const int event_type_; + const int minBunch_; + const int maxBunch_; + const edm::EDGetTokenT digiToken_; edm::ESGetToken gemChMapToken_; - bool useDBEMap_; + const bool useDBEMap_; + const bool simulatePulseStretching_; }; #include "FWCore/Framework/interface/MakerMacros.h" @@ -50,8 +53,11 @@ DEFINE_FWK_MODULE(GEMDigiToRawModule); GEMDigiToRawModule::GEMDigiToRawModule(const edm::ParameterSet& pset) : event_type_(pset.getParameter("eventType")), - digi_token(consumes(pset.getParameter("gemDigi"))), - useDBEMap_(pset.getParameter("useDBEMap")) { + minBunch_(pset.getParameter("minBunch")), + maxBunch_(pset.getParameter("maxBunch")), + digiToken_(consumes(pset.getParameter("gemDigi"))), + useDBEMap_(pset.getParameter("useDBEMap")), + simulatePulseStretching_(pset.getParameter("simulatePulseStretching")) { produces(); if (useDBEMap_) { gemChMapToken_ = esConsumes(); @@ -62,7 +68,13 @@ void GEMDigiToRawModule::fillDescriptions(edm::ConfigurationDescriptions& descri edm::ParameterSetDescription 
desc; desc.add("gemDigi", edm::InputTag("simMuonGEMDigis")); desc.add("eventType", 0); + + // time window for pulse stretching simulation + desc.add("minBunch", -3); + desc.add("maxBunch", 4); + desc.add("useDBEMap", false); + desc.add("simulatePulseStretching", false); descriptions.add("gemPackerDefault", desc); } @@ -83,7 +95,7 @@ void GEMDigiToRawModule::produce(edm::StreamID iID, edm::Event& iEvent, edm::Eve auto fedRawDataCol = std::make_unique(); edm::Handle gemDigis; - iEvent.getByToken(digi_token, gemDigis); + iEvent.getByToken(digiToken_, gemDigis); if (!gemDigis.isValid()) { iEvent.put(std::move(fedRawDataCol)); return; @@ -106,6 +118,12 @@ void GEMDigiToRawModule::produce(edm::StreamID iID, edm::Event& iEvent, edm::Eve const GEMDigiCollection::Range& digis = etaPart.second; for (auto digi = digis.first; digi != digis.second; ++digi) { int bx = digi->bx(); + if (simulatePulseStretching_) { + if (bx < minBunch_ or bx > maxBunch_) + continue; + else + bx = 0; + } auto search = gemBxMap.find(bx); if (search != gemBxMap.end()) { search->second.insertDigi(gemId, *digi); diff --git a/EventFilter/GEMRawToDigi/python/gemPacker_cfi.py b/EventFilter/GEMRawToDigi/python/gemPacker_cfi.py index 2804b490ccd13..837cbc7e8af47 100644 --- a/EventFilter/GEMRawToDigi/python/gemPacker_cfi.py +++ b/EventFilter/GEMRawToDigi/python/gemPacker_cfi.py @@ -6,6 +6,6 @@ from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM from Configuration.Eras.Modifier_phase2_GEM_cff import phase2_GEM -run2_GEM_2017.toModify(gemPacker, useDBEMap = True) -run3_GEM.toModify(gemPacker, useDBEMap = True) -phase2_GEM.toModify(gemPacker, useDBEMap = False) +run2_GEM_2017.toModify(gemPacker, useDBEMap = True, simulatePulseStretching = False) +run3_GEM.toModify(gemPacker, useDBEMap = True, simulatePulseStretching = True) +phase2_GEM.toModify(gemPacker, useDBEMap = False, simulatePulseStretching = False) diff --git a/EventFilter/L1ScoutingRawToDigi/BuildFile.xml 
b/EventFilter/L1ScoutingRawToDigi/BuildFile.xml new file mode 100644 index 0000000000000..04bc54b4991df --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/BuildFile.xml @@ -0,0 +1,11 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/EventFilter/L1ScoutingRawToDigi/interface/blocks.h b/EventFilter/L1ScoutingRawToDigi/interface/blocks.h new file mode 100644 index 0000000000000..bb614e82e875a --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/interface/blocks.h @@ -0,0 +1,50 @@ +#ifndef L1ScoutingRawToDigi_blocks_h +#define L1ScoutingRawToDigi_blocks_h + +#include + +namespace l1ScoutingRun3 { + + namespace ugmt { + + struct muon { + uint32_t f; + uint32_t s; + uint32_t extra; + }; + + struct block { + uint32_t bx; + uint32_t orbit; + muon mu[16]; + }; + } // namespace ugmt + + namespace demux { + + // unrolled frame block + struct block { + uint32_t header; + uint32_t bx; + uint32_t orbit; + uint32_t link0; + uint32_t jet2[6]; + uint32_t link1; + uint32_t jet1[6]; + uint32_t link2; + uint32_t egamma2[6]; + uint32_t link3; + uint32_t egamma1[6]; + uint32_t link4; + uint32_t empty[6]; + uint32_t link5; + uint32_t sum[6]; + uint32_t link6; + uint32_t tau2[6]; + uint32_t link7; + uint32_t tau1[6]; + }; + } // namespace demux + +} // namespace l1ScoutingRun3 +#endif // L1ScoutingRawToDigi_blocks_h diff --git a/EventFilter/L1ScoutingRawToDigi/interface/masks.h b/EventFilter/L1ScoutingRawToDigi/interface/masks.h new file mode 100644 index 0000000000000..349c8a8bb4ddd --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/interface/masks.h @@ -0,0 +1,102 @@ +#ifndef L1ScoutingRawToDigi_masks_h +#define L1ScoutingRawToDigi_masks_h + +#include +#include "shifts.h" + +namespace l1ScoutingRun3 { + + namespace ugmt { + struct masksMuon { + // bx word: 16 bits used for actual bx, MS 4 bits are muon type + // 0xf intermediate, + // 0x0 final + // following 4 bits for link id + static constexpr uint32_t bx = 0x1fff; + static constexpr uint32_t interm = 0x0001; + 
//masks for muon 64 bits + static constexpr uint32_t phiext = 0x03ff; + static constexpr uint32_t pt = 0x01ff; + static constexpr uint32_t ptuncon = 0x00ff; // 8 bits + static constexpr uint32_t qual = 0x000f; + static constexpr uint32_t etaext = 0x01ff; + static constexpr uint32_t etaextv = 0x00ff; + static constexpr uint32_t etaexts = 0x0100; + static constexpr uint32_t iso = 0x0003; + static constexpr uint32_t chrg = 0x0001; + static constexpr uint32_t chrgv = 0x0001; + static constexpr uint32_t index = 0x007f; + static constexpr uint32_t phi = 0x03ff; + static constexpr uint32_t eta = 0x01ff; + static constexpr uint32_t etav = 0x00ff; + static constexpr uint32_t etas = 0x0100; + static constexpr uint32_t dxy = 0x0003; + }; + } // namespace ugmt + + namespace demux { + + struct masksJet { + static constexpr uint32_t ET = 0x07ff; + static constexpr uint32_t eta = 0x00ff; + static constexpr uint32_t phi = 0x00ff; + static constexpr uint32_t disp = 0x0001; + static constexpr uint32_t qual = 0x0003; + }; + + struct masksEGamma { + static constexpr uint32_t ET = 0x01ff; + static constexpr uint32_t eta = 0x00ff; + static constexpr uint32_t phi = 0x00ff; + static constexpr uint32_t iso = 0x0003; + }; + + struct masksTau { + static constexpr uint32_t ET = 0x01ff; + static constexpr uint32_t eta = 0x00ff; + static constexpr uint32_t phi = 0x00ff; + static constexpr uint32_t iso = 0x0003; + }; + + struct masksESums { + static constexpr uint32_t ETEt = 0x0fff; // Et of ET object + static constexpr uint32_t ETEttem = 0x0fff; + static constexpr uint32_t ETMinBiasHF = 0x000f; + + static constexpr uint32_t HTEt = 0x0fff; // Et of HT object + static constexpr uint32_t HTtowerCount = 0x1fff; + static constexpr uint32_t HTMinBiasHF = 0x000f; + + static constexpr uint32_t ETmissEt = 0x0fff; + static constexpr uint32_t ETmissPhi = 0x00ff; + static constexpr uint32_t ETmissASYMET = 0x00ff; + static constexpr uint32_t ETmissMinBiasHF = 0x000f; + + static constexpr uint32_t HTmissEt = 
0x0fff; + static constexpr uint32_t HTmissPhi = 0x00ff; + static constexpr uint32_t HTmissASYMHT = 0x00ff; + static constexpr uint32_t HTmissMinBiasHF = 0x000f; + + static constexpr uint32_t ETHFmissEt = 0x0fff; + static constexpr uint32_t ETHFmissPhi = 0x00ff; + static constexpr uint32_t ETHFmissASYMETHF = 0x00ff; + static constexpr uint32_t ETHFmissCENT = 0x0003; + + static constexpr uint32_t HTHFmissEt = 0x0fff; + static constexpr uint32_t HTHFmissPhi = 0x00ff; + static constexpr uint32_t HTHFmissASYMHTHF = 0x00ff; + static constexpr uint32_t HTHFmissCENT = 0x0003; + }; + } // namespace demux + + struct header_masks { + static constexpr uint32_t bxmatch = 0x00ff << header_shifts::bxmatch; + static constexpr uint32_t mAcount = 0x000f << header_shifts::mAcount; + static constexpr uint32_t orbitmatch = 0x00ff << header_shifts::orbitmatch; + static constexpr uint32_t warningTestEnabled = 0x0001 << header_shifts::warningTestEnabled; + static constexpr uint32_t mBcount = 0x000f << header_shifts::mBcount; + static constexpr uint32_t sBmtfCount = 0x000f << header_shifts::sBmtfCount; + }; + +} // namespace l1ScoutingRun3 +#endif // L1ScoutingRawToDigi_masks_h diff --git a/EventFilter/L1ScoutingRawToDigi/interface/shifts.h b/EventFilter/L1ScoutingRawToDigi/interface/shifts.h new file mode 100644 index 0000000000000..5f0788c9a7a47 --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/interface/shifts.h @@ -0,0 +1,102 @@ +#ifndef L1ScoutingRawToDigi_shifts_h +#define L1ScoutingRawToDigi_shifts_h + +#include + +namespace l1ScoutingRun3 { + + namespace ugmt { + // struct shifts{ + struct shiftsMuon { + // bx word: 16 bits used for actual bx, MS 4 bits are muon type + // 0xf intermediate, + // 0x0 final + // following 4 bits for link id + static constexpr uint32_t bx = 0; + static constexpr uint32_t interm = 31; // updated for new run3 format (tj) + // shifts for muon 64 bits + static constexpr uint32_t phiext = 0; + static constexpr uint32_t pt = 10; + static constexpr uint32_t 
qual = 19; + static constexpr uint32_t etaext = 23; + static constexpr uint32_t iso = 0; + static constexpr uint32_t chrg = 2; + static constexpr uint32_t chrgv = 3; + static constexpr uint32_t index = 4; + static constexpr uint32_t phi = 11; + static constexpr uint32_t eta1 = 13; + static constexpr uint32_t eta2 = 22; + static constexpr uint32_t ptuncon = 21; + static constexpr uint32_t dxy = 30; + }; + } // namespace ugmt + + namespace demux { + // struct shiftsCaloJet{ + struct shiftsJet { + static constexpr uint32_t ET = 0; + static constexpr uint32_t eta = 11; + static constexpr uint32_t phi = 19; + static constexpr uint32_t disp = 27; + static constexpr uint32_t qual = 28; + }; + + // struct shiftsCaloEGamma{ + struct shiftsEGamma { + static constexpr uint32_t ET = 0; + static constexpr uint32_t eta = 9; + static constexpr uint32_t phi = 17; + static constexpr uint32_t iso = 25; + }; + + // struct shiftsCaloTau{ + struct shiftsTau { + static constexpr uint32_t ET = 0; + static constexpr uint32_t eta = 9; + static constexpr uint32_t phi = 17; + static constexpr uint32_t iso = 25; + }; + + // struct shiftsCaloESums{ + struct shiftsESums { + static constexpr uint32_t ETEt = 0; // Et of ET object + static constexpr uint32_t ETEttem = 12; + static constexpr uint32_t ETMinBiasHF = 28; + + static constexpr uint32_t HTEt = 0; // Et of HT object + static constexpr uint32_t HTtowerCount = 12; + static constexpr uint32_t HTMinBiasHF = 28; + + static constexpr uint32_t ETmissEt = 0; + static constexpr uint32_t ETmissPhi = 12; + static constexpr uint32_t ETmissASYMET = 20; + static constexpr uint32_t ETmissMinBiasHF = 28; + + static constexpr uint32_t HTmissEt = 0; + static constexpr uint32_t HTmissPhi = 12; + static constexpr uint32_t HTmissASYMHT = 20; + static constexpr uint32_t HTmissMinBiasHF = 28; + + static constexpr uint32_t ETHFmissEt = 0; + static constexpr uint32_t ETHFmissPhi = 12; + static constexpr uint32_t ETHFmissASYMETHF = 20; + static constexpr uint32_t 
ETHFmissCENT = 28; + + static constexpr uint32_t HTHFmissEt = 0; + static constexpr uint32_t HTHFmissPhi = 12; + static constexpr uint32_t HTHFmissASYMHTHF = 20; + static constexpr uint32_t HTHFmissCENT = 28; + }; + } // namespace demux + + struct header_shifts { + static constexpr uint32_t bxmatch = 24; + static constexpr uint32_t mAcount = 16; + static constexpr uint32_t orbitmatch = 8; + static constexpr uint32_t warningTestEnabled = 8; + static constexpr uint32_t mBcount = 0; + static constexpr uint32_t sBmtfCount = 0; + }; + +} // namespace l1ScoutingRun3 +#endif // L1ScoutingRawToDigi_shifts_h diff --git a/EventFilter/L1ScoutingRawToDigi/plugins/BuildFile.xml b/EventFilter/L1ScoutingRawToDigi/plugins/BuildFile.xml new file mode 100644 index 0000000000000..dbb651083a6fb --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/plugins/BuildFile.xml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.cc b/EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.cc new file mode 100644 index 0000000000000..b635d82c66375 --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.cc @@ -0,0 +1,328 @@ +#include "EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.h" + +ScCaloRawToDigi::ScCaloRawToDigi(const edm::ParameterSet& iConfig) { + using namespace edm; + using namespace l1ScoutingRun3; + srcInputTag = iConfig.getParameter("srcInputTag"); + enableAllSums_ = iConfig.getUntrackedParameter("enableAllSums", false); + debug_ = iConfig.getUntrackedParameter("debug", false); + + orbitBufferJets_ = std::vector>(3565); + orbitBufferEGammas_ = std::vector>(3565); + orbitBufferTaus_ = std::vector>(3565); + orbitBufferEtSums_ = std::vector>(3565); + + nJetsOrbit_ = 0; + nEGammasOrbit_ = 0; + nTausOrbit_ = 0; + nEtSumsOrbit_ = 0; + + produces().setBranchAlias("JetOrbitCollection"); + produces().setBranchAlias("TauOrbitCollection"); + 
produces().setBranchAlias("EGammaOrbitCollection"); + produces().setBranchAlias("BxSumsOrbitCollection"); + + rawToken = consumes(srcInputTag); +} + +ScCaloRawToDigi::~ScCaloRawToDigi(){}; + +void ScCaloRawToDigi::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + using namespace edm; + using namespace l1ScoutingRun3; + + Handle ScoutingRawDataCollection; + iEvent.getByToken(rawToken, ScoutingRawDataCollection); + + const FEDRawData& sourceRawData = ScoutingRawDataCollection->FEDData(SDSNumbering::CaloSDSID); + size_t orbitSize = sourceRawData.size(); + + std::unique_ptr unpackedJets(new JetOrbitCollection); + std::unique_ptr unpackedTaus(new TauOrbitCollection); + std::unique_ptr unpackedEGammas(new EGammaOrbitCollection); + std::unique_ptr unpackedEtSums(new BxSumsOrbitCollection); + + if ((sourceRawData.size() == 0) && debug_) { + std::cout << "No raw data for CALO source\n"; + } + + // unpack current orbit and store data into the orbitBufferr + unpackOrbit(sourceRawData.data(), orbitSize); + + // fill orbit collection and clear the Bx buffer vector + unpackedJets->fillAndClear(orbitBufferJets_, nJetsOrbit_); + unpackedEGammas->fillAndClear(orbitBufferEGammas_, nEGammasOrbit_); + unpackedTaus->fillAndClear(orbitBufferTaus_, nTausOrbit_); + unpackedEtSums->fillAndClear(orbitBufferEtSums_, nEtSumsOrbit_); + + // store collections in the event + iEvent.put(std::move(unpackedJets)); + iEvent.put(std::move(unpackedTaus)); + iEvent.put(std::move(unpackedEGammas)); + iEvent.put(std::move(unpackedEtSums)); +} + +void ScCaloRawToDigi::unpackOrbit(const unsigned char* buf, size_t len) { + using namespace l1ScoutingRun3; + + // reset counters + nJetsOrbit_ = 0; + nEGammasOrbit_ = 0; + nTausOrbit_ = 0; + nEtSumsOrbit_ = 0; + + size_t pos = 0; + + while (pos < len) { + assert(pos + sizeof(demux::block) <= len); + + demux::block* bl = (demux::block*)(buf + pos); + pos += sizeof(demux::block); + + assert(pos <= len); + uint32_t orbit = bl->orbit & 0x7FFFFFFF; + 
uint32_t bx = bl->bx; + + if (debug_) { + std::cout << "CALO Orbit " << orbit << ", BX -> " << bx << std::endl; + } + + // unpack jets from first link + if (debug_) + std::cout << "--- Jets link 1 ---\n"; + unpackLinkJets(bl->jet1, bx); + + // unpack jets from second link + if (debug_) + std::cout << "--- Jets link 2 ---\n"; + unpackLinkJets(bl->jet2, bx); + + // unpack eg from first link + if (debug_) + std::cout << "--- E/g link 1 ---\n"; + unpackLinkEGammas(bl->egamma1, bx); + + // unpack eg from second link link + if (debug_) + std::cout << "--- E/g link 2 ---\n"; + unpackLinkEGammas(bl->egamma2, bx); + + // unpack taus from first link + if (debug_) + std::cout << "--- Taus link 1 ---\n"; + unpackLinkTaus(bl->tau1, bx); + + // unpack taus from second link + if (debug_) + std::cout << "--- Taus link 2 ---\n"; + unpackLinkTaus(bl->tau2, bx); + + // unpack et sums + if (debug_) + std::cout << "--- Sums ---\n"; + unpackEtSums(bl->sum, bx); + + } // end of bx objects +} + +void ScCaloRawToDigi::unpackLinkJets(uint32_t* dataBlock, int bx) { + using namespace l1ScoutingRun3; + + int32_t ET(0), Eta(0), Phi(0), Qual(0); + for (uint32_t i = 0; i < 6; i++) { + ET = ((dataBlock[i] >> demux::shiftsJet::ET) & demux::masksJet::ET); + + if (ET != 0) { + Eta = ((dataBlock[i] >> demux::shiftsJet::eta) & demux::masksJet::eta); + Phi = ((dataBlock[i] >> demux::shiftsJet::phi) & demux::masksJet::phi); + Qual = ((dataBlock[i] >> demux::shiftsJet::qual) & demux::masksJet::qual); + + if (Eta > 127) + Eta = Eta - 256; + + Jet jet(ET, Eta, Phi, Qual); + orbitBufferJets_[bx].push_back(jet); + nJetsOrbit_++; + + if (debug_) { + std::cout << "Jet " << i << std::endl; + std::cout << " Raw: 0x" << std::hex << dataBlock[i] << std::dec << std::endl; + printJet(jet); + } + } + } // end link jets unpacking loop +} + +void ScCaloRawToDigi::unpackLinkEGammas(uint32_t* dataBlock, int bx) { + using namespace l1ScoutingRun3; + + int32_t ET(0), Eta(0), Phi(0), Iso(0); + for (uint32_t i = 0; i < 6; 
i++) { + ET = ((dataBlock[i] >> demux::shiftsEGamma::ET) & demux::masksEGamma::ET); + if (ET != 0) { + Eta = ((dataBlock[i] >> demux::shiftsEGamma::eta) & demux::masksEGamma::eta); + Phi = ((dataBlock[i] >> demux::shiftsEGamma::phi) & demux::masksEGamma::phi); + Iso = ((dataBlock[i] >> demux::shiftsEGamma::iso) & demux::masksEGamma::iso); + + if (Eta > 127) + Eta = Eta - 256; + + EGamma eGamma(ET, Eta, Phi, Iso); + orbitBufferEGammas_[bx].push_back(eGamma); + nEGammasOrbit_++; + + if (debug_) { + std::cout << "E/g " << i << std::endl; + std::cout << " Raw: 0x" << std::hex << dataBlock[i] << std::dec << std::endl; + printEGamma(eGamma); + } + } + } // end link e/gammas unpacking loop +} + +void ScCaloRawToDigi::unpackLinkTaus(uint32_t* dataBlock, int bx) { + using namespace l1ScoutingRun3; + + int32_t ET(0), Eta(0), Phi(0), Iso(0); + for (uint32_t i = 0; i < 6; i++) { + ET = ((dataBlock[i] >> demux::shiftsTau::ET) & demux::masksTau::ET); + if (ET != 0) { + Eta = ((dataBlock[i] >> demux::shiftsTau::eta) & demux::masksTau::eta); + Phi = ((dataBlock[i] >> demux::shiftsTau::phi) & demux::masksTau::phi); + Iso = ((dataBlock[i] >> demux::shiftsTau::iso) & demux::masksTau::iso); + + if (Eta > 127) + Eta = Eta - 256; + + Tau tau(ET, Eta, Phi, Iso); + orbitBufferTaus_[bx].push_back(tau); + nTausOrbit_++; + + if (debug_) { + std::cout << "Tau " << i << std::endl; + std::cout << " Raw: 0x" << std::hex << dataBlock[i] << std::dec << std::endl; + printTau(tau); + } + } + } // end link taus unpacking loop +} + +void ScCaloRawToDigi::unpackEtSums(uint32_t* dataBlock, int bx) { + using namespace l1ScoutingRun3; + + BxSums bxSums; + + int32_t ETEt(0), ETEttem(0), ETMinBiasHFP0(0); // ET + int32_t HTEt(0), HTtowerCount(0), HTMinBiasHFM0(0); // HT + int32_t ETmissEt(0), ETmissPhi(0), ETmissASYMET(0), ETmissMinBiasHFP1(0); //ETMiss + int32_t HTmissEt(0), HTmissPhi(0), HTmissASYMHT(0), HTmissMinBiasHFM1(0); // HTMiss + int32_t ETHFmissEt(0), ETHFmissPhi(0), ETHFmissASYMETHF(0), 
ETHFmissCENT(0); // ETHFMiss + int32_t HTHFmissEt(0), HTHFmissPhi(0), HTHFmissASYMHTHF(0), HTHFmissCENT(0); // HTHFMiss + + // ET block + ETEt = ((dataBlock[0] >> demux::shiftsESums::ETEt) & demux::masksESums::ETEt); + ETEttem = ((dataBlock[0] >> demux::shiftsESums::ETEttem) & demux::masksESums::ETEttem); + + bxSums.setHwTotalEt(ETEt); + bxSums.setHwTotalEtEm(ETEttem); + + // HT block + HTEt = ((dataBlock[1] >> demux::shiftsESums::HTEt) & demux::masksESums::HTEt); + + bxSums.setHwTotalHt(HTEt); + + // ETMiss block + ETmissEt = ((dataBlock[2] >> demux::shiftsESums::ETmissEt) & demux::masksESums::ETmissEt); + ETmissPhi = ((dataBlock[2] >> demux::shiftsESums::ETmissPhi) & demux::masksESums::ETmissPhi); + + if (ETmissEt > 0) { + bxSums.setHwMissEt(ETmissEt); + bxSums.setHwMissEtPhi(ETmissPhi); + } + + // HTMiss block + HTmissEt = ((dataBlock[3] >> demux::shiftsESums::HTmissEt) & demux::masksESums::HTmissEt); + HTmissPhi = ((dataBlock[3] >> demux::shiftsESums::HTmissPhi) & demux::masksESums::HTmissPhi); + + if (HTmissEt > 0) { + bxSums.setHwMissHt(HTmissEt); + bxSums.setHwMissHtPhi(HTmissPhi); + } + + // ETHFMiss block + ETHFmissEt = ((dataBlock[4] >> demux::shiftsESums::ETHFmissEt) & demux::masksESums::ETHFmissEt); + ETHFmissPhi = ((dataBlock[4] >> demux::shiftsESums::ETHFmissPhi) & demux::masksESums::ETHFmissPhi); + + if (ETHFmissEt > 0) { + bxSums.setHwMissEtHF(ETHFmissEt); + bxSums.setHwMissEtHFPhi(ETHFmissPhi); + } + + // HTHFMiss block + HTHFmissEt = ((dataBlock[5] >> demux::shiftsESums::ETHFmissEt) & demux::masksESums::ETHFmissEt); + HTHFmissPhi = ((dataBlock[5] >> demux::shiftsESums::ETHFmissPhi) & demux::masksESums::ETHFmissPhi); + + if (HTHFmissEt > 0) { + bxSums.setHwMissHtHF(HTHFmissEt); + bxSums.setHwMissHtHFPhi(HTHFmissPhi); + } + + // Insert additional sums + if (enableAllSums_) { + // ET block + ETMinBiasHFP0 = ((dataBlock[0] >> demux::shiftsESums::ETMinBiasHF) & demux::masksESums::ETMinBiasHF); + bxSums.setMinBiasHFP0(ETMinBiasHFP0); + + // HT block + 
HTtowerCount = ((dataBlock[1] >> demux::shiftsESums::HTtowerCount) & demux::masksESums::HTtowerCount); + HTMinBiasHFM0 = ((dataBlock[1] >> demux::shiftsESums::HTMinBiasHF) & demux::masksESums::HTMinBiasHF); + + bxSums.setTowerCount(HTtowerCount); + bxSums.setMinBiasHFM0(HTMinBiasHFM0); + + // ET Miss block + ETmissASYMET = ((dataBlock[2] >> demux::shiftsESums::ETmissASYMET) & demux::masksESums::ETmissASYMET); + ETmissMinBiasHFP1 = ((dataBlock[2] >> demux::shiftsESums::ETmissMinBiasHF) & demux::masksESums::ETmissMinBiasHF); + bxSums.setHwAsymEt(ETmissASYMET); + bxSums.setMinBiasHFP1(ETmissMinBiasHFP1); + + // HT Miss block + HTmissASYMHT = ((dataBlock[3] >> demux::shiftsESums::HTmissASYMHT) & demux::masksESums::HTmissASYMHT); + HTmissMinBiasHFM1 = ((dataBlock[3] >> demux::shiftsESums::HTmissMinBiasHF) & demux::masksESums::HTmissMinBiasHF); + + bxSums.setHwAsymHt(HTmissASYMHT); + bxSums.setMinBiasHFM1(HTmissMinBiasHFM1); + + // ETHFMiss + ETHFmissASYMETHF = ((dataBlock[4] >> demux::shiftsESums::ETHFmissASYMETHF) & demux::masksESums::ETHFmissASYMETHF); + ETHFmissCENT = ((dataBlock[4] >> demux::shiftsESums::ETHFmissCENT) & demux::masksESums::ETHFmissCENT); + + bxSums.setHwAsymEtHF(ETHFmissASYMETHF); + + // HTHFMiss + HTHFmissASYMHTHF = ((dataBlock[5] >> demux::shiftsESums::ETHFmissASYMETHF) & demux::masksESums::ETHFmissASYMETHF); + HTHFmissCENT = ((dataBlock[5] >> demux::shiftsESums::ETHFmissCENT) & demux::masksESums::ETHFmissCENT); + + bxSums.setHwAsymHtHF(HTHFmissASYMHTHF); + bxSums.setCentrality((HTHFmissCENT << 4) + ETHFmissCENT); + } + + orbitBufferEtSums_[bx].push_back(bxSums); + nEtSumsOrbit_ += 1; + + if (debug_) { + std::cout << "Raw frames:\n"; + for (int frame = 0; frame < 6; frame++) { + std::cout << " frame " << frame << ": 0x" << std::hex << dataBlock[frame] << std::dec << std::endl; + } + printBxSums(bxSums); + } +} + +void ScCaloRawToDigi::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + 
desc.setUnknown(); + descriptions.addDefault(desc); +} + +DEFINE_FWK_MODULE(ScCaloRawToDigi); diff --git a/EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.h b/EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.h new file mode 100644 index 0000000000000..ca1bcd34e1ea3 --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/plugins/ScCALORawToDigi.h @@ -0,0 +1,53 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include "DataFormats/FEDRawData/interface/FEDRawData.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSNumbering.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" + +#include "DataFormats/L1Scouting/interface/L1ScoutingCalo.h" + +#include "EventFilter/L1ScoutingRawToDigi/interface/shifts.h" +#include "EventFilter/L1ScoutingRawToDigi/interface/masks.h" +#include "EventFilter/L1ScoutingRawToDigi/interface/blocks.h" +#include "L1TriggerScouting/Utilities/interface/printScObjects.h" + +#include +#include + +class ScCaloRawToDigi : public edm::stream::EDProducer<> { +public: + explicit ScCaloRawToDigi(const edm::ParameterSet&); + ~ScCaloRawToDigi() override; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void produce(edm::Event&, const edm::EventSetup&) override; + + void unpackOrbit(const unsigned char* buf, size_t len); + + void unpackLinkJets(uint32_t* dataBlock, int bx); + void unpackLinkEGammas(uint32_t* dataBlock, int bx); + void unpackLinkTaus(uint32_t* dataBlock, int bx); + void unpackEtSums(uint32_t* dataBlock, int bx); + + int nJetsOrbit_, nEGammasOrbit_, nTausOrbit_, 
nEtSumsOrbit_; + // vectors holding data for every bunch crossing + // before filling the orbit collection + std::vector> orbitBufferJets_; + std::vector> orbitBufferEGammas_; + std::vector> orbitBufferTaus_; + std::vector> orbitBufferEtSums_; + + bool debug_ = false; + bool enableAllSums_ = false; + edm::InputTag srcInputTag; + edm::EDGetToken rawToken; +}; diff --git a/EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.cc b/EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.cc new file mode 100644 index 0000000000000..349c250d4bba2 --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.cc @@ -0,0 +1,169 @@ +#include "EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.h" + +ScGMTRawToDigi::ScGMTRawToDigi(const edm::ParameterSet& iConfig) { + using namespace edm; + srcInputTag = iConfig.getParameter("srcInputTag"); + debug_ = iConfig.getUntrackedParameter("debug", false); + + // initialize orbit buffer for BX 1->3564; + orbitBuffer_ = std::vector>(3565); + for (auto& bxVec : orbitBuffer_) { + bxVec.reserve(8); + } + nMuonsOrbit_ = 0; + + produces().setBranchAlias("MuonOrbitCollection"); + rawToken = consumes(srcInputTag); +} + +ScGMTRawToDigi::~ScGMTRawToDigi(){}; + +void ScGMTRawToDigi::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + using namespace edm; + + Handle ScoutingRawDataCollection; + iEvent.getByToken(rawToken, ScoutingRawDataCollection); + + const FEDRawData& sourceRawData = ScoutingRawDataCollection->FEDData(SDSNumbering::GmtSDSID); + size_t orbitSize = sourceRawData.size(); + + std::unique_ptr unpackedMuons(new l1ScoutingRun3::MuonOrbitCollection); + + if ((sourceRawData.size() == 0) && debug_) { + std::cout << "No raw data for GMT FED\n"; + } + + // unpack current orbit and store data into the orbitBufferr + unpackOrbit(sourceRawData.data(), orbitSize); + + // fill orbit collection and clear the Bx buffer vector + unpackedMuons->fillAndClear(orbitBuffer_, nMuonsOrbit_); + + // store collection in the 
event + iEvent.put(std::move(unpackedMuons)); +} + +void ScGMTRawToDigi::unpackOrbit(const unsigned char* buf, size_t len) { + using namespace l1ScoutingRun3; + + // reset counters + nMuonsOrbit_ = 0; + + size_t pos = 0; + + while (pos < len) { + assert(pos + 4 <= len); + + // get BX header + uint32_t header = *((uint32_t*)(buf + pos)); + pos += 4; + // count mA and mB + uint32_t mAcount = (header & header_masks::mAcount) >> header_shifts::mAcount; + uint32_t mBcount = (header & header_masks::mBcount) >> header_shifts::mBcount; + + // declare block to read + ugmt::block* bl = (ugmt::block*)(buf + pos); + pos += 4 + 4 + (mAcount + mBcount) * 12; + assert(pos <= len); + + uint32_t orbit = bl->orbit & 0x7FFFFFFF; + uint32_t bx = bl->bx; + + if (debug_) { + std::cout << "GMT Orbit " << orbit << ", BX -> " << bx << ", nMuons -> " << mAcount + mBcount << std::endl; + } + + // Unpack muons for this BX + orbitBuffer_[bx].reserve(mAcount + mBcount); + + for (unsigned int i = 0; i < mAcount + mBcount; i++) { + uint32_t interm = (bl->mu[i].extra >> ugmt::shiftsMuon::interm) & ugmt::masksMuon::interm; + if (interm == 1) { + if (debug_) { + std::cout << " -> Excluding intermediate muon\n"; + } + continue; + } + + uint32_t index = (bl->mu[i].s >> ugmt::shiftsMuon::index) & ugmt::masksMuon::index; + uint32_t ietaextu = (bl->mu[i].f >> ugmt::shiftsMuon::etaext) & ugmt::masksMuon::etaextv; + int32_t ietaext; + if (((bl->mu[i].f >> ugmt::shiftsMuon::etaext) & ugmt::masksMuon::etaexts) != 0) { + ietaext = ietaextu -= 256; + } else { + ietaext = ietaextu; + } + + // extract pt and quality and apply cut if required + int32_t iptuncon = (bl->mu[i].s >> ugmt::shiftsMuon::ptuncon) & ugmt::masksMuon::ptuncon; + int32_t ipt = (bl->mu[i].f >> ugmt::shiftsMuon::pt) & ugmt::masksMuon::pt; + if ((ipt - 1) < 0) { + continue; + } + uint32_t qual = (bl->mu[i].f >> ugmt::shiftsMuon::qual) & ugmt::masksMuon::qual; + if (qual == 0) { + continue; + } + + // extract integer value for extrapolated phi + 
int32_t iphiext = ((bl->mu[i].f >> ugmt::shiftsMuon::phiext) & ugmt::masksMuon::phiext); + + // extract integer value for extrapolated phi + int32_t idxy = ((bl->mu[i].s >> ugmt::shiftsMuon::dxy) & ugmt::masksMuon::dxy); + + // extract iso bits and charge + uint32_t iso = (bl->mu[i].s >> ugmt::shiftsMuon::iso) & ugmt::masksMuon::iso; + int32_t chrg = 0; + if (((bl->mu[i].s >> ugmt::shiftsMuon::chrgv) & ugmt::masksMuon::chrgv) == 1) + chrg = ((bl->mu[i].s >> ugmt::shiftsMuon::chrg) & ugmt::masksMuon::chrg) == 1 ? -1 : 1; + + // extract eta and phi at muon station + int32_t iphi = (bl->mu[i].s >> ugmt::shiftsMuon::phi) & ugmt::masksMuon::phi; + uint32_t ieta1 = (bl->mu[i].extra >> ugmt::shiftsMuon::eta1) & ugmt::masksMuon::eta; + uint32_t ieta2 = (bl->mu[i].extra >> ugmt::shiftsMuon::eta2) & ugmt::masksMuon::eta; + + uint32_t ieta_u; + int32_t ieta; + // checking if raw eta should be taken from muon 1 or muon 2 + if ((bl->mu[i].extra & 0x1) == 0) { + ieta_u = ieta1; + } else { + ieta_u = ieta2; + } + + // two's complement + if (ieta_u > 256) { + ieta = ieta_u - 512; + } else { + ieta = ieta_u; + } + + // increment muon counter + nMuonsOrbit_++; + + l1ScoutingRun3::Muon muon(ipt, ieta, iphi, qual, chrg, chrg != 0, iso, index, ietaext, iphiext, iptuncon, idxy); + + orbitBuffer_[bx].push_back(muon); + + if (debug_) { + std::cout << "--- Muon " << i << " ---\n"; + std::cout << " Raw f: 0x" << std::hex << bl->mu[i].f << std::dec << "\n"; + std::cout << " Raw s: 0x" << std::hex << bl->mu[i].s << std::dec << "\n"; + std::cout << " Raw extra: 0x" << std::hex << bl->mu[i].extra << std::dec << "\n"; + printMuon(muon); + } + + } // end of bx + + } // end orbit while loop + + //muons->flatten(); +} + +void ScGMTRawToDigi::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.setUnknown(); + descriptions.addDefault(desc); +} + +DEFINE_FWK_MODULE(ScGMTRawToDigi); diff --git 
a/EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.h b/EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.h new file mode 100644 index 0000000000000..8b1622d327304 --- /dev/null +++ b/EventFilter/L1ScoutingRawToDigi/plugins/ScGMTRawToDigi.h @@ -0,0 +1,44 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include "DataFormats/FEDRawData/interface/FEDRawData.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSNumbering.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" + +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" + +#include "EventFilter/L1ScoutingRawToDigi/interface/shifts.h" +#include "EventFilter/L1ScoutingRawToDigi/interface/masks.h" +#include "EventFilter/L1ScoutingRawToDigi/interface/blocks.h" +#include "L1TriggerScouting/Utilities/interface/printScObjects.h" + +#include +#include +#include + +class ScGMTRawToDigi : public edm::stream::EDProducer<> { +public: + explicit ScGMTRawToDigi(const edm::ParameterSet&); + ~ScGMTRawToDigi() override; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void produce(edm::Event&, const edm::EventSetup&) override; + + void unpackOrbit(const unsigned char* buf, size_t len); + + // vector holding data for every bunch crossing + // before filling the orbit collection + std::vector> orbitBuffer_; + int nMuonsOrbit_; + + bool debug_ = false; + edm::InputTag srcInputTag; + edm::EDGetToken rawToken; +}; diff --git a/EventFilter/L1TRawToDigi/plugins/AMC13DumpToRaw.cc b/EventFilter/L1TRawToDigi/plugins/AMC13DumpToRaw.cc index 9bdf63222fa40..347a3fe51f14e 100644 --- 
a/EventFilter/L1TRawToDigi/plugins/AMC13DumpToRaw.cc +++ b/EventFilter/L1TRawToDigi/plugins/AMC13DumpToRaw.cc @@ -49,7 +49,6 @@ namespace l1t { class AMC13DumpToRaw : public edm::one::EDProducer<> { public: explicit AMC13DumpToRaw(const edm::ParameterSet&); - ~AMC13DumpToRaw() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); @@ -64,11 +63,6 @@ namespace l1t { // void formatRaw(edm::Event& iEvent, amc13::Packet& amc13, FEDRawData& fed_data); - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - // ----------member data --------------------------- std::ifstream file_; std::string filename_; @@ -106,11 +100,6 @@ namespace l1t { produces(); } - AMC13DumpToRaw::~AMC13DumpToRaw() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
- } - // // member functions // @@ -227,38 +216,6 @@ namespace l1t { // ------------ method called once each job just after ending the event loop ------------ void AMC13DumpToRaw::endJob() { file_.close(); } - // ------------ method called when starting to processes a run ------------ - /* -void -AMC13DumpToRaw::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - - // ------------ method called when ending the processing of a run ------------ - /* -void -AMC13DumpToRaw::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - - // ------------ method called when starting to processes a luminosity block ------------ - /* -vvoid -AMC13DumpToRaw::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - - // ------------ method called when ending the processing of a luminosity block ------------ - /* -void -AMC13DumpToRaw::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void AMC13DumpToRaw::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/EventFilter/L1TRawToDigi/plugins/L1TDigiToRaw.cc b/EventFilter/L1TRawToDigi/plugins/L1TDigiToRaw.cc index 9789807862c34..46227383066cd 100644 --- a/EventFilter/L1TRawToDigi/plugins/L1TDigiToRaw.cc +++ b/EventFilter/L1TRawToDigi/plugins/L1TDigiToRaw.cc @@ -47,20 +47,12 @@ namespace l1t { class L1TDigiToRaw : public edm::stream::EDProducer<> { public: explicit L1TDigiToRaw(const edm::ParameterSet&); - ~L1TDigiToRaw() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); - using edm::stream::EDProducer<>::consumes; - private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(edm::Run const&, edm::EventSetup const&) override{}; - void endRun(edm::Run const&, edm::EventSetup const&) override{}; - 
void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override{}; - void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override{}; - // ----------member data --------------------------- int evtType_; int fedId_; @@ -95,8 +87,6 @@ namespace l1t { slinkTrailerSize_ = config.getUntrackedParameter("lenSlinkTrailer", 8); } - L1TDigiToRaw::~L1TDigiToRaw() {} - // ------------ method called to produce the data ------------ void L1TDigiToRaw::produce(edm::Event& event, const edm::EventSetup& setup) { using namespace edm; diff --git a/EventFilter/L1TRawToDigi/plugins/L1TRawToDigi.cc b/EventFilter/L1TRawToDigi/plugins/L1TRawToDigi.cc index 8e8acec018fc5..af8bb008fc7a3 100644 --- a/EventFilter/L1TRawToDigi/plugins/L1TRawToDigi.cc +++ b/EventFilter/L1TRawToDigi/plugins/L1TRawToDigi.cc @@ -52,11 +52,6 @@ namespace l1t { private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(edm::Run const&, edm::EventSetup const&) override{}; - void endRun(edm::Run const&, edm::EventSetup const&) override{}; - void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override{}; - void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override{}; - // ----------member data --------------------------- edm::EDGetTokenT fedData_; std::vector fedIds_; diff --git a/EventFilter/L1TRawToDigi/plugins/TriggerRulePrefireVetoFilter.cc b/EventFilter/L1TRawToDigi/plugins/TriggerRulePrefireVetoFilter.cc index 1459ae47d6e85..5a9db004ce1f9 100644 --- a/EventFilter/L1TRawToDigi/plugins/TriggerRulePrefireVetoFilter.cc +++ b/EventFilter/L1TRawToDigi/plugins/TriggerRulePrefireVetoFilter.cc @@ -39,19 +39,11 @@ class TriggerRulePrefireVetoFilter : public edm::stream::EDFilter<> { public: explicit TriggerRulePrefireVetoFilter(const edm::ParameterSet&); - ~TriggerRulePrefireVetoFilter() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void 
beginStream(edm::StreamID) override; bool filter(edm::Event&, const edm::EventSetup&) override; - void endStream() override; - - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetTokenT tcdsRecordToken_; @@ -73,11 +65,6 @@ TriggerRulePrefireVetoFilter::TriggerRulePrefireVetoFilter(const edm::ParameterS //now do what ever initialization is needed } -TriggerRulePrefireVetoFilter::~TriggerRulePrefireVetoFilter() { - // do anything here that needs to be done at destruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -131,44 +118,6 @@ bool TriggerRulePrefireVetoFilter::filter(edm::Event& iEvent, const edm::EventSe return false; } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void TriggerRulePrefireVetoFilter::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void TriggerRulePrefireVetoFilter::endStream() {} - -// ------------ method called when starting to processes a run ------------ -/* -void -TriggerRulePrefireVetoFilter::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -TriggerRulePrefireVetoFilter::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -TriggerRulePrefireVetoFilter::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the 
processing of a luminosity block ------------ -/* -void -TriggerRulePrefireVetoFilter::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void TriggerRulePrefireVetoFilter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/EventFilter/L1TXRawToDigi/plugins/L1TCaloLayer1RawToDigi.cc b/EventFilter/L1TXRawToDigi/plugins/L1TCaloLayer1RawToDigi.cc index 75208a4f0338a..d32858cb3aba1 100644 --- a/EventFilter/L1TXRawToDigi/plugins/L1TCaloLayer1RawToDigi.cc +++ b/EventFilter/L1TXRawToDigi/plugins/L1TCaloLayer1RawToDigi.cc @@ -67,14 +67,11 @@ using namespace edm; class L1TCaloLayer1RawToDigi : public stream::EDProducer<> { public: explicit L1TCaloLayer1RawToDigi(const ParameterSet&); - ~L1TCaloLayer1RawToDigi() override; static void fillDescriptions(ConfigurationDescriptions& descriptions); private: - void beginStream(StreamID) override; void produce(Event&, const EventSetup&) override; - void endStream() override; void makeECalTPGs(uint32_t lPhi, UCTCTP7RawData& ctp7Data, std::unique_ptr& ecalTPGs); @@ -84,11 +81,6 @@ class L1TCaloLayer1RawToDigi : public stream::EDProducer<> { void makeRegions(uint32_t lPhi, UCTCTP7RawData& ctp7Data, std::unique_ptr& regions); - //virtual void beginRun(Run const&, EventSetup const&) override; - //virtual void endRun(Run const&, EventSetup const&) override; - //virtual void beginLuminosityBlock(LuminosityBlock const&, EventSetup const&) override; - //virtual void endLuminosityBlock(LuminosityBlock const&, EventSetup const&) override; - // ----------member data --------------------------- InputTag fedRawDataLabel; @@ -122,8 +114,6 @@ L1TCaloLayer1RawToDigi::L1TCaloLayer1RawToDigi(const ParameterSet& iConfig) consumes(fedRawDataLabel); } -L1TCaloLayer1RawToDigi::~L1TCaloLayer1RawToDigi() {} - // // 
member functions // @@ -378,44 +368,6 @@ void L1TCaloLayer1RawToDigi::makeRegions(uint32_t lPhi, } } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void L1TCaloLayer1RawToDigi::beginStream(StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void L1TCaloLayer1RawToDigi::endStream() {} - -// ------------ method called when starting to processes a run ------------ -/* - void - L1TCaloLayer1RawToDigi::beginRun(Run const&, EventSetup const&) - { - } -*/ - -// ------------ method called when ending the processing of a run ------------ -/* - void - L1TCaloLayer1RawToDigi::endRun(Run const&, EventSetup const&) - { - } -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* - void - L1TCaloLayer1RawToDigi::beginLuminosityBlock(LuminosityBlock const&, EventSetup const&) - { - } -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* - void - L1TCaloLayer1RawToDigi::endLuminosityBlock(LuminosityBlock const&, EventSetup const&) - { - } -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TCaloLayer1RawToDigi::fillDescriptions(ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/EventFilter/Phase2TrackerRawToDigi/plugins/Phase2TrackerDigiProducer.cc b/EventFilter/Phase2TrackerRawToDigi/plugins/Phase2TrackerDigiProducer.cc index 2f72ea9f869f0..2608191896378 100644 --- a/EventFilter/Phase2TrackerRawToDigi/plugins/Phase2TrackerDigiProducer.cc +++ b/EventFilter/Phase2TrackerRawToDigi/plugins/Phase2TrackerDigiProducer.cc @@ -13,7 +13,7 @@ #include "EventFilter/Phase2TrackerRawToDigi/interface/Phase2TrackerFEDRawChannelUnpacker.h" #include "EventFilter/Phase2TrackerRawToDigi/interface/Phase2TrackerFEDZSChannelUnpacker.h" 
#include "EventFilter/Phase2TrackerRawToDigi/interface/utils.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" @@ -22,23 +22,19 @@ namespace Phase2Tracker { - class Phase2TrackerDigiProducer : public edm::stream::EDProducer<> { + class Phase2TrackerDigiProducer : public edm::global::EDProducer<> { public: /// constructor Phase2TrackerDigiProducer(const edm::ParameterSet& pset); /// default constructor ~Phase2TrackerDigiProducer() override = default; - void beginRun(edm::Run const&, edm::EventSetup const&) override; - void produce(edm::Event&, const edm::EventSetup&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; private: const edm::ESGetToken ph2CablingESToken_; - unsigned int runNumber_; - edm::EDGetTokenT token_; - const Phase2TrackerCabling* cabling_; - uint32_t cacheId_; - DetIdCollection detids_; + const edm::EDGetTokenT token_; + const edm::EDPutTokenT> putToken_; class Registry { public: /// constructor @@ -54,8 +50,6 @@ namespace Phase2Tracker { size_t index; uint16_t length; }; - std::vector proc_work_registry_; - std::vector proc_work_digis_; }; } // namespace Phase2Tracker @@ -68,41 +62,58 @@ using namespace std; namespace Phase2Tracker { Phase2TrackerDigiProducer::Phase2TrackerDigiProducer(const edm::ParameterSet& pset) - : ph2CablingESToken_(esConsumes()), runNumber_(0), cabling_(nullptr), cacheId_(0) { - // define product - produces>("ProcessedRaw"); - token_ = consumes(pset.getParameter("ProductLabel")); - } + : ph2CablingESToken_(esConsumes()), + token_(consumes(pset.getParameter("ProductLabel"))), + putToken_(produces>("ProcessedRaw")) {} - void Phase2TrackerDigiProducer::beginRun(edm::Run const& run, edm::EventSetup const& es) { + void 
Phase2TrackerDigiProducer::produce(edm::StreamID, edm::Event& event, const edm::EventSetup& es) const { // fetch cabling from event setup - cabling_ = &es.getData(ph2CablingESToken_); - } - - void Phase2TrackerDigiProducer::produce(edm::Event& event, const edm::EventSetup& es) { - // empty vectors for the next event - proc_work_registry_.clear(); - proc_work_digis_.clear(); + auto const& cabling = es.getData(ph2CablingESToken_); // Retrieve FEDRawData collection edm::Handle buffers; event.getByToken(token_, buffers); + //reserve enough working memory + std::vector proc_work_registry; + std::vector proc_work_digis; + { + size_t reserve_count = 0; + size_t reserve_digis = 0; + for (size_t fedIndex = Phase2Tracker::FED_ID_MIN; fedIndex <= Phase2Tracker::CMS_FED_ID_MAX; ++fedIndex) { + const FEDRawData& fed = buffers->FEDData(fedIndex); + if (fed.size() != 0) { + // construct buffer + Phase2Tracker::Phase2TrackerFEDBuffer buffer(fed.data(), fed.size()); + int ichan = 0; + for (int ife = 0; ife < MAX_FE_PER_FED; ife++) { + for (int icbc = 0; icbc < MAX_CBC_PER_FE; icbc++) { + if (buffer.channel(ichan).length() > 0) { + ++reserve_count; + //calculation from Phase2TrackerFEDRawChannelUnpacker.h + reserve_digis += buffer.channel(ichan).length() * 8 - STRIPS_PADDING; + ++ichan; + } + } + } + } + } + proc_work_registry.reserve(2 * reserve_count); + proc_work_digis.reserve(reserve_digis); + } // Analyze strip tracker FED buffers in data - size_t fedIndex; - for (fedIndex = Phase2Tracker::FED_ID_MIN; fedIndex <= Phase2Tracker::CMS_FED_ID_MAX; ++fedIndex) { + for (size_t fedIndex = Phase2Tracker::FED_ID_MIN; fedIndex <= Phase2Tracker::CMS_FED_ID_MAX; ++fedIndex) { const FEDRawData& fed = buffers->FEDData(fedIndex); if (fed.size() != 0) { // construct buffer - Phase2Tracker::Phase2TrackerFEDBuffer* buffer = nullptr; - buffer = new Phase2Tracker::Phase2TrackerFEDBuffer(fed.data(), fed.size()); + Phase2Tracker::Phase2TrackerFEDBuffer buffer(fed.data(), fed.size()); #ifdef 
EDM_ML_DEBUG std::ostringstream ss; ss << " -------------------------------------------- " << endl; ss << " buffer debug ------------------------------- " << endl; ss << " -------------------------------------------- " << endl; - ss << " buffer size : " << buffer->bufferSize() << endl; + ss << " buffer size : " << buffer.bufferSize() << endl; ss << " fed id : " << fedIndex << endl; ss << " -------------------------------------------- " << endl; ss << " tracker header debug ------------------------" << endl; @@ -111,7 +122,7 @@ namespace Phase2Tracker { ss.clear(); ss.str(""); - Phase2TrackerFEDHeader tr_header = buffer->trackerHeader(); + Phase2TrackerFEDHeader tr_header = buffer.trackerHeader(); ss << " Version : " << hex << setw(2) << (int)tr_header.getDataFormatVersion() << endl; ss << " Mode : " << hex << setw(2) << tr_header.getDebugMode() << endl; ss << " Type : " << hex << setw(2) << (int)tr_header.getEventType() << endl; @@ -146,10 +157,10 @@ namespace Phase2Tracker { int ichan = 0; for (int ife = 0; ife < MAX_FE_PER_FED; ife++) { for (int icbc = 0; icbc < MAX_CBC_PER_FE; icbc++) { - const Phase2TrackerFEDChannel& channel = buffer->channel(ichan); + const Phase2TrackerFEDChannel& channel = buffer.channel(ichan); if (channel.length() > 0) { // get fedid from cabling - const Phase2TrackerModule mod = cabling_->findFedCh(std::make_pair(fedIndex, ife)); + const Phase2TrackerModule mod = cabling.findFedCh(std::make_pair(fedIndex, ife)); uint32_t detid = mod.getDetid(); #ifdef EDM_ML_DEBUG ss << dec << " id from cabling : " << detid << endl; @@ -193,46 +204,40 @@ namespace Phase2Tracker { // store beginning and end of this digis for this detid and add this registry to the list // and store data - Registry regItemTop(detid + 1, STRIPS_PER_CBC * icbc / 2, proc_work_digis_.size(), stripsTop.size()); - proc_work_registry_.push_back(regItemTop); - proc_work_digis_.insert(proc_work_digis_.end(), stripsTop.begin(), stripsTop.end()); - Registry regItemBottom( - detid + 
2, STRIPS_PER_CBC * icbc / 2, proc_work_digis_.size(), stripsBottom.size()); - proc_work_registry_.push_back(regItemBottom); - proc_work_digis_.insert(proc_work_digis_.end(), stripsBottom.begin(), stripsBottom.end()); + proc_work_registry.emplace_back( + detid + 1, STRIPS_PER_CBC * icbc / 2, proc_work_digis.size(), stripsTop.size()); + proc_work_digis.insert(proc_work_digis.end(), stripsTop.begin(), stripsTop.end()); + proc_work_registry.emplace_back( + detid + 2, STRIPS_PER_CBC * icbc / 2, proc_work_digis.size(), stripsBottom.size()); + proc_work_digis.insert(proc_work_digis.end(), stripsBottom.begin(), stripsBottom.end()); } ichan++; } } // end loop on channels - // store digis in edm collections - std::sort(proc_work_registry_.begin(), proc_work_registry_.end()); - std::vector> sorted_and_merged; - - edm::DetSetVector* pr = new edm::DetSetVector(); - - std::vector::iterator it = proc_work_registry_.begin(), it2 = it + 1, end = proc_work_registry_.end(); - while (it < end) { - sorted_and_merged.push_back(edm::DetSet(it->detid)); - std::vector& digis = sorted_and_merged.back().data; - // first count how many digis we have - size_t len = it->length; - for (it2 = it + 1; (it2 != end) && (it2->detid == it->detid); ++it2) { - len += it2->length; - } - // reserve memory - digis.reserve(len); - // push them in - for (it2 = it + 0; (it2 != end) && (it2->detid == it->detid); ++it2) { - digis.insert(digis.end(), &proc_work_digis_[it2->index], &proc_work_digis_[it2->index + it2->length]); - } - it = it2; - } + } + } - edm::DetSetVector proc_raw_dsv(sorted_and_merged, true); - pr->swap(proc_raw_dsv); - event.put(std::unique_ptr>(pr), "ProcessedRaw"); - delete buffer; + // store digis in edm collections + std::sort(proc_work_registry.begin(), proc_work_registry.end()); + std::vector> sorted_and_merged; + + std::vector::iterator it = proc_work_registry.begin(), it2 = it + 1, end = proc_work_registry.end(); + while (it < end) { + 
sorted_and_merged.push_back(edm::DetSet(it->detid)); + std::vector& digis = sorted_and_merged.back().data; + // first count how many digis we have + size_t len = it->length; + for (it2 = it + 1; (it2 != end) && (it2->detid == it->detid); ++it2) { + len += it2->length; + } + // reserve memory + digis.reserve(len); + // push them in + for (it2 = it + 0; (it2 != end) && (it2->detid == it->detid); ++it2) { + digis.insert(digis.end(), &proc_work_digis[it2->index], &proc_work_digis[it2->index + it2->length]); } + it = it2; } + event.emplace(putToken_, sorted_and_merged, true); } } // namespace Phase2Tracker diff --git a/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.cc b/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.cc index 2ce728ff2ed04..63597221608ef 100644 --- a/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.cc +++ b/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.cc @@ -57,8 +57,6 @@ void RPCDigiMerger::fillDescriptions(edm::ConfigurationDescriptions& descs) { descs.add("rpcDigiMerger", desc); } -void RPCDigiMerger::beginRun(edm::Run const& run, edm::EventSetup const& setup) {} - void RPCDigiMerger::produce(edm::Event& event, edm::EventSetup const& setup) { // Get the digis // new RPCDigiCollection diff --git a/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.h b/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.h index 45c155ffee844..154794554ab5f 100644 --- a/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.h +++ b/EventFilter/RPCRawToDigi/plugins/RPCDigiMerger.h @@ -28,7 +28,6 @@ class RPCDigiMerger : public edm::stream::EDProducer<> { static void fillDescriptions(edm::ConfigurationDescriptions& descs); - void beginRun(edm::Run const& run, edm::EventSetup const& setup) override; void produce(edm::Event& event, edm::EventSetup const& setup) override; protected: diff --git a/EventFilter/SiPixelRawToDigi/plugins/BuildFile.xml b/EventFilter/SiPixelRawToDigi/plugins/BuildFile.xml index 212738e941533..87123219d44e4 100644 --- 
a/EventFilter/SiPixelRawToDigi/plugins/BuildFile.xml +++ b/EventFilter/SiPixelRawToDigi/plugins/BuildFile.xml @@ -2,6 +2,7 @@ + diff --git a/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigiErrorsFromSoAAlpaka.cc b/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigiErrorsFromSoAAlpaka.cc new file mode 100644 index 0000000000000..ab762b8f4d97c --- /dev/null +++ b/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigiErrorsFromSoAAlpaka.cc @@ -0,0 +1,130 @@ +#include + +#include "CondFormats/DataRecord/interface/SiPixelFedCablingMapRcd.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingMap.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingTree.h" +#include "DataFormats/Common/interface/DetSetVector.h" +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/DetId/interface/DetIdCollection.h" +#include "DataFormats/FEDRawData/interface/FEDNumbering.h" +#include "DataFormats/SiPixelDetId/interface/PixelFEDChannel.h" +#include "DataFormats/SiPixelDigi/interface/PixelDigi.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelFormatterErrors.h" +#include "EventFilter/SiPixelRawToDigi/interface/PixelDataFormatter.h" +#include "FWCore/Framework/interface/ESWatcher.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsHost.h" + +class SiPixelDigiErrorsFromSoAAlpaka : public edm::stream::EDProducer<> { +public: + explicit SiPixelDigiErrorsFromSoAAlpaka(const edm::ParameterSet& iConfig); + ~SiPixelDigiErrorsFromSoAAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); 
+ +private: + void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; + + const edm::ESGetToken cablingToken_; + const edm::EDGetTokenT digiErrorsSoAGetToken_; + const edm::EDGetTokenT fmtErrorsGetToken_; + const edm::EDPutTokenT> errorPutToken_; + const edm::EDPutTokenT tkErrorPutToken_; + const edm::EDPutTokenT userErrorPutToken_; + const edm::EDPutTokenT> disabledChannelPutToken_; + + edm::ESWatcher cablingWatcher_; + std::unique_ptr cabling_; + + const std::vector tkerrorlist_; + const std::vector usererrorlist_; + + const bool usePhase1_; +}; + +SiPixelDigiErrorsFromSoAAlpaka::SiPixelDigiErrorsFromSoAAlpaka(const edm::ParameterSet& iConfig) + : cablingToken_(esConsumes(edm::ESInputTag("", iConfig.getParameter("CablingMapLabel")))), + digiErrorsSoAGetToken_{consumes(iConfig.getParameter("digiErrorSoASrc"))}, + fmtErrorsGetToken_{consumes(iConfig.getParameter("fmtErrorsSoASrc"))}, + errorPutToken_{produces>()}, + tkErrorPutToken_{produces()}, + userErrorPutToken_{produces("UserErrorModules")}, + disabledChannelPutToken_{produces>()}, + tkerrorlist_(iConfig.getParameter>("ErrorList")), + usererrorlist_(iConfig.getParameter>("UserErrorList")), + usePhase1_(iConfig.getParameter("UsePhase1")) {} + +void SiPixelDigiErrorsFromSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("digiErrorSoASrc", edm::InputTag("siPixelDigis")); + desc.add("fmtErrorsSoASrc", edm::InputTag("siPixelDigis")); + // the configuration parameters here are named following those in SiPixelRawToDigi + desc.add("CablingMapLabel", "")->setComment("CablingMap label"); + desc.add("UsePhase1", false)->setComment("## Use phase1"); + desc.add>("ErrorList", std::vector{29}) + ->setComment("## ErrorList: list of error codes used by tracking to invalidate modules"); + desc.add>("UserErrorList", std::vector{40}) + ->setComment("## UserErrorList: list of error codes used by Pixel experts for investigation"); + 
descriptions.addWithDefaultLabel(desc); +} + +void SiPixelDigiErrorsFromSoAAlpaka::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + // pack errors into collection + + // initialize cabling map or update if necessary + if (cablingWatcher_.check(iSetup)) { + // cabling map, which maps online address (fed->link->ROC->local pixel) to offline (DetId->global pixel) + const SiPixelFedCablingMap* cablingMap = &iSetup.getData(cablingToken_); + cabling_ = cablingMap->cablingTree(); + LogDebug("map version:") << cabling_->version(); + } + + const auto& digiErrors = iEvent.get(digiErrorsSoAGetToken_); + const auto& formatterErrors = iEvent.get(fmtErrorsGetToken_); + + edm::DetSetVector errorcollection{}; + DetIdCollection tkerror_detidcollection{}; + DetIdCollection usererror_detidcollection{}; + edmNew::DetSetVector disabled_channelcollection{}; + + PixelDataFormatter formatter(cabling_.get(), usePhase1_); // for phase 1 & 0 + auto errors = formatterErrors; // make a copy + PixelDataFormatter::DetErrors nodeterrors; + + // if (digiErrors.view().size() > 0) { // TODO: need to know if this size will be useful or not and how to use it + uint32_t size = digiErrors.view().metadata().size(); + for (auto i = 0U; i < size; i++) { + SiPixelErrorCompact err = digiErrors.view()[i].pixelErrors(); + if (err.errorType != 0) { + SiPixelRawDataError error(err.word, err.errorType, err.fedId + FEDNumbering::MINSiPixeluTCAFEDID); + errors[err.rawId].push_back(error); + } + } + // } + + formatter.unpackFEDErrors(errors, + tkerrorlist_, + usererrorlist_, + errorcollection, + tkerror_detidcollection, + usererror_detidcollection, + disabled_channelcollection, + nodeterrors); + + const uint32_t dummydetid = 0xffffffff; + edm::DetSet& errorDetSet = errorcollection.find_or_insert(dummydetid); + errorDetSet.data = nodeterrors; + + iEvent.emplace(errorPutToken_, std::move(errorcollection)); + iEvent.emplace(tkErrorPutToken_, std::move(tkerror_detidcollection)); + 
iEvent.emplace(userErrorPutToken_, std::move(usererror_detidcollection)); + iEvent.emplace(disabledChannelPutToken_, std::move(disabled_channelcollection)); +} + +DEFINE_FWK_MODULE(SiPixelDigiErrorsFromSoAAlpaka); diff --git a/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigisSoAFromCUDA.cc b/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigisSoAFromCUDA.cc index 5b23f2dbda104..67b1b519d4089 100644 --- a/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigisSoAFromCUDA.cc +++ b/EventFilter/SiPixelRawToDigi/plugins/SiPixelDigisSoAFromCUDA.cc @@ -26,16 +26,16 @@ class SiPixelDigisSoAFromCUDA : public edm::stream::EDProducer> digiGetToken_; - edm::EDPutTokenT digiPutToken_; + edm::EDPutTokenT digiPutToken_; - cms::cuda::PortableHostCollection> digis_h_; + cms::cuda::PortableHostCollection digis_h_; int nDigis_; }; SiPixelDigisSoAFromCUDA::SiPixelDigisSoAFromCUDA(const edm::ParameterSet& iConfig) : digiGetToken_(consumes>(iConfig.getParameter("src"))), - digiPutToken_(produces()) {} + digiPutToken_(produces()) {} void SiPixelDigisSoAFromCUDA::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; @@ -52,8 +52,7 @@ void SiPixelDigisSoAFromCUDA::acquire(const edm::Event& iEvent, const auto& digis_d = ctx.get(iEvent, digiGetToken_); nDigis_ = digis_d.nDigis(); - nDigis_ = digis_d.nDigis(); - digis_h_ = cms::cuda::PortableHostCollection>(digis_d.view().metadata().size(), ctx.stream()); + digis_h_ = cms::cuda::PortableHostCollection(digis_d.view().metadata().size(), ctx.stream()); cudaCheck(cudaMemcpyAsync(digis_h_.buffer().get(), digis_d.const_buffer().get(), digis_d.bufferSize(), diff --git a/EventFilter/SiPixelRawToDigi/python/siPixelDigis_cff.py b/EventFilter/SiPixelRawToDigi/python/siPixelDigis_cff.py index b5484afd2fafa..f5139f1cb418b 100644 --- a/EventFilter/SiPixelRawToDigi/python/siPixelDigis_cff.py +++ b/EventFilter/SiPixelRawToDigi/python/siPixelDigis_cff.py @@ -23,6 +23,12 @@ from 
EventFilter.SiPixelRawToDigi.siPixelDigiErrorsFromSoA_cfi import siPixelDigiErrorsFromSoA as _siPixelDigiErrorsFromSoA siPixelDigiErrors = _siPixelDigiErrorsFromSoA.clone() +# Alpaka modifier +from Configuration.ProcessModifiers.alpaka_cff import alpaka +from EventFilter.SiPixelRawToDigi.siPixelDigiErrorsFromSoAAlpaka_cfi import siPixelDigiErrorsFromSoAAlpaka as _siPixelDigiErrorsFromSoAAlpaka + +alpaka.toReplaceWith(siPixelDigiErrors, _siPixelDigiErrorsFromSoAAlpaka.clone()) + # use the Phase 1 settings from Configuration.Eras.Modifier_phase1Pixel_cff import phase1Pixel phase1Pixel.toModify(siPixelDigiErrors, diff --git a/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.cc b/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.cc index 487410142a80e..7f85e1cba45c9 100644 --- a/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.cc +++ b/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.cc @@ -18,27 +18,12 @@ namespace sistrip { ExcludedFEDListProducer::ExcludedFEDListProducer(const edm::ParameterSet& pset) - : runNumber_(0), - cacheId_(0), - cabling_(nullptr), - token_(consumes(pset.getParameter("ProductLabel"))), - cablingToken_(esConsumes()) { + : runNumber_(0), token_(consumes(pset.getParameter("ProductLabel"))), cablingToken_(esConsumes()) { produces(); } ExcludedFEDListProducer::~ExcludedFEDListProducer() {} - void ExcludedFEDListProducer::beginRun(const edm::Run& run, const edm::EventSetup& es) { - uint32_t cacheId = es.get().cacheIdentifier(); - - if (cacheId_ != cacheId) { - cacheId_ = cacheId; - - edm::ESHandle c = es.getHandle(cablingToken_); - cabling_ = c.product(); - } - } - void ExcludedFEDListProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; desc.add("ProductLabel", edm::InputTag("rawDataCollector")); @@ -49,6 +34,8 @@ namespace sistrip { if (runNumber_ != event.run()) { runNumber_ = event.run(); + auto const& cabling = es.getData(cablingToken_); + 
DetIdVector emptyDetIdVector; detids_.swap(emptyDetIdVector); // Reserve space in bad module list @@ -58,7 +45,7 @@ namespace sistrip { event.getByToken(token_, buffers); // Retrieve FED ids from cabling map and iterate through - for (auto ifed = cabling_->fedIds().begin(); ifed != cabling_->fedIds().end(); ifed++) { + for (auto ifed = cabling.fedIds().begin(); ifed != cabling.fedIds().end(); ifed++) { // ignore trigger FED // if ( *ifed == triggerFedId_ ) { continue; } @@ -69,7 +56,7 @@ namespace sistrip { if (input.size() == 0) { // std::cout << "Input size == 0 for FED number " << static_cast(*ifed) << std::endl; // get the cabling connections for this FED - auto conns = cabling_->fedConnections(*ifed); + auto conns = cabling.fedConnections(*ifed); // Mark FED modules as bad detids_.reserve(detids_.size() + conns.size()); for (auto iconn = conns.begin(); iconn != conns.end(); ++iconn) { diff --git a/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.h b/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.h index 21318c0ef55ea..74f177f851311 100644 --- a/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.h +++ b/EventFilter/SiStripRawToDigi/plugins/ExcludedFEDListProducer.h @@ -30,14 +30,11 @@ namespace sistrip { ExcludedFEDListProducer(const edm::ParameterSet& pset); /// default constructor ~ExcludedFEDListProducer() override; - void beginRun(const edm::Run& run, const edm::EventSetup& es) override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); void produce(edm::Event& event, const edm::EventSetup& es) override; private: unsigned int runNumber_; - uint32_t cacheId_; - const SiStripFedCabling* cabling_; const edm::EDGetTokenT token_; edm::ESGetToken cablingToken_; diff --git a/EventFilter/Utilities/BuildFile.xml b/EventFilter/Utilities/BuildFile.xml index 6a69ee301bfe8..2270817d5ce0a 100644 --- a/EventFilter/Utilities/BuildFile.xml +++ b/EventFilter/Utilities/BuildFile.xml @@ -2,6 +2,7 @@ + diff --git 
a/EventFilter/Utilities/interface/DAQSourceModels.h b/EventFilter/Utilities/interface/DAQSourceModels.h index d886aaf1c26a4..5727dc7aa2164 100644 --- a/EventFilter/Utilities/interface/DAQSourceModels.h +++ b/EventFilter/Utilities/interface/DAQSourceModels.h @@ -61,7 +61,9 @@ class DataMode { bool fileListMode) const = 0; virtual bool isMultiDir() { return false; } - virtual void makeDirectoryEntries(std::vector const& baseDirs, std::string const& runDir) = 0; + virtual void makeDirectoryEntries(std::vector const& baseDirs, + std::vector const& numSources, + std::string const& runDir) = 0; void setTesting(bool testing) { testing_ = testing; } protected: diff --git a/EventFilter/Utilities/interface/DAQSourceModelsFRD.h b/EventFilter/Utilities/interface/DAQSourceModelsFRD.h index bdf770836c3aa..c3e1c896ab623 100644 --- a/EventFilter/Utilities/interface/DAQSourceModelsFRD.h +++ b/EventFilter/Utilities/interface/DAQSourceModelsFRD.h @@ -64,7 +64,9 @@ class DataModeFRD : public DataMode { MAXTCDSuTCAFEDID_ = MAXTCDSuTCAFEDID; } - void makeDirectoryEntries(std::vector const& baseDirs, std::string const& runDir) override {} + void makeDirectoryEntries(std::vector const& baseDirs, + std::vector const& numSources, + std::string const& runDir) override {} std::pair> defineAdditionalFiles(std::string const& primaryName, bool) const override { return std::make_pair(true, std::vector()); @@ -171,7 +173,9 @@ class DataModeFRDStriped : public DataMode { MAXTCDSuTCAFEDID_ = MAXTCDSuTCAFEDID; } - void makeDirectoryEntries(std::vector const& baseDirs, std::string const& runDir) override; + void makeDirectoryEntries(std::vector const& baseDirs, + std::vector const& numSources, + std::string const& runDir) override; std::pair> defineAdditionalFiles(std::string const& primaryName, bool fileListMode) const override; diff --git a/EventFilter/Utilities/interface/DAQSourceModelsScouting.h b/EventFilter/Utilities/interface/DAQSourceModelsScouting.h deleted file mode 100644 index 
430fdfdefd34b..0000000000000 --- a/EventFilter/Utilities/interface/DAQSourceModelsScouting.h +++ /dev/null @@ -1,268 +0,0 @@ -#ifndef EventFilter_Utilities_DAQSourceModelsScouting_h -#define EventFilter_Utilities_DAQSourceModelsScouting_h - -#include - -#include "EventFilter/Utilities/interface/DAQSourceModels.h" -#include "DataFormats/L1Trigger/interface/Muon.h" -#include "DataFormats/L1Trigger/interface/BXVector.h" - -namespace scouting { - struct muon { - uint32_t f; - uint32_t s; - }; - - struct block { - uint32_t bx; - uint32_t orbit; - muon mu[16]; - }; - - struct masks { - static constexpr uint32_t phiext = 0x3ff; - static constexpr uint32_t pt = 0x1ff; - static constexpr uint32_t qual = 0xf; - static constexpr uint32_t etaext = 0x1ff; - static constexpr uint32_t etaextv = 0xff; - static constexpr uint32_t etaexts = 0x100; - static constexpr uint32_t iso = 0x3; - static constexpr uint32_t chrg = 0x1; - static constexpr uint32_t chrgv = 0x1; - static constexpr uint32_t index = 0x7f; - static constexpr uint32_t phi = 0x3ff; - static constexpr uint32_t eta = 0x1ff; - static constexpr uint32_t etav = 0xff; - static constexpr uint32_t etas = 0x100; - static constexpr uint32_t phiv = 0x1ff; - static constexpr uint32_t phis = 0x200; - static constexpr uint32_t sv = 0x3; - }; - - struct shifts { - static constexpr uint32_t phiext = 0; - static constexpr uint32_t pt = 10; - static constexpr uint32_t qual = 19; - static constexpr uint32_t etaext = 23; - static constexpr uint32_t iso = 0; - static constexpr uint32_t chrg = 2; - static constexpr uint32_t chrgv = 3; - static constexpr uint32_t index = 4; - static constexpr uint32_t phi = 11; - static constexpr uint32_t eta = 21; - static constexpr uint32_t rsv = 30; - }; - - struct gmt_scales { - static constexpr float pt_scale = 0.5; - static constexpr float phi_scale = 2. 
* M_PI / 576.; - static constexpr float eta_scale = 0.0870 / 8; //9th MS bit is sign - static constexpr float phi_range = M_PI; - }; - - struct header_shifts { - static constexpr uint32_t bxmatch = 24; - static constexpr uint32_t mAcount = 16; - static constexpr uint32_t orbitmatch = 8; - static constexpr uint32_t mBcount = 0; - }; - - struct header_masks { - static constexpr uint32_t bxmatch = 0xff << header_shifts::bxmatch; - static constexpr uint32_t mAcount = 0xf << header_shifts::mAcount; - static constexpr uint32_t orbitmatch = 0xff << header_shifts::orbitmatch; - static constexpr uint32_t mBcount = 0xf; - }; - -} //namespace scouting - -class DataModeScoutingRun2Muon : public DataMode { -public: - DataModeScoutingRun2Muon(DAQSource* daqSource) : DataMode(daqSource) { - dummyLVec_ = std::make_unique>>(); - } - - ~DataModeScoutingRun2Muon() override{}; - - std::vector>& makeDaqProvenanceHelpers() override; - void readEvent(edm::EventPrincipal& eventPrincipal) override; - - int dataVersion() const override { return detectedFRDversion_; } - void detectVersion(unsigned char* fileBuf, uint32_t fileHeaderOffset) override { - detectedFRDversion_ = *((uint16_t*)(fileBuf + fileHeaderOffset)); - } - - uint32_t headerSize() const override { return FRDHeaderVersionSize[detectedFRDversion_]; } - - bool versionCheck() const override { return detectedFRDversion_ <= FRDHeaderMaxVersion; } - - uint64_t dataBlockSize() const override { return event_->size(); } - - void makeDataBlockView(unsigned char* addr, - size_t maxSize, - std::vector const& fileSizes, - size_t fileHeaderSize) override { - dataBlockAddr_ = addr; - dataBlockMax_ = maxSize; - eventCached_ = false; - nextEventView(); - eventCached_ = true; - } - - bool nextEventView() override; - bool checksumValid() override; - std::string getChecksumError() const override; - - bool isRealData() const override { return event_->isRealData(); } - - uint32_t run() const override { return event_->run(); } - - //true for scouting 
muon - bool dataBlockCompleted() const override { return true; } - - bool requireHeader() const override { return true; } - - bool fitToBuffer() const override { return true; } - - bool dataBlockInitialized() const override { return true; } - - void setDataBlockInitialized(bool) override{}; - - void setTCDSSearchRange(uint16_t MINTCDSuTCAFEDID, uint16_t MAXTCDSuTCAFEDID) override { return; } - - void makeDirectoryEntries(std::vector const& baseDirs, std::string const& runDir) override {} - - std::pair> defineAdditionalFiles(std::string const& primaryName, bool) const override { - return std::make_pair(true, std::vector()); - } - - char* readPayloadPos() { return (char*)event_->payload(); } - -private: - void unpackOrbit(BXVector* muons, char* buf, size_t len); - - std::vector> daqProvenanceHelpers_; - uint16_t detectedFRDversion_ = 0; - size_t headerSize_ = 0; - std::unique_ptr event_; - - std::unique_ptr>> dummyLVec_; - - unsigned char* dataBlockAddr_ = nullptr; - size_t dataBlockMax_ = 0; - bool eventCached_ = false; -}; - -class DataModeScoutingRun2Multi : public DataMode { -public: - DataModeScoutingRun2Multi(DAQSource* daqSource) : DataMode(daqSource) { - dummyLVec_ = std::make_unique>>(); - } - - ~DataModeScoutingRun2Multi() override{}; - - std::vector>& makeDaqProvenanceHelpers() override; - void readEvent(edm::EventPrincipal& eventPrincipal) override; - - int dataVersion() const override { return detectedFRDversion_; } - void detectVersion(unsigned char* fileBuf, uint32_t fileHeaderOffset) override { - detectedFRDversion_ = *((uint16_t*)(fileBuf + fileHeaderOffset)); - } - - uint32_t headerSize() const override { return FRDHeaderVersionSize[detectedFRDversion_]; } - - bool versionCheck() const override { return detectedFRDversion_ <= FRDHeaderMaxVersion; } - - uint64_t dataBlockSize() const override { - //TODO: adjust to multiple objects - return events_[0]->size(); - } - - void makeDataBlockView(unsigned char* addr, - size_t maxSize, - std::vector const& 
fileSizes, - size_t fileHeaderSize) override { - fileHeaderSize_ = fileHeaderSize; - numFiles_ = fileSizes.size(); - //add offset address for each file payload - startAddrs_.clear(); - startAddrs_.push_back(addr); - dataBlockAddrs_.clear(); - dataBlockAddrs_.push_back(addr); - dataBlockMaxAddrs_.clear(); - dataBlockMaxAddrs_.push_back(addr + fileSizes[0] - fileHeaderSize); - auto fileAddr = addr; - for (unsigned int i = 1; i < fileSizes.size(); i++) { - fileAddr += fileSizes[i - 1]; - startAddrs_.push_back(fileAddr); - dataBlockAddrs_.push_back(fileAddr); - dataBlockMaxAddrs_.push_back(fileAddr + fileSizes[i] - fileHeaderSize); - } - - dataBlockMax_ = maxSize; - blockCompleted_ = false; - //set event cached as we set initial address here - bool result = makeEvents(); - assert(result); - eventCached_ = true; - setDataBlockInitialized(true); - } - - bool nextEventView() override; - bool checksumValid() override; - std::string getChecksumError() const override; - - bool isRealData() const override { - assert(!events_.empty()); - return events_[0]->isRealData(); - } - - uint32_t run() const override { - assert(!events_.empty()); - return events_[0]->run(); - } - - //true for DAQ3 FRD - bool dataBlockCompleted() const override { return blockCompleted_; } - - bool requireHeader() const override { return true; } - - bool dataBlockInitialized() const override { return dataBlockInitialized_; } - - void setDataBlockInitialized(bool val) override { dataBlockInitialized_ = val; }; - - void setTCDSSearchRange(uint16_t MINTCDSuTCAFEDID, uint16_t MAXTCDSuTCAFEDID) override { return; } - - void makeDirectoryEntries(std::vector const& baseDirs, std::string const& runDir) override { - //receive directory paths for multiple input files ('striped') - } - - std::pair> defineAdditionalFiles(std::string const& primaryName, - bool fileListMode) const override; - -private: - bool makeEvents(); - void unpackMuonOrbit(BXVector* muons, char* buf, size_t len); - - std::vector> 
daqProvenanceHelpers_; - uint16_t detectedFRDversion_ = 0; - size_t headerSize_ = 0; - std::vector> events_; - - std::unique_ptr>> dummyLVec_; - - unsigned char* dataBlockAddr_ = nullptr; - - //debugging - std::vector startAddrs_; - std::vector dataBlockAddrs_; - std::vector dataBlockMaxAddrs_; - size_t dataBlockMax_ = 0; - size_t fileHeaderSize_ = 0; - short numFiles_ = 0; - bool eventCached_ = false; - bool dataBlockInitialized_ = false; - bool blockCompleted_ = true; -}; - -#endif // EventFilter_Utilities_DAQSourceModelsScouting_h diff --git a/EventFilter/Utilities/interface/DAQSourceModelsScoutingRun3.h b/EventFilter/Utilities/interface/DAQSourceModelsScoutingRun3.h new file mode 100644 index 0000000000000..8692df494c738 --- /dev/null +++ b/EventFilter/Utilities/interface/DAQSourceModelsScoutingRun3.h @@ -0,0 +1,135 @@ +#ifndef EventFilter_Utilities_DAQSourceModelsScoutingRun3_h +#define EventFilter_Utilities_DAQSourceModelsScoutingRun3_h + +#include "EventFilter/Utilities/interface/DAQSource.h" +#include "EventFilter/Utilities/interface/DAQSourceModels.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSRawDataCollection.h" +#include "DataFormats/L1ScoutingRawData/interface/SDSNumbering.h" + +#include "FWCore/Framework/interface/Event.h" +#include "DataFormats/Provenance/interface/EventAuxiliary.h" +#include "DataFormats/Provenance/interface/EventID.h" + +#include +#include +#include +#include +#include +#include + +class DataModeScoutingRun3 : public DataMode { +public: + DataModeScoutingRun3(DAQSource* daqSource) : DataMode(daqSource) {} + ~DataModeScoutingRun3() override{}; + std::vector>& makeDaqProvenanceHelpers() override; + void readEvent(edm::EventPrincipal& eventPrincipal) override; + + void fillSDSRawDataCollection(SDSRawDataCollection& rawData, char* buff, size_t len); + + //reuse FRD file and event headers + int dataVersion() const override { return detectedFRDversion_; } + void detectVersion(unsigned char* fileBuf, uint32_t fileHeaderOffset) 
override { + detectedFRDversion_ = *((uint16_t*)(fileBuf + fileHeaderOffset)); + } + uint32_t headerSize() const override { return FRDHeaderVersionSize[detectedFRDversion_]; } + bool versionCheck() const override { return detectedFRDversion_ <= FRDHeaderMaxVersion; } + + uint64_t dataBlockSize() const override { + // get event size from the first data source (main) + return events_[0]->size(); + } + + void makeDataBlockView(unsigned char* addr, + size_t maxSize, + std::vector const& fileSizes, + size_t fileHeaderSize) override { + fileHeaderSize_ = fileHeaderSize; + numFiles_ = fileSizes.size(); + + // initalize vectors keeping tracks of valid orbits and completed blocks + sourceValidOrbitPair_.clear(); + completedBlocks_.clear(); + for (unsigned int i = 0; i < fileSizes.size(); i++) { + completedBlocks_.push_back(false); + } + + //add offset address for each file payload + dataBlockAddrs_.clear(); + dataBlockAddrs_.push_back(addr); + dataBlockMaxAddrs_.clear(); + dataBlockMaxAddrs_.push_back(addr + fileSizes[0] - fileHeaderSize); + auto fileAddr = addr; + for (unsigned int i = 1; i < fileSizes.size(); i++) { + fileAddr += fileSizes[i - 1]; + dataBlockAddrs_.push_back(fileAddr); + dataBlockMaxAddrs_.push_back(fileAddr + fileSizes[i] - fileHeaderSize); + } + + dataBlockMax_ = maxSize; + blockCompleted_ = false; + //set event cached as we set initial address here + bool result = makeEvents(); + assert(result); + eventCached_ = true; + setDataBlockInitialized(true); + } + + bool nextEventView() override; + bool checksumValid() override; + std::string getChecksumError() const override; + + bool isRealData() const override { + assert(!events_.empty()); + return events_[0]->isRealData(); + } + + uint32_t run() const override { + assert(!events_.empty()); + return events_[0]->run(); + } + + bool dataBlockCompleted() const override { return blockCompleted_; } + + bool requireHeader() const override { return true; } + + bool fitToBuffer() const override { return true; } + + 
bool dataBlockInitialized() const override { return dataBlockInitialized_; } + + void setDataBlockInitialized(bool val) override { dataBlockInitialized_ = val; }; + + void setTCDSSearchRange(uint16_t MINTCDSuTCAFEDID, uint16_t MAXTCDSuTCAFEDID) override { return; } + + void makeDirectoryEntries(std::vector const& baseDirs, + std::vector const& numSources, + std::string const& runDir) override; + + std::pair> defineAdditionalFiles(std::string const& primaryName, + bool fileListMode) const override; + +private: + bool makeEvents(); + std::vector> daqProvenanceHelpers_; // + uint16_t detectedFRDversion_ = 0; + size_t fileHeaderSize_ = 0; + size_t headerSize_ = 0; + std::vector> events_; + unsigned char* dataBlockAddr_ = nullptr; + std::vector dataBlockAddrs_; + std::vector dataBlockMaxAddrs_; + size_t dataBlockMax_ = 0; + short numFiles_ = 0; + bool dataBlockInitialized_ = false; + bool blockCompleted_ = true; + bool eventCached_ = false; + std::vector buPaths_; + std::vector buNumSources_; + + // keep track of valid (=aligned) orbits from different data sources + std::vector> sourceValidOrbitPair_; + unsigned int currOrbit_ = 0xFFFFFFFF; + + std::vector completedBlocks_; +}; + +#endif // EventFilter_Utilities_DAQSourceModelsScoutingRun3_h \ No newline at end of file diff --git a/EventFilter/Utilities/interface/EvFDaqDirector.h b/EventFilter/Utilities/interface/EvFDaqDirector.h index 45886d16b0f83..86a7bc103a378 100644 --- a/EventFilter/Utilities/interface/EvFDaqDirector.h +++ b/EventFilter/Utilities/interface/EvFDaqDirector.h @@ -26,7 +26,6 @@ #include #include -#include #include class SystemBounds; @@ -68,7 +67,6 @@ namespace evf { void initRun(); static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); void preallocate(edm::service::SystemBounds const& bounds); - void preBeginJob(edm::PathsAndConsumesOfModulesBase const&, edm::ProcessContext const&); void preBeginRun(edm::GlobalContext const& globalContext); void postEndRun(edm::GlobalContext 
const& globalContext); void preGlobalEndLumi(edm::GlobalContext const& globalContext); @@ -186,14 +184,15 @@ namespace evf { filesToDeletePtr_ = filesToDelete; } - void checkTransferSystemPSet(edm::ProcessContext const& pc); - void checkMergeTypePSet(edm::ProcessContext const& pc); - std::string getStreamDestinations(std::string const& stream) const; - std::string getStreamMergeType(std::string const& stream, MergeType defaultType); + std::string getStreamDestinations(std::string const&) const { return std::string(""); } + std::string getStreamMergeType(std::string const&, MergeType defaultType) const { + return MergeTypeNames_[defaultType]; + } static struct flock make_flock(short type, short whence, off_t start, off_t len, pid_t pid); bool inputThrottled(); bool lumisectionDiscarded(unsigned int ls); std::vector const& getBUBaseDirs() const { return bu_base_dirs_all_; } + std::vector const& getBUBaseDirsNSources() const { return bu_base_dirs_nSources_; } private: bool bumpFile(unsigned int& ls, @@ -216,6 +215,7 @@ namespace evf { std::string base_dir_; std::string bu_base_dir_; std::vector bu_base_dirs_all_; + std::vector bu_base_dirs_nSources_; unsigned int run_; bool useFileBroker_; bool fileBrokerHostFromCfg_; @@ -225,9 +225,6 @@ namespace evf { bool fileBrokerUseLocalLock_; unsigned int fuLockPollInterval_; bool outputAdler32Recheck_; - bool requireTSPSet_; - std::string selectedTransferMode_; - std::string mergeTypePset_; bool directorBU_; std::string hltSourceDirectory_; @@ -282,9 +279,6 @@ namespace evf { std::string stopFilePathPid_; unsigned int stop_ls_override_ = 0; - std::shared_ptr transferSystemJson_; - tbb::concurrent_hash_map mergeTypeMap_; - //values initialized in .cc file static const std::vector MergeTypeNames_; diff --git a/EventFilter/Utilities/interface/EvFOutputModule.h b/EventFilter/Utilities/interface/EvFOutputModule.h deleted file mode 100644 index e65f7eb5636d2..0000000000000 --- a/EventFilter/Utilities/interface/EvFOutputModule.h +++ 
/dev/null @@ -1,108 +0,0 @@ -#ifndef EventFilter_Utilities_EvFOutputModule_h -#define EventFilter_Utilities_EvFOutputModule_h - -#include "IOPool/Streamer/interface/StreamerOutputFile.h" -#include "FWCore/Framework/interface/one/OutputModule.h" -#include "IOPool/Streamer/interface/StreamerOutputModuleCommon.h" -#include "FWCore/Utilities/interface/EDGetToken.h" -#include "DataFormats/Streamer/interface/StreamedProducts.h" - -#include "EventFilter/Utilities/interface/JsonMonitorable.h" -#include "EventFilter/Utilities/interface/FastMonitor.h" -# -typedef edm::detail::TriggerResultsBasedEventSelector::handle_t Trig; - -namespace evf { - - class FastMonitoringService; - // class EvFOutputEventWriter; - - class EvFOutputEventWriter { - public: - explicit EvFOutputEventWriter(std::string const& filePath) - : filePath_(filePath), accepted_(0), stream_writer_events_(new StreamerOutputFile(filePath)) {} - - ~EvFOutputEventWriter() {} - - void close() { stream_writer_events_->close(); } - - void doOutputEvent(EventMsgBuilder const& msg) { - EventMsgView eview(msg.startAddress()); - stream_writer_events_->write(eview); - } - - uint32 get_adler32() const { return stream_writer_events_->adler32(); } - - std::string const& getFilePath() const { return filePath_; } - - unsigned long getAccepted() const { return accepted_; } - void incAccepted() { accepted_++; } - - private: - std::string filePath_; - unsigned long accepted_; - edm::propagate_const> stream_writer_events_; - }; - - class EvFOutputJSONWriter { - public: - EvFOutputJSONWriter(edm::StreamerOutputModuleCommon::Parameters const& commonParameters, - edm::SelectedProducts const* selections, - std::string const& streamLabel, - std::string const& moduleLabel); - - edm::StreamerOutputModuleCommon streamerCommon_; - - jsoncollector::IntJ processed_; - jsoncollector::IntJ accepted_; - jsoncollector::IntJ errorEvents_; - jsoncollector::IntJ retCodeMask_; - jsoncollector::StringJ filelist_; - jsoncollector::IntJ filesize_; - 
jsoncollector::StringJ inputFiles_; - jsoncollector::IntJ fileAdler32_; - jsoncollector::StringJ transferDestination_; - jsoncollector::StringJ mergeType_; - jsoncollector::IntJ hltErrorEvents_; - std::shared_ptr jsonMonitor_; - jsoncollector::DataPointDefinition outJsonDef_; - }; - - typedef edm::one::OutputModule> - EvFOutputModuleType; - - class EvFOutputModule : public EvFOutputModuleType { - public: - explicit EvFOutputModule(edm::ParameterSet const& ps); - ~EvFOutputModule() override; - static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); - - private: - void beginRun(edm::RunForOutput const& run) override; - void write(edm::EventForOutput const& e) override; - - //pure in parent class but unused here - void writeLuminosityBlock(edm::LuminosityBlockForOutput const&) override {} - void writeRun(edm::RunForOutput const&) override {} - void endRun(edm::RunForOutput const&) override {} - - std::shared_ptr globalBeginLuminosityBlock( - edm::LuminosityBlockForOutput const& iLB) const override; - void globalEndLuminosityBlock(edm::LuminosityBlockForOutput const& iLB) override; - - Trig getTriggerResults(edm::EDGetTokenT const& token, edm::EventForOutput const& e) const; - - edm::StreamerOutputModuleCommon::Parameters commonParameters_; - std::string streamLabel_; - edm::EDGetTokenT trToken_; - edm::EDGetTokenT psetToken_; - - evf::FastMonitoringService* fms_; - - std::unique_ptr jsonWriter_; - - }; //end-of-class-def - -} // namespace evf - -#endif diff --git a/EventFilter/Utilities/interface/FastMonitor.h b/EventFilter/Utilities/interface/FastMonitor.h index ab978dbc26e2c..dc8e6bbbaa762 100644 --- a/EventFilter/Utilities/interface/FastMonitor.h +++ b/EventFilter/Utilities/interface/FastMonitor.h @@ -77,13 +77,12 @@ namespace jsoncollector { std::string getCSVString(int sid = -1); //fastpath file output - void outputCSV(std::string const& path, std::string const& csvString); + void outputCSV(std::string const& path, std::vector const& 
csvString); //provide merged variable back to user JsonMonitorable* getMergedIntJForLumi(std::string const& name, unsigned int forLumi); // merges and outputs everything collected for the given stream to JSON file - bool outputFullJSONs(std::string const& pathstem, std::string const& ext, unsigned int lumi, bool output = true); bool outputFullJSON(std::string const& path, unsigned int lumi, bool output = true); //discard what was collected for a lumisection diff --git a/EventFilter/Utilities/interface/FastMonitoringService.h b/EventFilter/Utilities/interface/FastMonitoringService.h index e955f92d9cc1f..94c61334ee267 100644 --- a/EventFilter/Utilities/interface/FastMonitoringService.h +++ b/EventFilter/Utilities/interface/FastMonitoringService.h @@ -3,6 +3,7 @@ #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ServiceRegistry/interface/ActivityRegistry.h" +#include "FWCore/ServiceRegistry/interface/StreamContext.h" #include "DataFormats/Provenance/interface/EventID.h" #include "DataFormats/Provenance/interface/LuminosityBlockID.h" #include "DataFormats/Provenance/interface/Timestamp.h" @@ -20,6 +21,8 @@ #include #include #include +#include "oneapi/tbb/task_arena.h" +#include "oneapi/tbb/task_scheduler_observer.h" /*Description this is an evolution of the MicroStateService intended to be run in standalone multi-threaded cmsRun jobs @@ -28,9 +31,6 @@ moduledesc pointer to key into the map instead and no string or string pointers are used for the microstates. Only a pointer value is stored using relaxed ordering at the time of module execution which is fast. At snapshot time only (every few seconds) we do the map lookup to produce snapshot. - Path names use a similar logic. However path names are not accessible in the same way as later so they need to be - when starting to run associated to the memory location of path name strings as accessible when path is executed. 
- Path intermediate info will be called "ministate" :D The general counters and status variables (event number, number of processed events, number of passed and stored events, luminosity section etc.) are also monitored here. @@ -47,7 +47,10 @@ namespace edm { namespace evf { + template + struct ContainableAtomic; class FastMonitoringThread; + class ConcurrencyTracker; namespace FastMonState { @@ -62,7 +65,11 @@ namespace evf { mBoL, mEoL, mGlobEoL, - mCOUNT + mFwk, + mIdleSource, + mEvent, + mIgnore, + mCOUNT, }; enum Macrostate { @@ -153,19 +160,21 @@ namespace evf { }; } // namespace FastMonState + constexpr int nSpecialModules = FastMonState::mCOUNT; + //reserve output module space + constexpr int nReservedModules = 128; + class FastMonitoringService : public MicroStateService { public: // the names of the states - some of them are never reached in an online app - static const edm::ModuleDescription reservedMicroStateNames[FastMonState::mCOUNT]; + static const edm::ModuleDescription specialMicroStateNames[FastMonState::mCOUNT]; static const std::string macroStateNames[FastMonState::MCOUNT]; static const std::string inputStateNames[FastMonState::inCOUNT]; // Reserved names for microstates - static const std::string nopath_; FastMonitoringService(const edm::ParameterSet&, edm::ActivityRegistry&); ~FastMonitoringService() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); - std::string makePathLegendaJson(); std::string makeModuleLegendaJson(); std::string makeInputLegendaJson(); @@ -200,10 +209,6 @@ namespace evf { void preSourceEarlyTermination(edm::TerminationOrigin); void setExceptionDetected(unsigned int ls); - //this is still needed for use in special functions like DQM which are in turn framework services - void setMicroState(FastMonState::Microstate); - void setMicroState(edm::StreamID, FastMonState::Microstate); - void accumulateFileSize(unsigned int lumi, unsigned long fileSize); void startedLookingForFile(); void 
stoppedLookingForFile(unsigned int lumi); @@ -223,14 +228,23 @@ namespace evf { void setInputSource(DAQSource* inputSource) { daqInputSource_ = inputSource; } void setInState(FastMonState::InputState inputState) { inputState_ = inputState; } void setInStateSup(FastMonState::InputState inputState) { inputSupervisorState_ = inputState; } + //available for other modules + void setTMicrostate(FastMonState::Microstate m); + + static unsigned int getTID() { return tbb::this_task_arena::current_thread_index(); } private: void doSnapshot(const unsigned int ls, const bool isGlobalEOL); void snapshotRunner(); + static unsigned int getSID(edm::StreamContext const& sc) { return sc.streamID().value(); } + + static unsigned int getSID(edm::StreamID const& sid) { return sid.value(); } + //the actual monitoring thread is held by a separate class object for ease of maintenance - std::shared_ptr fmt_; + std::unique_ptr fmt_; + std::unique_ptr ct_; //Encoding encModule_; //std::vector encPath_; FedRawDataInputSource* inputSource_ = nullptr; @@ -238,14 +252,16 @@ namespace evf { std::atomic inputState_{FastMonState::InputState::inInit}; std::atomic inputSupervisorState_{FastMonState::InputState::inInit}; - unsigned int nStreams_; - unsigned int nThreads_; + unsigned int nStreams_ = 0; + unsigned int nMonThreads_ = 0; + unsigned int nThreads_ = 0; + bool tbbMonitoringMode_; + bool tbbConcurrencyTracker_; int sleepTime_; unsigned int fastMonIntervals_; unsigned int snapCounter_ = 0; std::string microstateDefPath_, fastMicrostateDefPath_; - std::string fastName_, fastPath_, slowName_; - bool filePerFwkStream_; + std::string fastName_, fastPath_; //variables that are used by/monitored by FastMonitoringThread / FastMonitor @@ -272,9 +288,6 @@ namespace evf { //to disable this behavior, set #ATOMIC_LEVEL 0 or 1 in DataPoint.h std::vector*> streamCounterUpdating_; - std::vector*> collectedPathList_; - std::vector pathNamesReady_; - std::filesystem::path workingDirectory_, runDirectory_; bool 
threadIDAvailable_ = false; @@ -283,8 +296,6 @@ namespace evf { std::string moduleLegendFile_; std::string moduleLegendFileJson_; - std::string pathLegendFile_; - std::string pathLegendFileJson_; std::string inputLegendFileJson_; unsigned int nOutputModules_ = 0; @@ -293,7 +304,13 @@ namespace evf { std::atomic has_source_exception_ = false; std::atomic has_data_exception_ = false; std::vector exceptionInLS_; - std::vector fastPathList_; + + //per stream + std::vector> microstate_; + std::vector> microstateAcqFlag_; + //per thread + std::vector> tmicrostate_; + std::vector> tmicrostateAcqFlag_; bool verbose_ = false; }; diff --git a/EventFilter/Utilities/interface/FastMonitoringThread.h b/EventFilter/Utilities/interface/FastMonitoringThread.h index bc0dc7810b8d2..ac505a4197e02 100644 --- a/EventFilter/Utilities/interface/FastMonitoringThread.h +++ b/EventFilter/Utilities/interface/FastMonitoringThread.h @@ -13,13 +13,9 @@ namespace evf { - constexpr int nReservedModules = 128; - constexpr int nSpecialModules = 10; - constexpr int nReservedPaths = 1; - - namespace FastMonState { - enum Macrostate; - } + //namespace FastMonState { + // enum Macrostate; + //} class FastMonitoringService; @@ -131,11 +127,10 @@ namespace evf { unsigned int varIndexThrougput_; //per stream + std::vector tmicrostateEncoded_; std::vector microstateEncoded_; - std::vector ministateEncoded_; std::vector processed_; jsoncollector::IntJ fastPathProcessedJ_; - std::vector threadMicrostateEncoded_; std::vector inputState_; //tracking luminosity of a stream @@ -143,19 +138,12 @@ namespace evf { //N bins for histograms unsigned int macrostateBins_; - unsigned int ministateBins_; unsigned int microstateBins_; unsigned int inputstateBins_; //global state std::atomic macrostate_; - //per stream - std::vector> ministate_; - std::vector> microstate_; - std::vector> microstateAcqFlag_; - std::vector> threadMicrostate_; - FastMonEncoding encModule_; std::vector encPath_; @@ -180,7 +168,10 @@ namespace 
evf { } //to be called after fast monitor is constructed - void registerVariables(jsoncollector::FastMonitor* fm, unsigned int nStreams, unsigned int nThreads) { + void registerVariables(jsoncollector::FastMonitor* fm, + unsigned nMaxSlices, + unsigned nMaxStreams, + unsigned nMaxThreads) { //tell FM to track these global variables(for fast and slow monitoring) fm->registerGlobalMonitorable(&fastMacrostateJ_, true, ¯ostateBins_); fm->registerGlobalMonitorable(&fastThroughputJ_, false); @@ -189,27 +180,30 @@ namespace evf { fm->registerGlobalMonitorable(&fastLockWaitJ_, false); fm->registerGlobalMonitorable(&fastLockCountJ_, false); - for (unsigned int i = 0; i < nStreams; i++) { + for (unsigned int i = 0; i < nMaxSlices; i++) { jsoncollector::AtomicMonUInt* p = new jsoncollector::AtomicMonUInt; *p = 0; processed_.push_back(p); streamLumi_.push_back(0); } - microstateEncoded_.resize(nStreams); - ministateEncoded_.resize(nStreams); - threadMicrostateEncoded_.resize(nThreads); - inputState_.resize(nStreams); - for (unsigned int j = 0; j < inputState_.size(); j++) - inputState_[j] = 0; + tmicrostateEncoded_.resize(nMaxSlices, FastMonState::mInvalid); + for (unsigned int i = nMaxThreads; i < nMaxSlices; i++) { + tmicrostateEncoded_[i] = FastMonState::mIgnore; + } + microstateEncoded_.resize(nMaxSlices, FastMonState::mInvalid); + inputState_.resize(nMaxSlices, FastMonState::inInit); + for (unsigned int i = nMaxStreams; i < nMaxSlices; i++) { + microstateEncoded_[i] = FastMonState::mIgnore; + inputState_[i] = FastMonState::inIgnore; + } + //for (unsigned int j = 0; j < nMaxStreams; j++) + // inputState_[j] = 0; //tell FM to track these int vectors - fm->registerStreamMonitorableUIntVec("Ministate", &ministateEncoded_, true, &ministateBins_); + fm->registerStreamMonitorableUIntVec("tMicrostate", &tmicrostateEncoded_, true, µstateBins_); - if (nThreads <= nStreams) //no overlapping in module execution per stream - fm->registerStreamMonitorableUIntVec("Microstate", 
µstateEncoded_, true, µstateBins_); - else - fm->registerStreamMonitorableUIntVec("Microstate", &threadMicrostateEncoded_, true, µstateBins_); + fm->registerStreamMonitorableUIntVec("Microstate", µstateEncoded_, true, µstateBins_); fm->registerStreamMonitorableUIntVecAtomic("Processed", &processed_, false, nullptr); diff --git a/EventFilter/Utilities/interface/SourceCommon.h b/EventFilter/Utilities/interface/SourceCommon.h new file mode 100644 index 0000000000000..46109ea5be0a3 --- /dev/null +++ b/EventFilter/Utilities/interface/SourceCommon.h @@ -0,0 +1,25 @@ +#ifndef EventFilter_Utilities_SourceCommon_h +#define EventFilter_Utilities_SourceCommon_h + +/* + * This header will host common definitions used by FedRawDataInputSource and DAQSource + * */ + +#include "EventFilter/Utilities/interface/FastMonitoringService.h" + +class IdleSourceSentry { +public: + IdleSourceSentry(evf::FastMonitoringService* fms) : fms_(fms) { + if (fms_) + fms_->setTMicrostate(evf::FastMonState::mIdleSource); + } + ~IdleSourceSentry() { + if (fms_) + fms_->setTMicrostate(evf::FastMonState::mIdle); + } + +private: + evf::FastMonitoringService* fms_; +}; + +#endif diff --git a/EventFilter/Utilities/plugins/DumpMuonScouting.cc b/EventFilter/Utilities/plugins/DumpMuonScouting.cc deleted file mode 100644 index 4f1b37b030b4c..0000000000000 --- a/EventFilter/Utilities/plugins/DumpMuonScouting.cc +++ /dev/null @@ -1,112 +0,0 @@ -/// -/// \class l1t::DumpMuonScouting.cc -/// -/// Description: Dump/Analyze Moun Scouting stored in BXVector -/// -/// Implementation: -/// Based off of Michael Mulhearn's YellowParamTester -/// -/// \author: Brian Winer Ohio State -/// - -// -// This simple module simply retreives the YellowParams object from the event -// setup, and sends its payload as an INFO message, for debugging purposes. 
-// - -#include "FWCore/Framework/interface/MakerMacros.h" - -// system include files -#include -#include -#include - -// user include files -// base class -#include "FWCore/Framework/interface/stream/EDAnalyzer.h" - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/Utilities/interface/EDGetToken.h" -#include "FWCore/Utilities/interface/InputTag.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" - -#include "DataFormats/L1Trigger/interface/Muon.h" - -#include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "FWCore/MessageLogger/interface/MessageDrop.h" - -using namespace edm; -using namespace std; - -// class declaration -class DumpMuonScouting : public edm::stream::EDAnalyzer<> { -public: - explicit DumpMuonScouting(const edm::ParameterSet&); - ~DumpMuonScouting() override{}; - void analyze(const edm::Event&, const edm::EventSetup&) override; - - EDGetTokenT> muToken; - - int m_minBx; - int m_maxBx; - -private: - int m_tvVersion; -}; - -DumpMuonScouting::DumpMuonScouting(const edm::ParameterSet& iConfig) { - muToken = consumes(iConfig.getParameter("muInputTag")); - - m_minBx = iConfig.getParameter("minBx"); - m_maxBx = iConfig.getParameter("maxBx"); -} - -// loop over events -void DumpMuonScouting::analyze(const edm::Event& iEvent, const edm::EventSetup& evSetup) { - //input - Handle> muons = iEvent.getHandle(muToken); - - { - cout << " ----------------------------------------------------- " << endl; - cout << " *********** Run " << std::dec << iEvent.id().run() << " Event " << iEvent.id().event() - << " ************** " << endl; - cout << " ----------------------------------------------------- " << endl; - - //Loop over BX - for (int i = m_minBx; i <= m_maxBx; ++i) { - //Loop over Muons - //cout << " ------ Muons --------" << endl; - if (muons.isValid()) { - if (i >= muons->getFirstBX() && i <= muons->getLastBX()) { - for 
(std::vector::const_iterator mu = muons->begin(i); mu != muons->end(i); ++mu) { - cout << " " << std::dec << std::setw(2) << std::setfill(' ') << std::setfill('0') << ")"; - cout << " Pt " << std::dec << std::setw(3) << mu->hwPt() << " (0x" << std::hex << std::setw(3) - << std::setfill('0') << mu->hwPt() << ")"; - cout << " EtaAtVtx " << std::dec << std::setw(3) << mu->hwEtaAtVtx() << " (0x" << std::hex << std::setw(3) - << std::setfill('0') << (mu->hwEtaAtVtx() & 0x1ff) << ")"; - cout << " Eta " << std::dec << std::setw(3) << mu->hwEta() << " (0x" << std::hex << std::setw(3) - << std::setfill('0') << (mu->hwEta() & 0x1ff) << ")"; - cout << " PhiAtVtx " << std::dec << std::setw(3) << mu->hwPhiAtVtx() << " (0x" << std::hex << std::setw(3) - << std::setfill('0') << mu->hwPhiAtVtx() << ")"; - cout << " Phi " << std::dec << std::setw(3) << mu->hwPhi() << " (0x" << std::hex << std::setw(3) - << std::setfill('0') << mu->hwPhi() << ")"; - cout << " Iso " << std::dec << std::setw(1) << mu->hwIso(); - cout << " Qual " << std::dec << std::setw(1) << mu->hwQual(); - cout << " Chrg " << std::dec << std::setw(1) << mu->hwCharge(); - cout << endl; - } - } else { - cout << "No Muons stored for this bx " << i << endl; - } - } else { - cout << "No Muon Data in this event " << endl; - } - - } //loop over Bx - cout << std::dec << endl; - } //if dumpGtRecord -} - -DEFINE_FWK_MODULE(DumpMuonScouting); diff --git a/EventFilter/Utilities/plugins/ExceptionGenerator.cc b/EventFilter/Utilities/plugins/ExceptionGenerator.cc index a3e8696ede3d9..87541772343ec 100644 --- a/EventFilter/Utilities/plugins/ExceptionGenerator.cc +++ b/EventFilter/Utilities/plugins/ExceptionGenerator.cc @@ -250,6 +250,4 @@ namespace evf { } } - void ExceptionGenerator::endLuminosityBlock(edm::LuminosityBlock const &lb, edm::EventSetup const &es) {} - } // end namespace evf diff --git a/EventFilter/Utilities/plugins/ExceptionGenerator.h b/EventFilter/Utilities/plugins/ExceptionGenerator.h index 
1630855ebee8c..bf42d1a0476bd 100644 --- a/EventFilter/Utilities/plugins/ExceptionGenerator.h +++ b/EventFilter/Utilities/plugins/ExceptionGenerator.h @@ -17,10 +17,8 @@ namespace evf { static const std::string menu[menu_items]; explicit ExceptionGenerator(const edm::ParameterSet&); - ~ExceptionGenerator() override{}; void beginRun(const edm::Run& r, const edm::EventSetup& iSetup) override; void analyze(const edm::Event& e, const edm::EventSetup& c) override; - void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; private: int actionId_; diff --git a/EventFilter/Utilities/plugins/microstatedef.jsd b/EventFilter/Utilities/plugins/microstatedef.jsd index 5343a5a4870f7..747f26131bf29 100644 --- a/EventFilter/Utilities/plugins/microstatedef.jsd +++ b/EventFilter/Utilities/plugins/microstatedef.jsd @@ -5,7 +5,7 @@ "operation" : "histo" }, { - "name" : "Ministate", + "name" : "tMicrostate", "operation" : "histo" }, { diff --git a/EventFilter/Utilities/plugins/microstatedeffast.jsd b/EventFilter/Utilities/plugins/microstatedeffast.jsd index 948c7914eb07d..f2c57b93dba7c 100644 --- a/EventFilter/Utilities/plugins/microstatedeffast.jsd +++ b/EventFilter/Utilities/plugins/microstatedeffast.jsd @@ -5,7 +5,7 @@ "operation" : "histo" }, { - "name" : "Ministate", + "name" : "tMicrostate", "operation" : "histo" }, { diff --git a/EventFilter/Utilities/plugins/modules.cc b/EventFilter/Utilities/plugins/modules.cc index 106969f00a0b4..f0213d4fb2693 100644 --- a/EventFilter/Utilities/plugins/modules.cc +++ b/EventFilter/Utilities/plugins/modules.cc @@ -1,5 +1,4 @@ #include "EventFilter/Utilities/interface/EvFDaqDirector.h" -#include "EventFilter/Utilities/interface/EvFOutputModule.h" #include "EventFilter/Utilities/interface/FastMonitoringService.h" #include "EventFilter/Utilities/interface/FedRawDataInputSource.h" #include "EventFilter/Utilities/interface/DAQSource.h" @@ -29,7 +28,6 @@ DEFINE_FWK_SERVICE(EvFDaqDirector); 
DEFINE_FWK_MODULE(ExceptionGenerator); DEFINE_FWK_MODULE(EvFFEDSelector); DEFINE_FWK_MODULE(EvFFEDExcluder); -DEFINE_FWK_MODULE(EvFOutputModule); DEFINE_FWK_MODULE(DaqFakeReader); DEFINE_FWK_INPUT_SOURCE(FedRawDataInputSource); DEFINE_FWK_INPUT_SOURCE(DAQSource); diff --git a/EventFilter/Utilities/scripts/scoutingToRaw.py b/EventFilter/Utilities/scripts/scoutingToRaw.py deleted file mode 100755 index d6ce51feae7ad..0000000000000 --- a/EventFilter/Utilities/scripts/scoutingToRaw.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/bin/env python3 - -import struct -import os,sys -import json -import shutil - -os.umask(0) - -#struct muon{ -# uint32_t f; -# uint32_t s; -#}; - -#struct block{ -# uint32_t bx; -# uint32_t orbit; -# muon mu[16]; -#}; - -#class masks: -# phiext = 0x3ff -# pt = 0x1ff -# qual = 0xf -# etaext = 0x1ff -# etaextv = 0xff -# etaexts = 0x100 -# iso = 0x3 -# chrg = 0x1 -# chrgv = 0x1 -# index = 0x7f -# phi = 0x3ff -# eta = 0x1ff -# etav = 0xff -# etas = 0x100 -# phiv = 0x1ff -# phis = 0x200 -# sv = 0x3 - -#class shifts: -# phiext = 0 -# pt = 10 -# qual = 19 -# etaext = 23 -# iso = 0 -# chrg = 2 -# chrgv = 3 -# index = 4 -# phi = 11 -# eta = 21 -# rsv = 30 - -#class gmt_scales: -# pt_scale = 0.5 -# phi_scale = 2.*M_PI/576. 
-# eta_scale = 0.0870/8 #9th MS bit is sign -# phi_range = M_PI - - -#need to read this to find orbit ("event") boundary and calculate size per orbit -class header_shifts: - bxmatch = 32; - mAcount = 16; - orbitmatch = 8; - mBcount = 0; - -class header_masks: - bxmatch = 0xff << header_shifts.bxmatch; - mAcount = 0xf << header_shifts.mAcount; - orbitmatch = 0xff << header_shifts.orbitmatch; - mBcount = 0xf - - -#new V2 FRD file header (32 bytes) -class frd_file_header_v2: - ver_id = "RAW_0002".encode() # 64 (offset 0B) - header_size = 32 #16 (offset 8B) - data_type = 20 #16 (offset 10) - event_count = 0 #32 (offset 12B) - run_number = 0 #32 (offset 16B) - lumisection = 0 #32 (offset 20B) - file_size = 0 #64 (offset 24B) - - -def parseMuonScoutingRawFile(infilepath, outdir, rn_override, maxorbits): - - if infilepath != 'stdin': - fin = open(infilepath,'rb') - else: - fin = sys.stdin.buffer - - #sys.stdout.flush() - - #orbit count per file - orbitcount=0 - #total - orbitcount_total=0 - - last_ls = 0 - - orbit_data = bytes() - orbit_nr = 0 - orbit_size = 0 - flags = 0 - c_crc32c = 0 - - #ls = 1 - #event header (FRD format) const - version = 6 - - #files - fout = None - if infilepath != 'stdin': - fin = open(infilepath,'rb') - else: - fin = sys.stdin.buffer - - - #write header before closing the file - def update_header(): - nonlocal orbitcount - nonlocal last_ls - h = frd_file_header_v2() - h.event_count = orbitcount - h.run_number = rn_override - h.lumisection = last_ls - h.file_size = fout.tell() - fout.seek(0, 0) - fout.write(frd_file_header_v2.ver_id) - fout.write(struct.pack('H',h.header_size)) - fout.write(struct.pack('H',h.data_type)) - fout.write(struct.pack('I',h.event_count)) - fout.write(struct.pack('I',h.run_number)) - fout.write(struct.pack('I',h.lumisection)) - fout.write(struct.pack('Q',h.file_size)) - - orbitcount = 0 - print(h.ver_id, h.header_size, h.data_type, h.event_count, h.lumisection, h.file_size) - - - #write orbit when next one is detected or 
file is closed - def write_orbit(): - nonlocal orbit_size - nonlocal orbit_data - if not orbit_size: - return - - #print(fout.tell(), struct.pack('H',version)) - fout.write(struct.pack('H',version)) #could be 8 bytes - fout.write(struct.pack('H',flags)) #could be 8 bytes - fout.write(struct.pack('I',rn_override)) #run - #fout.write(struct.pack('I',ls)) #ls - fout.write(struct.pack('I',last_ls)) #ls - fout.write(struct.pack('I',orbit_nr)) #eid (orbit number, 32-bit) - fout.write(struct.pack('I',orbit_size)) #payload size - fout.write(struct.pack('I',c_crc32c)) #payload checksum (not used) - fout.write(orbit_data) - - orbit_data = bytes() - orbit_size = 0 - - def writeout_close(): - write_orbit() - update_header() - fout.close() - orbit_nr = 0 - - #read loop - while True: - - #check if exceeded max orbits specified - if orbitcount_total > maxorbits: - print(f"finish: {orbitcount_total-1}/{maxorbits} orbits") - writeout_close() - - if infilepath != 'stdin': - fin.close() - sys.exit(0) - - try: - h_raw = fin.read(4) - bxmatch = struct.unpack('B', h_raw[3:4])[0] - mAcount = struct.unpack('B', h_raw[2:3])[0] - orbitmatch = struct.unpack('B', h_raw[1:2])[0] - mBcount = struct.unpack('B', h_raw[0:1])[0] - - #print("bxmatch", bxmatch, "mA", mAcount, "orbitmatch", orbitmatch, "mB", mBcount) - - bx_raw = fin.read(4) - bx = struct.unpack('i', bx_raw)[0] - #print("bx",bx) - orbit_raw = fin.read(4) - orbit = struct.unpack('i', orbit_raw)[0] - - new_ls = orbit >> 18 - - if new_ls > last_ls: - #open a new output file if crossing LS boundary or on first orbit - if last_ls: - write_orbit() - update_header() - fout.close() - orbitcount = 0 - - last_ls = new_ls - fout = open(os.path.join(outdir, f'run{rn_override}_ls{str(new_ls).zfill(4)}_index000000.raw') ,'wb') - #empty file header, will be updated later - fout.write(frd_file_header_v2.ver_id) -# fout.write(bytes(16)) - fout.write(bytes(24)) - - read_len = 8*(mAcount+mBcount) - mu_blk = fin.read(8*(mAcount+mBcount)) - if len(mu_blk) 
!= read_len: - print('incomplete read') - sys.exit(1) - - if not orbit_nr or orbit != orbit_nr: - #received new orbit, write previous one - if orbit_nr: - write_orbit() - - #should not decrease: - if orbit < orbit_nr: - orbit_count = -1 - print("got smaller orbit than earlier!") - sys.exit(1) - - print("new orbit", orbit) - orbit_nr = orbit - - #per LS file counter: - orbitcount += 1 - #total counter: - orbitcount_total += 1 - - #update orbit size and data variables - orbit_size += 12 + read_len - orbit_data += (h_raw + bx_raw + orbit_raw) + mu_blk - - except Exception as ex: - #reached premature end of the file? - print(f"exception: {ex}") - #writeout_close() - #if infilepath != 'stdin': - # fin.close() - sys.exit(1) - - #print count," : ",version,run,lumi,eid,esize,crc32c,"override id/ls/run:",count,1,rn_override - #lumi=1 - -if len(sys.argv) < 5: - print("parameters: input file (or stdin), output directory, run number (use same as input file), orbits to write") -else: - parseMuonScoutingRawFile(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4])) - - - - diff --git a/EventFilter/Utilities/src/DAQSource.cc b/EventFilter/Utilities/src/DAQSource.cc index b607b13ca1dd0..dde1577ac4bb0 100644 --- a/EventFilter/Utilities/src/DAQSource.cc +++ b/EventFilter/Utilities/src/DAQSource.cc @@ -7,7 +7,7 @@ #include "EventFilter/Utilities/interface/DAQSource.h" #include "EventFilter/Utilities/interface/DAQSourceModels.h" #include "EventFilter/Utilities/interface/DAQSourceModelsFRD.h" -#include "EventFilter/Utilities/interface/DAQSourceModelsScouting.h" +#include "EventFilter/Utilities/interface/DAQSourceModelsScoutingRun3.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/InputSourceDescription.h" @@ -19,7 +19,7 @@ #include "DataFormats/Provenance/interface/EventID.h" #include "DataFormats/Provenance/interface/Timestamp.h" -#include "EventFilter/Utilities/interface/FastMonitoringService.h" +#include 
"EventFilter/Utilities/interface/SourceCommon.h" #include "EventFilter/Utilities/interface/DataPointDefinition.h" #include "EventFilter/Utilities/interface/FFFNamingSchema.h" #include "EventFilter/Utilities/interface/crc32c.h" @@ -81,6 +81,8 @@ DAQSource::DAQSource(edm::ParameterSet const& pset, edm::InputSourceDescription dataMode_.reset(new DataModeFRD(this)); } else if (dataModeConfig_ == "FRDStriped") { dataMode_.reset(new DataModeFRDStriped(this)); + } else if (dataModeConfig_ == "ScoutingRun3") { + dataMode_.reset(new DataModeScoutingRun3(this)); } else throw cms::Exception("DAQSource::DAQSource") << "Unknown data mode " << dataModeConfig_; @@ -101,7 +103,8 @@ DAQSource::DAQSource(edm::ParameterSet const& pset, edm::InputSourceDescription } } - dataMode_->makeDirectoryEntries(daqDirector_->getBUBaseDirs(), daqDirector_->runString()); + dataMode_->makeDirectoryEntries( + daqDirector_->getBUBaseDirs(), daqDirector_->getBUBaseDirsNSources(), daqDirector_->runString()); auto& daqProvenanceHelpers = dataMode_->makeDaqProvenanceHelpers(); for (const auto& daqProvenanceHelper : daqProvenanceHelpers) @@ -373,11 +376,14 @@ evf::EvFDaqDirector::FileStatus DAQSource::getNextDataBlock() { if (!currentFile_.get()) { evf::EvFDaqDirector::FileStatus status = evf::EvFDaqDirector::noFile; setMonState(inWaitInput); - if (!fileQueue_.try_pop(currentFile_)) { - //sleep until wakeup (only in single-buffer mode) or timeout - std::unique_lock lkw(mWakeup_); - if (cvWakeup_.wait_for(lkw, std::chrono::milliseconds(100)) == std::cv_status::timeout || !currentFile_.get()) - return evf::EvFDaqDirector::noFile; + { + IdleSourceSentry ids(fms_); + if (!fileQueue_.try_pop(currentFile_)) { + //sleep until wakeup (only in single-buffer mode) or timeout + std::unique_lock lkw(mWakeup_); + if (cvWakeup_.wait_for(lkw, std::chrono::milliseconds(100)) == std::cv_status::timeout || !currentFile_.get()) + return evf::EvFDaqDirector::noFile; + } } status = currentFile_->status_; if (status == 
evf::EvFDaqDirector::runEnded) { @@ -468,10 +474,13 @@ evf::EvFDaqDirector::FileStatus DAQSource::getNextDataBlock() { //multibuffer mode //wait for the current chunk to become added to the vector setMonState(inWaitChunk); - while (!currentFile_->waitForChunk(currentFile_->currentChunk_)) { - usleep(10000); - if (setExceptionState_) - threadError(); + { + IdleSourceSentry ids(fms_); + while (!currentFile_->waitForChunk(currentFile_->currentChunk_)) { + usleep(10000); + if (setExceptionState_) + threadError(); + } } setMonState(inChunkReceived); @@ -508,13 +517,14 @@ evf::EvFDaqDirector::FileStatus DAQSource::getNextDataBlock() { currentFile_->rewindChunk(dataMode_->headerSize()); setMonState(inWaitChunk); - - //do the copy to the beginning of the starting chunk. move pointers for next event in the next chunk - chunkEnd = currentFile_->advance(dataPosition, dataMode_->headerSize() + msgSize); - assert(chunkEnd); - //mark to release old chunk - chunkIsFree_ = true; - + { + IdleSourceSentry ids(fms_); + //do the copy to the beginning of the starting chunk. 
move pointers for next event in the next chunk + chunkEnd = currentFile_->advance(dataPosition, dataMode_->headerSize() + msgSize); + assert(chunkEnd); + //mark to release old chunk + chunkIsFree_ = true; + } setMonState(inChunkReceived); //header and payload is moved, update view dataMode_->makeDataBlockView( diff --git a/EventFilter/Utilities/src/DAQSourceModelsFRD.cc b/EventFilter/Utilities/src/DAQSourceModelsFRD.cc index c0dc23cdedeab..2784cef60ec55 100644 --- a/EventFilter/Utilities/src/DAQSourceModelsFRD.cc +++ b/EventFilter/Utilities/src/DAQSourceModelsFRD.cc @@ -153,7 +153,9 @@ std::string DataModeFRD::getChecksumError() const { * FRD Multi Test */ -void DataModeFRDStriped::makeDirectoryEntries(std::vector const& baseDirs, std::string const& runDir) { +void DataModeFRDStriped::makeDirectoryEntries(std::vector const& baseDirs, + std::vector const& numSources, + std::string const& runDir) { std::filesystem::path runDirP(runDir); for (auto& baseDir : baseDirs) { std::filesystem::path baseDirP(baseDir); diff --git a/EventFilter/Utilities/src/DAQSourceModelsScouting.cc b/EventFilter/Utilities/src/DAQSourceModelsScouting.cc deleted file mode 100644 index 4626975e6014e..0000000000000 --- a/EventFilter/Utilities/src/DAQSourceModelsScouting.cc +++ /dev/null @@ -1,312 +0,0 @@ -#include "EventFilter/Utilities/interface/DAQSource.h" -#include "EventFilter/Utilities/interface/DAQSourceModelsScouting.h" - -#include -#include -#include -#include - -#include "FWCore/Framework/interface/Event.h" -#include "DataFormats/Provenance/interface/EventAuxiliary.h" -#include "DataFormats/Provenance/interface/EventID.h" - -using namespace scouting; - -void DataModeScoutingRun2Muon::readEvent(edm::EventPrincipal& eventPrincipal) { - edm::TimeValue_t time; - timeval stv; - gettimeofday(&stv, nullptr); - time = stv.tv_sec; - time = (time << 32) + stv.tv_usec; - edm::Timestamp tstamp(time); - - std::unique_ptr> rawData(new BXVector); - //allow any bx - rawData->setBXRange(0, 4000); - - 
unpackOrbit(rawData.get(), (char*)event_->payload(), event_->eventSize()); - - uint32_t hdrEventID = event_->event(); - edm::EventID eventID = edm::EventID(daqSource_->eventRunNumber(), daqSource_->currentLumiSection(), hdrEventID); - edm::EventAuxiliary aux( - eventID, daqSource_->processGUID(), tstamp, event_->isRealData(), edm::EventAuxiliary::PhysicsTrigger); - - aux.setProcessHistoryID(daqSource_->processHistoryID()); - daqSource_->makeEventWrapper(eventPrincipal, aux); - - std::unique_ptr edp(new edm::Wrapper>(std::move(rawData))); - eventPrincipal.put( - daqProvenanceHelpers_[0]->branchDescription(), std::move(edp), daqProvenanceHelpers_[0]->dummyProvenance()); -} - -void DataModeScoutingRun2Muon::unpackOrbit(BXVector* muons, char* buf, size_t len) { - using namespace scouting; - size_t pos = 0; - uint32_t o_test = 0; - while (pos < len) { - assert(pos + 4 <= len); - uint32_t header = *((uint32*)(buf + pos)); - uint32_t mAcount = (header & header_masks::mAcount) >> header_shifts::mAcount; - uint32_t mBcount = (header & header_masks::mBcount) >> header_shifts::mBcount; - - block* bl = (block*)(buf + pos + 4); - - pos += 12 + (mAcount + mBcount) * 8; - assert(pos <= len); - - uint32_t bx = bl->bx; - - uint32_t orbit = bl->orbit; - o_test = orbit; - - //should cuts should be applied - bool excludeIntermediate = true; - - for (size_t i = 0; i < (mAcount + mBcount); i++) { - //unpack new muon - //variables: index, ietaext, ipt, qual, iphiext, iso, chrg, iphi, ieta - - // remove intermediate if required - // index==0 and ietaext==0 are a necessary and sufficient condition - uint32_t index = (bl->mu[i].s >> shifts::index) & masks::index; - int32_t ietaext = ((bl->mu[i].f >> shifts::etaext) & masks::etaextv); - if (((bl->mu[i].f >> shifts::etaext) & masks::etaexts) != 0) - ietaext -= 256; - - if (excludeIntermediate && index == 0 && ietaext == 0) - continue; - - //extract pt and quality and apply cut if required - uint32_t ipt = (bl->mu[i].f >> shifts::pt) & 
masks::pt; - //cuts?? - // if((ipt-1)mu[i].f >> shifts::qual) & masks::qual; - // if(qual < qualcut) {discarded++; continue;} - - //extract integer value for extrapolated phi - int32_t iphiext = ((bl->mu[i].f >> shifts::phiext) & masks::phiext); - - // extract iso bits and charge - uint32_t iso = (bl->mu[i].s >> shifts::iso) & masks::iso; - int32_t chrg = 0; - if (((bl->mu[i].s >> shifts::chrgv) & masks::chrgv) == 1) { - chrg = ((bl->mu[i].s >> shifts::chrg) & masks::chrg) == 1 ? -1 : 1; - } - - // extract eta and phi at muon station - int32_t iphi = ((bl->mu[i].s >> shifts::phi) & masks::phi); - int32_t ieta = (bl->mu[i].s >> shifts::eta) & masks::etav; - if (((bl->mu[i].s >> shifts::eta) & masks::etas) != 0) - ieta -= 256; - - l1t::Muon muon( - *dummyLVec_, ipt, ieta, iphi, qual, chrg, chrg != 0, iso, -1, 0, false, 0, 0, 0, 0, ietaext, iphiext); - muons->push_back(bx, muon); - } - } - std::cout << "end read ... " << o_test << std::endl << std::flush; -} //unpackOrbit - -std::vector>& DataModeScoutingRun2Muon::makeDaqProvenanceHelpers() { - //set FRD data collection - daqProvenanceHelpers_.clear(); - daqProvenanceHelpers_.emplace_back(std::make_shared( - edm::TypeID(typeid(l1t::MuonBxCollection)), "l1t::MuonBxCollection", "l1tMuonBxCollection", "DAQSource")); - return daqProvenanceHelpers_; -} - -bool DataModeScoutingRun2Muon::nextEventView() { - if (eventCached_) - return true; - event_ = std::make_unique(dataBlockAddr_); - if (event_->size() > dataBlockMax_) { - throw cms::Exception("DAQSource::getNextEvent") - << " event id:" << event_->event() << " lumi:" << event_->lumi() << " run:" << event_->run() - << " of size:" << event_->size() << " bytes does not fit into a chunk of size:" << dataBlockMax_ << " bytes"; - } - return true; -} - -bool DataModeScoutingRun2Muon::checksumValid() { return true; } - -std::string DataModeScoutingRun2Muon::getChecksumError() const { return std::string(); } - -// -//2nd model: read multiple input files with different data type 
-// - -std::pair> DataModeScoutingRun2Multi::defineAdditionalFiles( - std::string const& primaryName, bool fileListMode) const { - std::vector additionalFiles; - - auto fullpath = std::filesystem::path(primaryName); - auto fullname = fullpath.filename(); - std::string stem = fullname.stem().string(); - std::string ext = fullname.extension().string(); - std::regex regexz("_"); - std::vector nameTokens = {std::sregex_token_iterator(stem.begin(), stem.end(), regexz, -1), - std::sregex_token_iterator()}; - - if (nameTokens.size() < 3) { - throw cms::Exception("DAQSource::getNextEvent") - << primaryName << " name doesn't start with run#_ls#_index#_*.ext syntax"; - } - - //Can also filter out non-matching primary files (if detected by DaqDirector). false will tell source to skip the primary file. - if (nameTokens.size() > 3 && nameTokens[3].rfind("secondary", 0) == 0) - return std::make_pair(false, additionalFiles); - - //TODO: provisional, name syntax should be better defined - - additionalFiles.push_back(fullpath.parent_path().string() + "/" + nameTokens[0] + "_" + nameTokens[1] + "_" + - nameTokens[2] + "_secondary" + ext); - //additionalFiles.push_back(fullpath.parent_path.string() + "/" + nameTokens[0] + "_" + nameTokens[1] + "_" + nameTokens[2] + "_tertiary" + ".raw"); - - return std::make_pair(true, additionalFiles); -} - -void DataModeScoutingRun2Multi::readEvent(edm::EventPrincipal& eventPrincipal) { - edm::TimeValue_t time; - timeval stv; - gettimeofday(&stv, nullptr); - time = stv.tv_sec; - time = (time << 32) + stv.tv_usec; - edm::Timestamp tstamp(time); - - std::unique_ptr> rawData(new BXVector); - //allow any bx - rawData->setBXRange(0, 4000); - - unpackMuonOrbit(rawData.get(), (char*)events_[0]->payload(), events_[0]->eventSize()); - - //TODO: implement here other object type (e.g. 
unpackCaloOrbit) - // - std::unique_ptr> rawDataSec(new BXVector); - //allow any bx - rawDataSec->setBXRange(0, 4000); - - unpackMuonOrbit(rawDataSec.get(), (char*)events_[1]->payload(), events_[1]->eventSize()); - - uint32_t hdrEventID = events_[0]->event(); //take from 1st file - edm::EventID eventID = edm::EventID(daqSource_->eventRunNumber(), daqSource_->currentLumiSection(), hdrEventID); - edm::EventAuxiliary aux( - eventID, daqSource_->processGUID(), tstamp, events_[0]->isRealData(), edm::EventAuxiliary::PhysicsTrigger); - - aux.setProcessHistoryID(daqSource_->processHistoryID()); - daqSource_->makeEventWrapper(eventPrincipal, aux); - - std::unique_ptr edp(new edm::Wrapper>(std::move(rawData))); - eventPrincipal.put( - daqProvenanceHelpers_[0]->branchDescription(), std::move(edp), daqProvenanceHelpers_[0]->dummyProvenance()); - - //TODO: use other object and provenance helper (duplicate is just for demonstration) - // std::unique_ptr edpSec(new edm::Wrapper>(std::move(rawDataSec))); - // eventPrincipal.put(daqProvenanceHelpers_[1]->branchDescription(), std::move(edpSec), daqProvenanceHelpers_[1]->dummyProvenance()); - - eventCached_ = false; -} - -void DataModeScoutingRun2Multi::unpackMuonOrbit(BXVector* muons, char* buf, size_t len) { - using namespace scouting; - size_t pos = 0; - //uint32_t o_test = 0; - while (pos < len) { - assert(pos + 4 <= len); - uint32_t header = *((uint32*)(buf + pos)); - uint32_t mAcount = (header & header_masks::mAcount) >> header_shifts::mAcount; - uint32_t mBcount = (header & header_masks::mBcount) >> header_shifts::mBcount; - - block* bl = (block*)(buf + pos + 4); - - pos += 12 + (mAcount + mBcount) * 8; - assert(pos <= len); - - uint32_t bx = bl->bx; - - //uint32_t orbit = bl->orbit; - //o_test = orbit; - - //should cuts should be applied - bool excludeIntermediate = true; - - for (size_t i = 0; i < (mAcount + mBcount); i++) { - //unpack new muon - //variables: index, ietaext, ipt, qual, iphiext, iso, chrg, iphi, ieta - - // 
remove intermediate if required - // index==0 and ietaext==0 are a necessary and sufficient condition - uint32_t index = (bl->mu[i].s >> shifts::index) & masks::index; - int32_t ietaext = ((bl->mu[i].f >> shifts::etaext) & masks::etaextv); - if (((bl->mu[i].f >> shifts::etaext) & masks::etaexts) != 0) - ietaext -= 256; - - if (excludeIntermediate && index == 0 && ietaext == 0) - continue; - - //extract pt and quality and apply cut if required - uint32_t ipt = (bl->mu[i].f >> shifts::pt) & masks::pt; - //cuts?? - // if((ipt-1)mu[i].f >> shifts::qual) & masks::qual; - // if(qual < qualcut) {discarded++; continue;} - - //extract integer value for extrapolated phi - int32_t iphiext = ((bl->mu[i].f >> shifts::phiext) & masks::phiext); - - // extract iso bits and charge - uint32_t iso = (bl->mu[i].s >> shifts::iso) & masks::iso; - int32_t chrg = 0; - if (((bl->mu[i].s >> shifts::chrgv) & masks::chrgv) == 1) { - chrg = ((bl->mu[i].s >> shifts::chrg) & masks::chrg) == 1 ? -1 : 1; - } - - // extract eta and phi at muon station - int32_t iphi = ((bl->mu[i].s >> shifts::phi) & masks::phi); - int32_t ieta = (bl->mu[i].s >> shifts::eta) & masks::etav; - if (((bl->mu[i].s >> shifts::eta) & masks::etas) != 0) - ieta -= 256; - - l1t::Muon muon( - *dummyLVec_, ipt, ieta, iphi, qual, chrg, chrg != 0, iso, -1, 0, false, 0, 0, 0, 0, ietaext, iphiext); - muons->push_back(bx, muon); - } - } -} //unpackOrbit - -std::vector>& DataModeScoutingRun2Multi::makeDaqProvenanceHelpers() { - //set FRD data collection - daqProvenanceHelpers_.clear(); - daqProvenanceHelpers_.emplace_back(std::make_shared( - edm::TypeID(typeid(l1t::MuonBxCollection)), "l1t::MuonBxCollection", "l1tMuonBxCollection", "DAQSource")); - //Note: two same kind of objects can not be put in the event from the source, so this example will be changed - daqProvenanceHelpers_.emplace_back(std::make_shared( - edm::TypeID(typeid(l1t::MuonBxCollection)), "l1t::MuonBxCollection", "l1tMuonBxCollection", "DAQSource")); - return 
daqProvenanceHelpers_; -} - -bool DataModeScoutingRun2Multi::nextEventView() { - blockCompleted_ = false; - if (eventCached_) - return true; - for (unsigned int i = 0; i < events_.size(); i++) { - //add last event length.. - dataBlockAddrs_[i] += events_[i]->size(); - } - return makeEvents(); -} - -bool DataModeScoutingRun2Multi::makeEvents() { - events_.clear(); - for (int i = 0; i < numFiles_; i++) { - if (dataBlockAddrs_[i] >= dataBlockMaxAddrs_[i]) { - blockCompleted_ = true; - return false; - } - events_.emplace_back(std::make_unique(dataBlockAddrs_[i])); - } - return true; -} - -bool DataModeScoutingRun2Multi::checksumValid() { return true; } - -std::string DataModeScoutingRun2Multi::getChecksumError() const { return std::string(); } diff --git a/EventFilter/Utilities/src/DAQSourceModelsScoutingRun3.cc b/EventFilter/Utilities/src/DAQSourceModelsScoutingRun3.cc new file mode 100644 index 0000000000000..f856fdbed66ef --- /dev/null +++ b/EventFilter/Utilities/src/DAQSourceModelsScoutingRun3.cc @@ -0,0 +1,184 @@ +#include "EventFilter//Utilities/interface/DAQSourceModelsScoutingRun3.h" + +void DataModeScoutingRun3::makeDirectoryEntries(std::vector const& baseDirs, + std::vector const& numSources, + std::string const& runDir) { + std::filesystem::path runDirP(runDir); + for (auto& baseDir : baseDirs) { + std::filesystem::path baseDirP(baseDir); + buPaths_.emplace_back(baseDirP / runDirP); + } + + // store the number of sources in each BU + buNumSources_ = numSources; +} + +std::pair> DataModeScoutingRun3::defineAdditionalFiles(std::string const& primaryName, + bool fileListMode) const { + std::vector additionalFiles; + + if (fileListMode) { + // Expected file naming when working in file list mode + for (int j = 1; j < buNumSources_[0]; j++) { + additionalFiles.push_back(primaryName + "_" + std::to_string(j)); + } + return std::make_pair(true, additionalFiles); + } + + auto fullpath = std::filesystem::path(primaryName); + auto fullname = fullpath.filename(); + + 
for (size_t i = 0; i < buPaths_.size(); i++) { + std::filesystem::path newPath = buPaths_[i] / fullname; + + if (i != 0) { + // secondary files from other ramdisks + additionalFiles.push_back(newPath.generic_string()); + } + + // add extra sources from the same ramdisk + for (int j = 1; j < buNumSources_[i]; j++) { + additionalFiles.push_back(newPath.generic_string() + "_" + std::to_string(j)); + } + } + return std::make_pair(true, additionalFiles); +} + +void DataModeScoutingRun3::readEvent(edm::EventPrincipal& eventPrincipal) { + assert(!events_.empty()); + + edm::TimeValue_t time; + timeval stv; + gettimeofday(&stv, nullptr); + time = stv.tv_sec; + time = (time << 32) + stv.tv_usec; + edm::Timestamp tstamp(time); + + // set provenance helpers + uint32_t hdrEventID = currOrbit_; + edm::EventID eventID = edm::EventID(daqSource_->eventRunNumber(), daqSource_->currentLumiSection(), hdrEventID); + edm::EventAuxiliary aux( + eventID, daqSource_->processGUID(), tstamp, events_[0]->isRealData(), edm::EventAuxiliary::PhysicsTrigger); + + aux.setProcessHistoryID(daqSource_->processHistoryID()); + daqSource_->makeEventWrapper(eventPrincipal, aux); + + // create scouting raw data collection + std::unique_ptr rawData(new SDSRawDataCollection); + + // Fill the ScoutingRawDataCollection with valid orbit data from the multiple sources + for (const auto& pair : sourceValidOrbitPair_) { + fillSDSRawDataCollection(*rawData, (char*)events_[pair.second]->payload(), events_[pair.second]->eventSize()); + } + + std::unique_ptr edp(new edm::Wrapper(std::move(rawData))); + eventPrincipal.put( + daqProvenanceHelpers_[0]->branchDescription(), std::move(edp), daqProvenanceHelpers_[0]->dummyProvenance()); + + eventCached_ = false; +} + +void DataModeScoutingRun3::fillSDSRawDataCollection(SDSRawDataCollection& rawData, char* buff, size_t len) { + size_t pos = 0; + + // get the source ID + int sourceId = *((uint32_t*)(buff + pos)); + pos += 4; + + // size of the orbit paylod + size_t orbitSize 
= len - pos; + + // set the size (=orbit size) in the SRDColletion of the current source. + // FRD size is expecting 8 bytes words, while scouting is using 4 bytes + // words. This could be different for some future sources. + FEDRawData& fedData = rawData.FEDData(sourceId); + fedData.resize(orbitSize, 4); + + memcpy(fedData.data(), buff + pos, orbitSize); + + return; +} + +std::vector>& DataModeScoutingRun3::makeDaqProvenanceHelpers() { + //set SRD data collection + daqProvenanceHelpers_.clear(); + daqProvenanceHelpers_.emplace_back(std::make_shared( + edm::TypeID(typeid(SDSRawDataCollection)), "SDSRawDataCollection", "SDSRawDataCollection", "DAQSource")); + return daqProvenanceHelpers_; +} + +bool DataModeScoutingRun3::nextEventView() { + blockCompleted_ = false; + if (eventCached_) + return true; + + // move the data block address only for the sources processed + // un the previous event by adding the last event size + for (const auto& pair : sourceValidOrbitPair_) { + dataBlockAddrs_[pair.first] += events_[pair.second]->size(); + } + + return makeEvents(); +} + +bool DataModeScoutingRun3::makeEvents() { + // clear events and reset current orbit + events_.clear(); + sourceValidOrbitPair_.clear(); + currOrbit_ = 0xFFFFFFFF; // max uint + assert(!blockCompleted_); + + // create current "events" (= orbits) list from each data source, + // check if one dataBlock terminated earlier than others. 
+ for (int i = 0; i < numFiles_; i++) { + if (dataBlockAddrs_[i] >= dataBlockMaxAddrs_[i]) { + completedBlocks_[i] = true; + continue; + } + + // event contains data, add it to the events list + events_.emplace_back(std::make_unique(dataBlockAddrs_[i])); + if (dataBlockAddrs_[i] + events_.back()->size() > dataBlockMaxAddrs_[i]) + throw cms::Exception("DAQSource::getNextEvent") + << " event id:" << events_.back()->event() << " lumi:" << events_.back()->lumi() + << " run:" << events_.back()->run() << " of size:" << events_.back()->size() + << " bytes does not fit into the buffer or has corrupted header"; + + // find the minimum orbit for the current event between all files + if ((events_.back()->event() < currOrbit_) && (!completedBlocks_[i])) { + currOrbit_ = events_.back()->event(); + } + } + + // mark valid orbits from each data source + // e.g. find when orbit is missing from one source + bool allBlocksCompleted = true; + int evt_idx = 0; + for (int i = 0; i < numFiles_; i++) { + if (completedBlocks_[i]) { + continue; + } + + if (events_[evt_idx]->event() != currOrbit_) { + // current source (=i-th source) doesn't contain the expected orbit. 
+ // skip it, and move to the next orbit + } else { + // add a pair + // evt_idx can be different from variable i, as some data blocks can be + // completed before others + sourceValidOrbitPair_.emplace_back(std::make_pair(i, evt_idx)); + allBlocksCompleted = false; + } + + evt_idx++; + } + + if (allBlocksCompleted) { + blockCompleted_ = true; + } + return !allBlocksCompleted; +} + +bool DataModeScoutingRun3::checksumValid() { return true; } + +std::string DataModeScoutingRun3::getChecksumError() const { return std::string(); } diff --git a/EventFilter/Utilities/src/EvFDaqDirector.cc b/EventFilter/Utilities/src/EvFDaqDirector.cc index b89a2e4b032ce..2344195c6efd9 100644 --- a/EventFilter/Utilities/src/EvFDaqDirector.cc +++ b/EventFilter/Utilities/src/EvFDaqDirector.cc @@ -40,18 +40,16 @@ namespace evf { : base_dir_(pset.getUntrackedParameter("baseDir")), bu_base_dir_(pset.getUntrackedParameter("buBaseDir")), bu_base_dirs_all_(pset.getUntrackedParameter>("buBaseDirsAll")), + bu_base_dirs_nSources_(pset.getUntrackedParameter>("buBaseDirsNumStreams")), run_(pset.getUntrackedParameter("runNumber")), useFileBroker_(pset.getUntrackedParameter("useFileBroker")), - fileBrokerHostFromCfg_(pset.getUntrackedParameter("fileBrokerHostFromCfg", true)), + fileBrokerHostFromCfg_(pset.getUntrackedParameter("fileBrokerHostFromCfg", false)), fileBrokerHost_(pset.getUntrackedParameter("fileBrokerHost", "InValid")), fileBrokerPort_(pset.getUntrackedParameter("fileBrokerPort", "8080")), fileBrokerKeepAlive_(pset.getUntrackedParameter("fileBrokerKeepAlive", true)), fileBrokerUseLocalLock_(pset.getUntrackedParameter("fileBrokerUseLocalLock", true)), fuLockPollInterval_(pset.getUntrackedParameter("fuLockPollInterval", 2000)), outputAdler32Recheck_(pset.getUntrackedParameter("outputAdler32Recheck", false)), - requireTSPSet_(pset.getUntrackedParameter("requireTransfersPSet", false)), - selectedTransferMode_(pset.getUntrackedParameter("selectedTransferMode", "")), - 
mergeTypePset_(pset.getUntrackedParameter("mergingPset", "")), directorBU_(pset.getUntrackedParameter("directorIsBU", false)), hltSourceDirectory_(pset.getUntrackedParameter("hltSourceDirectory", "")), hostname_(""), @@ -72,7 +70,6 @@ namespace evf { fu_rw_flk(make_flock(F_WRLCK, SEEK_SET, 0, 0, getpid())), fu_rw_fulk(make_flock(F_UNLCK, SEEK_SET, 0, 0, getpid())) { reg.watchPreallocate(this, &EvFDaqDirector::preallocate); - reg.watchPreBeginJob(this, &EvFDaqDirector::preBeginJob); reg.watchPreGlobalBeginRun(this, &EvFDaqDirector::preBeginRun); reg.watchPostGlobalEndRun(this, &EvFDaqDirector::postEndRun); reg.watchPreGlobalEndLumi(this, &EvFDaqDirector::preGlobalEndLumi); @@ -146,6 +143,23 @@ namespace evf { } } + // set number of streams in each BU's ramdisk + if (bu_base_dirs_nSources_.empty()) { + // default is 1 stream per ramdisk + for (unsigned int i = 0; i < bu_base_dirs_all_.size(); i++) { + bu_base_dirs_nSources_.push_back(1); + } + } else if (bu_base_dirs_nSources_.size() != bu_base_dirs_all_.size()) { + throw cms::Exception("DaqDirector") + << " Error while setting number of sources: size mismatch with BU base directory vector"; + } else { + for (unsigned int i = 0; i < bu_base_dirs_all_.size(); i++) { + bu_base_dirs_nSources_.push_back(bu_base_dirs_nSources_[i]); + edm::LogInfo("EvFDaqDirector") << "Setting " << bu_base_dirs_nSources_[i] << " sources" + << " for ramdisk " << bu_base_dirs_all_[i]; + } + } + std::stringstream ss; ss << "run" << std::setfill('0') << std::setw(6) << run_; run_string_ = ss.str(); @@ -272,6 +286,9 @@ namespace evf { auto waitForDir = [=](std::string const& bu_base_dir) -> void { int cnt = 0; while (!edm::shutdown_flag.load(std::memory_order_relaxed)) { + //stat should trigger autofs mount (mkdir could fail with access denied first time) + struct stat statbuf; + stat(bu_base_dir.c_str(), &statbuf); int retval = mkdir(bu_base_dir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); if (retval != 0 && errno != EEXIST) { 
usleep(500000); @@ -281,15 +298,17 @@ namespace evf { if (cnt > 120) throw cms::Exception("DaqDirector") << " Error checking for bu base dir after 1 minute -: " << bu_base_dir << " mkdir error:" << strerror(errno); + continue; } break; } }; if (!bu_base_dirs_all_.empty()) { - checkExists(bu_base_dirs_all_[0]); - bu_run_dir_ = bu_base_dirs_all_[0] + "/" + run_string_; - for (unsigned int i = 1; i < bu_base_dirs_all_.size(); i++) + std::string check_dir = bu_base_dir_.empty() ? bu_base_dirs_all_[0] : bu_base_dir_; + checkExists(check_dir); + bu_run_dir_ = check_dir + "/" + run_string_; + for (unsigned int i = 0; i < bu_base_dirs_all_.size(); i++) waitForDir(bu_base_dirs_all_[i]); } else { checkExists(bu_base_dir_); @@ -365,6 +384,8 @@ namespace evf { desc.addUntracked("buBaseDir", ".")->setComment("BU base ramdisk directory "); desc.addUntracked>("buBaseDirsAll", std::vector()) ->setComment("BU base ramdisk directories for multi-file DAQSource models"); + desc.addUntracked>("buBaseDirsNumStreams", std::vector()) + ->setComment("Number of streams for each BU base ramdisk directories for multi-file DAQSource models"); desc.addUntracked("runNumber", 0)->setComment("Run Number in ramdisk to open"); desc.addUntracked("useFileBroker", false) ->setComment("Use BU file service to grab input data instead of NFS file locking"); @@ -380,10 +401,6 @@ namespace evf { ->setComment("Lock polling interval in microseconds for the input directory file lock"); desc.addUntracked("outputAdler32Recheck", false) ->setComment("Check Adler32 of per-process output files while micro-merging"); - desc.addUntracked("requireTransfersPSet", false) - ->setComment("Require complete transferSystem PSet in the process configuration"); - desc.addUntracked("selectedTransferMode", "") - ->setComment("Selected transfer mode (choice in Lvl0 propagated as Python parameter"); desc.addUntracked("directorIsBU", false)->setComment("BU director mode used for testing"); desc.addUntracked("hltSourceDirectory", 
"")->setComment("BU director mode source directory"); desc.addUntracked("mergingPset", "") @@ -391,11 +408,6 @@ namespace evf { descriptions.add("EvFDaqDirector", desc); } - void EvFDaqDirector::preBeginJob(edm::PathsAndConsumesOfModulesBase const&, edm::ProcessContext const& pc) { - checkTransferSystemPSet(pc); - checkMergeTypePSet(pc); - } - void EvFDaqDirector::preBeginRun(edm::GlobalContext const& globalContext) { //assert(run_ == id.run()); @@ -1982,146 +1994,6 @@ namespace evf { } //if transferSystem PSet is present in the menu, we require it to be complete and consistent for all specified streams - void EvFDaqDirector::checkTransferSystemPSet(edm::ProcessContext const& pc) { - if (transferSystemJson_) - return; - - transferSystemJson_.reset(new Json::Value); - edm::ParameterSet const& topPset = edm::getParameterSet(pc.parameterSetID()); - if (topPset.existsAs("transferSystem", true)) { - const edm::ParameterSet& tsPset(topPset.getParameterSet("transferSystem")); - - Json::Value destinationsVal(Json::arrayValue); - std::vector destinations = tsPset.getParameter>("destinations"); - for (auto& dest : destinations) - destinationsVal.append(dest); - (*transferSystemJson_)["destinations"] = destinationsVal; - - Json::Value modesVal(Json::arrayValue); - std::vector modes = tsPset.getParameter>("transferModes"); - for (auto& mode : modes) - modesVal.append(mode); - (*transferSystemJson_)["transferModes"] = modesVal; - - for (auto psKeyItr = tsPset.psetTable().begin(); psKeyItr != tsPset.psetTable().end(); ++psKeyItr) { - if (psKeyItr->first != "destinations" && psKeyItr->first != "transferModes") { - const edm::ParameterSet& streamDef = tsPset.getParameterSet(psKeyItr->first); - Json::Value streamVal; - for (auto& mode : modes) { - //validation - if (!streamDef.existsAs>(mode, true)) - throw cms::Exception("EvFDaqDirector") - << " Missing transfer system specification for -:" << psKeyItr->first << " (transferMode " << mode - << ")"; - std::vector streamDestinations 
= streamDef.getParameter>(mode); - - Json::Value sDestsValue(Json::arrayValue); - - if (streamDestinations.empty()) - throw cms::Exception("EvFDaqDirector") - << " Missing transter system destination(s) for -: " << psKeyItr->first << ", mode:" << mode; - - for (auto& sdest : streamDestinations) { - bool sDestValid = false; - sDestsValue.append(sdest); - for (auto& dest : destinations) { - if (dest == sdest) - sDestValid = true; - } - if (!sDestValid) - throw cms::Exception("EvFDaqDirector") - << " Invalid transter system destination specified for -: " << psKeyItr->first << ", mode:" << mode - << ", dest:" << sdest; - } - streamVal[mode] = sDestsValue; - } - (*transferSystemJson_)[psKeyItr->first] = streamVal; - } - } - } else { - if (requireTSPSet_) - throw cms::Exception("EvFDaqDirector") << "transferSystem PSet not found"; - } - } - - std::string EvFDaqDirector::getStreamDestinations(std::string const& stream) const { - std::string streamRequestName; - if (transferSystemJson_->isMember(stream.c_str())) - streamRequestName = stream; - else { - std::stringstream msg; - msg << "Transfer system mode definitions missing for -: " << stream; - if (requireTSPSet_) - throw cms::Exception("EvFDaqDirector") << msg.str(); - else { - edm::LogWarning("EvFDaqDirector") << msg.str() << " (permissive mode)"; - return std::string("Failsafe"); - } - } - //return empty if strict check parameter is not on - if (!requireTSPSet_ && (selectedTransferMode_.empty() || selectedTransferMode_ == "null")) { - edm::LogWarning("EvFDaqDirector") - << "Selected mode string is not provided as DaqDirector parameter." - << "Switch on requireTSPSet parameter to enforce this requirement. 
Setting mode to empty string."; - return std::string("Failsafe"); - } - if (requireTSPSet_ && (selectedTransferMode_.empty() || selectedTransferMode_ == "null")) { - throw cms::Exception("EvFDaqDirector") << "Selected mode string is not provided as DaqDirector parameter."; - } - //check if stream has properly listed transfer stream - if (!transferSystemJson_->get(streamRequestName, "").isMember(selectedTransferMode_.c_str())) { - std::stringstream msg; - msg << "Selected transfer mode " << selectedTransferMode_ << " is not specified for stream " << streamRequestName; - if (requireTSPSet_) - throw cms::Exception("EvFDaqDirector") << msg.str(); - else - edm::LogWarning("EvFDaqDirector") << msg.str() << " (permissive mode)"; - return std::string("Failsafe"); - } - Json::Value destsVec = transferSystemJson_->get(streamRequestName, "").get(selectedTransferMode_, ""); - - //flatten string json::Array into CSV std::string - std::string ret; - for (Json::Value::iterator it = destsVec.begin(); it != destsVec.end(); it++) { - if (!ret.empty()) - ret += ","; - ret += (*it).asString(); - } - return ret; - } - - void EvFDaqDirector::checkMergeTypePSet(edm::ProcessContext const& pc) { - if (mergeTypePset_.empty()) - return; - if (!mergeTypeMap_.empty()) - return; - edm::ParameterSet const& topPset = edm::getParameterSet(pc.parameterSetID()); - if (topPset.existsAs(mergeTypePset_, true)) { - const edm::ParameterSet& tsPset(topPset.getParameterSet(mergeTypePset_)); - for (const std::string& pname : tsPset.getParameterNames()) { - std::string streamType = tsPset.getParameter(pname); - tbb::concurrent_hash_map::accessor ac; - mergeTypeMap_.insert(ac, pname); - ac->second = streamType; - ac.release(); - } - } - } - - std::string EvFDaqDirector::getStreamMergeType(std::string const& stream, MergeType defaultType) { - tbb::concurrent_hash_map::const_accessor search_ac; - if (mergeTypeMap_.find(search_ac, stream)) - return search_ac->second; - - edm::LogInfo("EvFDaqDirector") << " No 
merging type specified for stream " << stream << ". Using default value"; - std::string defaultName = MergeTypeNames_[defaultType]; - tbb::concurrent_hash_map::accessor ac; - mergeTypeMap_.insert(ac, stream); - ac->second = defaultName; - ac.release(); - return defaultName; - } - void EvFDaqDirector::createProcessingNotificationMaybe() const { std::string proc_flag = run_dir_ + "/processing"; int proc_flag_fd = open(proc_flag.c_str(), O_RDWR | O_CREAT, S_IRWXU | S_IWGRP | S_IRGRP | S_IWOTH | S_IROTH); diff --git a/EventFilter/Utilities/src/EvFOutputModule.cc b/EventFilter/Utilities/src/EvFOutputModule.cc deleted file mode 100644 index 56581c4864fe0..0000000000000 --- a/EventFilter/Utilities/src/EvFOutputModule.cc +++ /dev/null @@ -1,289 +0,0 @@ -#include "EventFilter/Utilities/interface/EvFOutputModule.h" - -#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" - -#include "FWCore/ServiceRegistry/interface/Service.h" -#include "EventFilter/Utilities/interface/FastMonitoringService.h" -#include "EventFilter/Utilities/interface/EvFDaqDirector.h" - -#include "EventFilter/Utilities/interface/JSONSerializer.h" -#include "EventFilter/Utilities/interface/FileIO.h" -#include "FWCore/Utilities/interface/Adler32Calculator.h" - -#include "FWCore/Framework/interface/EventForOutput.h" -#include "FWCore/Framework/interface/LuminosityBlockForOutput.h" -#include "FWCore/Framework/interface/LuminosityBlock.h" - -#include "IOPool/Streamer/interface/InitMsgBuilder.h" -#include "IOPool/Streamer/interface/EventMsgBuilder.h" -#include "FWCore/Utilities/interface/UnixSignalHandlers.h" - -#include -#include -#include - -namespace evf { - - EvFOutputJSONWriter::EvFOutputJSONWriter(edm::StreamerOutputModuleCommon::Parameters const& commonParameters, - edm::SelectedProducts const* selections, - std::string const& streamLabel, - std::string const& moduleLabel) - : streamerCommon_(commonParameters, selections, moduleLabel), - processed_(0), - accepted_(0), - errorEvents_(0), - 
retCodeMask_(0), - filelist_(), - filesize_(0), - inputFiles_(), - fileAdler32_(1), - hltErrorEvents_(0) { - transferDestination_ = edm::Service()->getStreamDestinations(streamLabel); - mergeType_ = edm::Service()->getStreamMergeType(streamLabel, evf::MergeTypeDAT); - - std::string baseRunDir = edm::Service()->baseRunDir(); - LogDebug("EvFOutputModule") << "writing .dat files to -: " << baseRunDir; - - edm::Service()->createRunOpendirMaybe(); - - processed_.setName("Processed"); - accepted_.setName("Accepted"); - errorEvents_.setName("ErrorEvents"); - retCodeMask_.setName("ReturnCodeMask"); - filelist_.setName("Filelist"); - filesize_.setName("Filesize"); - inputFiles_.setName("InputFiles"); - fileAdler32_.setName("FileAdler32"); - transferDestination_.setName("TransferDestination"); - mergeType_.setName("MergeType"); - hltErrorEvents_.setName("HLTErrorEvents"); - - outJsonDef_.setDefaultGroup("data"); - outJsonDef_.addLegendItem("Processed", "integer", jsoncollector::DataPointDefinition::SUM); - outJsonDef_.addLegendItem("Accepted", "integer", jsoncollector::DataPointDefinition::SUM); - outJsonDef_.addLegendItem("ErrorEvents", "integer", jsoncollector::DataPointDefinition::SUM); - outJsonDef_.addLegendItem("ReturnCodeMask", "integer", jsoncollector::DataPointDefinition::BINARYOR); - outJsonDef_.addLegendItem("Filelist", "string", jsoncollector::DataPointDefinition::MERGE); - outJsonDef_.addLegendItem("Filesize", "integer", jsoncollector::DataPointDefinition::SUM); - outJsonDef_.addLegendItem("InputFiles", "string", jsoncollector::DataPointDefinition::CAT); - outJsonDef_.addLegendItem("FileAdler32", "integer", jsoncollector::DataPointDefinition::ADLER32); - outJsonDef_.addLegendItem("TransferDestination", "string", jsoncollector::DataPointDefinition::SAME); - outJsonDef_.addLegendItem("MergeType", "string", jsoncollector::DataPointDefinition::SAME); - outJsonDef_.addLegendItem("HLTErrorEvents", "integer", jsoncollector::DataPointDefinition::SUM); - - 
std::stringstream tmpss, ss; - tmpss << baseRunDir << "/open/" - << "output_" << getpid() << ".jsd"; - ss << baseRunDir << "/" - << "output_" << getpid() << ".jsd"; - std::string outTmpJsonDefName = tmpss.str(); - std::string outJsonDefName = ss.str(); - - edm::Service()->lockInitLock(); - struct stat fstat; - if (stat(outJsonDefName.c_str(), &fstat) != 0) { //file does not exist - LogDebug("EvFOutputModule") << "writing output definition file -: " << outJsonDefName; - std::string content; - jsoncollector::JSONSerializer::serialize(&outJsonDef_, content); - jsoncollector::FileIO::writeStringToFile(outTmpJsonDefName, content); - std::filesystem::rename(outTmpJsonDefName, outJsonDefName); - } - edm::Service()->unlockInitLock(); - - jsonMonitor_.reset(new jsoncollector::FastMonitor(&outJsonDef_, true)); - jsonMonitor_->setDefPath(outJsonDefName); - jsonMonitor_->registerGlobalMonitorable(&processed_, false); - jsonMonitor_->registerGlobalMonitorable(&accepted_, false); - jsonMonitor_->registerGlobalMonitorable(&errorEvents_, false); - jsonMonitor_->registerGlobalMonitorable(&retCodeMask_, false); - jsonMonitor_->registerGlobalMonitorable(&filelist_, false); - jsonMonitor_->registerGlobalMonitorable(&filesize_, false); - jsonMonitor_->registerGlobalMonitorable(&inputFiles_, false); - jsonMonitor_->registerGlobalMonitorable(&fileAdler32_, false); - jsonMonitor_->registerGlobalMonitorable(&transferDestination_, false); - jsonMonitor_->registerGlobalMonitorable(&mergeType_, false); - jsonMonitor_->registerGlobalMonitorable(&hltErrorEvents_, false); - jsonMonitor_->commit(nullptr); - } - - EvFOutputModule::EvFOutputModule(edm::ParameterSet const& ps) - : edm::one::OutputModuleBase(ps), - EvFOutputModuleType(ps), - commonParameters_(edm::StreamerOutputModuleCommon::parameters(ps)), - streamLabel_(ps.getParameter("@module_label")), - trToken_(consumes(edm::InputTag("TriggerResults"))), - psetToken_(consumes( - ps.getUntrackedParameter("psetMap"))) { - //replace hltOutoputA 
with stream if the HLT menu uses this convention - std::string testPrefix = "hltOutput"; - if (streamLabel_.find(testPrefix) == 0) - streamLabel_ = std::string("stream") + streamLabel_.substr(testPrefix.size()); - - if (streamLabel_.find('_') != std::string::npos) { - throw cms::Exception("EvFOutputModule") << "Underscore character is reserved can not be used for stream names in " - "FFF, but was detected in stream name -: " - << streamLabel_; - } - - std::string streamLabelLow = streamLabel_; - boost::algorithm::to_lower(streamLabelLow); - auto streampos = streamLabelLow.rfind("stream"); - if (streampos != 0 && streampos != std::string::npos) - throw cms::Exception("EvFOutputModule") - << "stream (case-insensitive) sequence was found in stream suffix. This is reserved and can not be used for " - "names in FFF based HLT, but was detected in stream name"; - - fms_ = (evf::FastMonitoringService*)(edm::Service().operator->()); - } - - EvFOutputModule::~EvFOutputModule() {} - - void EvFOutputModule::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { - edm::ParameterSetDescription desc; - edm::StreamerOutputModuleCommon::fillDescription(desc); - EvFOutputModuleType::fillDescription(desc); - desc.addUntracked("psetMap", {"hltPSetMap"}) - ->setComment("Optionally allow the map of ParameterSets to be calculated externally."); - descriptions.add("evfOutputModule", desc); - } - - void EvFOutputModule::beginRun(edm::RunForOutput const& run) { - //create run Cache holding JSON file writer and variables - jsonWriter_ = std::make_unique( - commonParameters_, &keptProducts()[edm::InEvent], streamLabel_, description().moduleLabel()); - - //output INI file (non-const). 
This doesn't require globalBeginRun to be finished - const std::string openIniFileName = edm::Service()->getOpenInitFilePath(streamLabel_); - edm::LogInfo("EvFOutputModule") << "beginRun init stream -: " << openIniFileName; - - StreamerOutputFile stream_writer_preamble(openIniFileName); - uint32 preamble_adler32 = 1; - edm::BranchIDLists const* bidlPtr = branchIDLists(); - - auto psetMapHandle = run.getHandle(psetToken_); - - std::unique_ptr init_message = - jsonWriter_->streamerCommon_.serializeRegistry(*jsonWriter_->streamerCommon_.getSerializerBuffer(), - *bidlPtr, - *thinnedAssociationsHelper(), - OutputModule::processName(), - description().moduleLabel(), - moduleDescription().mainParameterSetID(), - psetMapHandle.isValid() ? psetMapHandle.product() : nullptr); - - //Let us turn it into a View - InitMsgView view(init_message->startAddress()); - - //output header - stream_writer_preamble.write(view); - preamble_adler32 = stream_writer_preamble.adler32(); - stream_writer_preamble.close(); - - struct stat istat; - stat(openIniFileName.c_str(), &istat); - //read back file to check integrity of what was written - off_t readInput = 0; - uint32_t adlera = 1, adlerb = 0; - FILE* src = fopen(openIniFileName.c_str(), "r"); - - //allocate buffer to write INI file - unsigned char* outBuf = new unsigned char[1024 * 1024]; - while (readInput < istat.st_size) { - size_t toRead = readInput + 1024 * 1024 < istat.st_size ? 
1024 * 1024 : istat.st_size - readInput; - fread(outBuf, toRead, 1, src); - cms::Adler32((const char*)outBuf, toRead, adlera, adlerb); - readInput += toRead; - } - fclose(src); - - //clear serialization buffers - jsonWriter_->streamerCommon_.getSerializerBuffer()->clearHeaderBuffer(); - - //free output buffer needed only for the file write - delete[] outBuf; - outBuf = nullptr; - - uint32_t adler32c = (adlerb << 16) | adlera; - if (adler32c != preamble_adler32) { - throw cms::Exception("EvFOutputModule") << "Checksum mismatch of ini file -: " << openIniFileName - << " expected:" << preamble_adler32 << " obtained:" << adler32c; - } else { - LogDebug("EvFOutputModule") << "Ini file checksum -: " << streamLabel_ << " " << adler32c; - std::filesystem::rename(openIniFileName, edm::Service()->getInitFilePath(streamLabel_)); - } - } - - Trig EvFOutputModule::getTriggerResults(edm::EDGetTokenT const& token, - edm::EventForOutput const& e) const { - Trig result; - e.getByToken(token, result); - return result; - } - - std::shared_ptr EvFOutputModule::globalBeginLuminosityBlock( - edm::LuminosityBlockForOutput const& iLB) const { - auto openDatFilePath = edm::Service()->getOpenDatFilePath(iLB.luminosityBlock(), streamLabel_); - - return std::make_shared(openDatFilePath); - } - - void EvFOutputModule::write(edm::EventForOutput const& e) { - unsigned int counter = 0; - while (edm::Service()->inputThrottled()) { - if (edm::shutdown_flag.load(std::memory_order_relaxed)) - break; - if (!(counter % 100)) - edm::LogWarning("FedRawDataInputSource") << "Input throttled detected, writing is paused..."; - usleep(100000); - counter++; - } - - edm::Handle const& triggerResults = getTriggerResults(trToken_, e); - - //auto lumiWriter = const_cast(luminosityBlockCache(e.getLuminosityBlock().index() )); - auto lumiWriter = luminosityBlockCache(e.getLuminosityBlock().index()); - std::unique_ptr msg = jsonWriter_->streamerCommon_.serializeEvent( - 
*jsonWriter_->streamerCommon_.getSerializerBuffer(), e, triggerResults, selectorConfig()); - lumiWriter->incAccepted(); - lumiWriter->doOutputEvent(*msg); //msg is written and discarded at this point - } - - void EvFOutputModule::globalEndLuminosityBlock(edm::LuminosityBlockForOutput const& iLB) { - auto lumiWriter = luminosityBlockCache(iLB.index()); - //close dat file - lumiWriter->close(); - - jsonWriter_->fileAdler32_.value() = lumiWriter->get_adler32(); - jsonWriter_->accepted_.value() = lumiWriter->getAccepted(); - - bool abortFlag = false; - jsonWriter_->processed_.value() = fms_->getEventsProcessedForLumi(iLB.luminosityBlock(), &abortFlag); - if (abortFlag) { - edm::LogInfo("EvFOutputModule") << "Abort flag has been set. Output is suppressed"; - return; - } - - if (jsonWriter_->processed_.value() != 0) { - struct stat istat; - std::filesystem::path openDatFilePath = lumiWriter->getFilePath(); - stat(openDatFilePath.string().c_str(), &istat); - jsonWriter_->filesize_ = istat.st_size; - std::filesystem::rename(openDatFilePath.string().c_str(), - edm::Service()->getDatFilePath(iLB.luminosityBlock(), streamLabel_)); - jsonWriter_->filelist_ = openDatFilePath.filename().string(); - } else { - //remove empty file when no event processing has occurred - remove(lumiWriter->getFilePath().c_str()); - jsonWriter_->filesize_ = 0; - jsonWriter_->filelist_ = ""; - jsonWriter_->fileAdler32_.value() = -1; //no files in signed long - } - - //produce JSON file - jsonWriter_->jsonMonitor_->snap(iLB.luminosityBlock()); - const std::string outputJsonNameStream = - edm::Service()->getOutputJsonFilePath(iLB.luminosityBlock(), streamLabel_); - jsonWriter_->jsonMonitor_->outputFullJSON(outputJsonNameStream, iLB.luminosityBlock()); - } - -} // namespace evf diff --git a/EventFilter/Utilities/src/FastMonitor.cc b/EventFilter/Utilities/src/FastMonitor.cc index afba3cabe2547..d9c9d9ae8f564 100644 --- a/EventFilter/Utilities/src/FastMonitor.cc +++ 
b/EventFilter/Utilities/src/FastMonitor.cc @@ -225,11 +225,12 @@ std::string FastMonitor::getCSVString(int sid) { return ss.str(); } -void FastMonitor::outputCSV(std::string const& path, std::string const& csvString) { +void FastMonitor::outputCSV(std::string const& path, std::vector const& csvs) { std::ofstream outputFile; outputFile.open(path.c_str(), std::fstream::out | std::fstream::trunc); outputFile << defPathFast_ << std::endl; - outputFile << csvString << std::endl; + for (const auto& csvString : csvs) + outputFile << csvString << std::endl; outputFile.close(); } @@ -240,31 +241,6 @@ JsonMonitorable* FastMonitor::getMergedIntJForLumi(std::string const& name, unsi return dataPoints_[it->second]->mergeAndRetrieveValue(forLumi); } -bool FastMonitor::outputFullJSONs(std::string const& pathstem, std::string const& ext, unsigned int lumi, bool output) { - LogDebug("FastMonitor") << "SNAP updates -: " << recentSnaps_ << " (by timer: " << recentSnapsTimer_ - << ") in lumisection "; - - recentSnaps_ = recentSnapsTimer_ = 0; - for (unsigned int i = 0; i < nStreams_; i++) { - //merge even if no output - Json::Value serializeRoot; - for (unsigned int j = 0; j < jsonDpIndex_.size(); j++) { - dataPoints_[jsonDpIndex_[j]]->mergeAndSerialize(serializeRoot, lumi, true, i); - } - if (!output) - continue; - //get extension - std::stringstream tidext; - tidext << "_tid" << i; - std::string path = pathstem + tidext.str() + ext; - - Json::StyledWriter writer; - std::string&& result = writer.write(serializeRoot); - FileIO::writeStringToFile(path, result); - } - return output; -} - bool FastMonitor::outputFullJSON(std::string const& path, unsigned int lumi, bool output) { LogDebug("FastMonitor") << "SNAP updates -: " << recentSnaps_ << " (by timer: " << recentSnapsTimer_ << ") in lumisection "; diff --git a/EventFilter/Utilities/src/FastMonitoringService.cc b/EventFilter/Utilities/src/FastMonitoringService.cc index 0cf41da56bc00..01f347960bf4e 100644 --- 
a/EventFilter/Utilities/src/FastMonitoringService.cc +++ b/EventFilter/Utilities/src/FastMonitoringService.cc @@ -6,7 +6,7 @@ #include "FWCore/ServiceRegistry/interface/SystemBounds.h" #include "FWCore/ServiceRegistry/interface/GlobalContext.h" #include "FWCore/ServiceRegistry/interface/StreamContext.h" -#include "FWCore/ServiceRegistry/interface/PathContext.h" +//#include "FWCore/ServiceRegistry/interface/PathContext.h" #include "EventFilter/Utilities/interface/EvFDaqDirector.h" #include "EventFilter/Utilities/interface/FedRawDataInputSource.h" #include "EventFilter/Utilities/interface/DAQSource.h" @@ -31,7 +31,7 @@ constexpr double throughputFactor() { return (1000000) / double(1024 * 1024); } namespace evf { - const edm::ModuleDescription FastMonitoringService::reservedMicroStateNames[FastMonState::mCOUNT] = { + const edm::ModuleDescription FastMonitoringService::specialMicroStateNames[FastMonState::mCOUNT] = { edm::ModuleDescription("Dummy", "Invalid"), edm::ModuleDescription("Dummy", "Idle"), edm::ModuleDescription("Dummy", "FwkOvhSrc"), @@ -41,7 +41,54 @@ namespace evf { edm::ModuleDescription("Dummy", "DQM"), edm::ModuleDescription("Dummy", "BoL"), edm::ModuleDescription("Dummy", "EoL"), - edm::ModuleDescription("Dummy", "GlobalEoL")}; + edm::ModuleDescription("Dummy", "GlobalEoL"), + edm::ModuleDescription("Dummy", "Fwk"), + edm::ModuleDescription("Dummy", "IdleSource"), + edm::ModuleDescription("Dummy", "Event"), + edm::ModuleDescription("Dummy", "Ignore")}; + + constexpr edm::ModuleDescription const* getmInvalid() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mInvalid]; + } + constexpr edm::ModuleDescription const* getmIdle() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mIdle]; + } + constexpr edm::ModuleDescription const* getmFwkOvhSrc() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mFwkOvhSrc]; + } + constexpr edm::ModuleDescription const* getmFwkOvhMod() { + return 
&FastMonitoringService::specialMicroStateNames[FastMonState::mFwkOvhMod]; + } + constexpr edm::ModuleDescription const* getmFwkEoL() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mFwkEoL]; + } + constexpr edm::ModuleDescription const* getmInput() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mInput]; + } + constexpr edm::ModuleDescription const* getmDqm() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mDqm]; + } + constexpr edm::ModuleDescription const* getmBoL() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mBoL]; + } + constexpr edm::ModuleDescription const* getmEoL() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mEoL]; + } + constexpr edm::ModuleDescription const* getmGlobEoL() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mGlobEoL]; + } + constexpr edm::ModuleDescription const* getmFwk() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mFwk]; + } + constexpr edm::ModuleDescription const* getmIdleSource() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mIdleSource]; + } + constexpr edm::ModuleDescription const* getmEvent() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mEvent]; + } + constexpr edm::ModuleDescription const* getmIgnore() { + return &FastMonitoringService::specialMicroStateNames[FastMonState::mIgnore]; + } const std::string FastMonitoringService::macroStateNames[FastMonState::MCOUNT] = {"Init", "JobReady", @@ -121,18 +168,40 @@ namespace evf { "inSupThrottled", "inThrottled"}; - const std::string FastMonitoringService::nopath_ = "NoPath"; + class ConcurrencyTracker : public tbb::task_scheduler_observer { + std::atomic num_threads; + unsigned max_threads; + std::vector> threadactive_; + + public: + ConcurrencyTracker(unsigned num_expected) + : num_threads(), max_threads(num_expected), threadactive_(num_expected, 0) { + //set 
array to if it will not be used + //for (unsigned i=0;i("tbbMonitoringMode", true)), + tbbConcurrencyTracker_(iPS.getUntrackedParameter("tbbConcurrencyTracker", true) && tbbMonitoringMode_), sleepTime_(iPS.getUntrackedParameter("sleepTime", 1)), fastMonIntervals_(iPS.getUntrackedParameter("fastMonIntervals", 2)), fastName_("fastmoni"), - slowName_("slowmoni"), - filePerFwkStream_(iPS.getUntrackedParameter("filePerFwkStream", false)), totalEventsProcessed_(0), verbose_(iPS.getUntrackedParameter("verbose")) { reg.watchPreallocate(this, &FastMonitoringService::preallocate); //receiving information on number of threads @@ -152,11 +221,10 @@ namespace evf { reg.watchPreStreamEndLumi(this, &FastMonitoringService::preStreamEndLumi); reg.watchPostStreamEndLumi(this, &FastMonitoringService::postStreamEndLumi); - reg.watchPrePathEvent(this, &FastMonitoringService::prePathEvent); - reg.watchPreEvent(this, &FastMonitoringService::preEvent); //stream reg.watchPostEvent(this, &FastMonitoringService::postEvent); + //readEvent (not getNextItemType) reg.watchPreSourceEvent(this, &FastMonitoringService::preSourceEvent); //source (with streamID of requestor) reg.watchPostSourceEvent(this, &FastMonitoringService::postSourceEvent); @@ -190,28 +258,20 @@ namespace evf { void FastMonitoringService::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; desc.setComment("Service for File-based DAQ monitoring and event accounting"); + desc.addUntracked("tbbMonitoringMode", true) + ->setComment("Monitor individual module processing per TBB thread instead of stream"); + desc.addUntracked("tbbConcurrencyTracker", true) + ->setComment("Monitor TBB thread activity to flag microstate as real idle or overhead/other"); desc.addUntracked("sleepTime", 1)->setComment("Sleep time of the monitoring thread"); desc.addUntracked("fastMonIntervals", 2) ->setComment("Modulo of sleepTime intervals on which fastmon file is written out"); - 
desc.addUntracked("filePerFwkStream", false) + desc.addUntracked("filePerFwkStream", true) //obsolete ->setComment("Switches on monitoring output per framework stream"); desc.addUntracked("verbose", false)->setComment("Set to use LogInfo messages from the monitoring thread"); desc.setAllowAnything(); descriptions.add("FastMonitoringService", desc); } - std::string FastMonitoringService::makePathLegendaJson() { - Json::Value legendaVector(Json::arrayValue); - for (int i = 0; i < fmt_->m_data.encPath_[0].current_; i++) - legendaVector.append(Json::Value(*(static_cast(fmt_->m_data.encPath_[0].decode(i))))); - Json::Value valReserved(nReservedPaths); - Json::Value pathLegend; - pathLegend["names"] = legendaVector; - pathLegend["reserved"] = valReserved; - Json::StyledWriter writer; - return writer.write(pathLegend); - } - std::string FastMonitoringService::makeModuleLegendaJson() { Json::Value legendaVector(Json::arrayValue); for (int i = 0; i < fmt_->m_data.encModule_.current_; i++) @@ -251,12 +311,16 @@ namespace evf { nStreams_ = 1; if (nThreads_ == 0) nThreads_ = 1; + nMonThreads_ = std::max(nThreads_, nStreams_); + ct_ = std::make_unique(nThreads_); + //start concurrency tracking } - void FastMonitoringService::preBeginJob(edm::PathsAndConsumesOfModulesBase const& pathsInfo, - edm::ProcessContext const& pc) { + void FastMonitoringService::preBeginJob(edm::PathsAndConsumesOfModulesBase const&, edm::ProcessContext const& pc) { // FIND RUN DIRECTORY // The run dir should be set via the configuration of EvFDaqDirector + if (tbbConcurrencyTracker_) + ct_->activate(); if (edm::Service().operator->() == nullptr) { throw cms::Exception("FastMonitoringService") << "EvFDaqDirector is not present"; @@ -279,15 +343,6 @@ namespace evf { std::filesystem::path fast = workingDirectory_; fast /= fastFileName.str(); fastPath_ = fast.string(); - if (filePerFwkStream_) - for (unsigned int i = 0; i < nStreams_; i++) { - std::ostringstream fastFileNameTid; - fastFileNameTid << 
fastName_ << "_pid" << std::setfill('0') << std::setw(5) << getpid() << "_tid" << i - << ".fast"; - std::filesystem::path fastTid = workingDirectory_; - fastTid /= fastFileNameTid.str(); - fastPathList_.push_back(fastTid.string()); - } std::ostringstream moduleLegFile; std::ostringstream moduleLegFileJson; @@ -296,19 +351,11 @@ namespace evf { moduleLegendFile_ = (workingDirectory_ / moduleLegFile.str()).string(); moduleLegendFileJson_ = (workingDirectory_ / moduleLegFileJson.str()).string(); - std::ostringstream pathLegFile; - std::ostringstream pathLegFileJson; - pathLegFile << "pathlegend_pid" << std::setfill('0') << std::setw(5) << getpid() << ".leg"; - pathLegendFile_ = (workingDirectory_ / pathLegFile.str()).string(); - pathLegFileJson << "pathlegend_pid" << std::setfill('0') << std::setw(5) << getpid() << ".jsn"; - pathLegendFileJson_ = (workingDirectory_ / pathLegFileJson.str()).string(); - std::ostringstream inputLegFileJson; inputLegFileJson << "inputlegend_pid" << std::setfill('0') << std::setw(5) << getpid() << ".jsn"; inputLegendFileJson_ = (workingDirectory_ / inputLegFileJson.str()).string(); LogDebug("FastMonitoringService") << "Initializing FastMonitor with microstate def path -: " << microstateDefPath_; - //<< encPath_.current_ + 1 << " " << encModule_.current_ + 1 /* * initialize the fast monitor with: @@ -320,36 +367,23 @@ namespace evf { fmt_->m_data.macrostate_ = FastMonState::sInit; for (unsigned int i = 0; i < (FastMonState::mCOUNT); i++) - fmt_->m_data.encModule_.updateReserved(static_cast(reservedMicroStateNames + i)); + fmt_->m_data.encModule_.updateReserved(static_cast(specialMicroStateNames + i)); fmt_->m_data.encModule_.completeReservedWithDummies(); - for (unsigned int i = 0; i < nStreams_; i++) { - fmt_->m_data.ministate_.emplace_back(&nopath_); - fmt_->m_data.microstate_.emplace_back(&reservedMicroStateNames[FastMonState::mInvalid]); - fmt_->m_data.microstateAcqFlag_.push_back(0); + for (unsigned int i = 0; i < nMonThreads_; i++) { 
+ microstate_.emplace_back(getmInvalid()); + microstateAcqFlag_.push_back(0); + tmicrostate_.emplace_back(getmInvalid()); + tmicrostateAcqFlag_.push_back(0); //for synchronization streamCounterUpdating_.push_back(new std::atomic(false)); - - //path (mini) state - fmt_->m_data.encPath_.emplace_back(0); - fmt_->m_data.encPath_[i].update(static_cast(&nopath_)); - - for (auto& path : pathsInfo.paths()) { - fmt_->m_data.encPath_[i].updatePreinit(path); - } - for (auto& endPath : pathsInfo.endPaths()) { - fmt_->m_data.encPath_[i].updatePreinit(endPath); - } } - //for (unsigned int i=0;im_data.macrostateBins_ = FastMonState::MCOUNT; fmt_->m_data.microstateBins_ = 0; fmt_->m_data.inputstateBins_ = FastMonState::inCOUNT; - fmt_->m_data.ministateBins_ = fmt_->m_data.encPath_[0].vecsize(); lastGlobalLumi_ = 0; isInitTransition_ = true; @@ -357,22 +391,11 @@ namespace evf { //startup monitoring fmt_->resetFastMonitor(microstateDefPath_, fastMicrostateDefPath_); - fmt_->jsonMonitor_->setNStreams(nStreams_); - fmt_->m_data.registerVariables(fmt_->jsonMonitor_.get(), nStreams_, threadIDAvailable_ ? 
nThreads_ : 0); + fmt_->jsonMonitor_->setNStreams(nMonThreads_); + fmt_->m_data.registerVariables(fmt_->jsonMonitor_.get(), nMonThreads_, nStreams_, nThreads_); monInit_.store(false, std::memory_order_release); if (sleepTime_ > 0) fmt_->start(&FastMonitoringService::snapshotRunner, this); - - //this definition needs: #include "tbb/compat/thread" - //however this would results in TBB imeplementation replacing std::thread - //(both supposedly call pthread_self()) - //number of threads created in process could be obtained from /proc, - //assuming that all posix threads are true kernel threads capable of running in parallel - - //#if TBB_IMPLEMENT_CPP0X - ////std::cout << "TBB thread id:" << tbb::thread::id() << std::endl; - //threadIDAvailable_=true; - //#endif } void FastMonitoringService::preStreamEarlyTermination(edm::StreamContext const& sc, edm::TerminationOrigin to) { @@ -458,8 +481,7 @@ namespace evf { //build a map of modules keyed by their module description address //here we need to treat output modules in a special way so they can be easily singled out if (desc.moduleName() == "Stream" || desc.moduleName() == "GlobalEvFOutputModule" || - desc.moduleName() == "EvFOutputModule" || desc.moduleName() == "EventStreamFileWriter" || - desc.moduleName() == "PoolOutputModule") { + desc.moduleName() == "EventStreamFileWriter" || desc.moduleName() == "PoolOutputModule") { fmt_->m_data.encModule_.updateReserved((void*)&desc); nOutputModules_++; } else @@ -473,9 +495,6 @@ namespace evf { std::string inputLegendStrJson = makeInputLegendaJson(); FileIO::writeStringToFile(inputLegendFileJson_, inputLegendStrJson); - std::string pathLegendStrJson = makePathLegendaJson(); - FileIO::writeStringToFile(pathLegendFileJson_, pathLegendStrJson); - fmt_->m_data.macrostate_ = FastMonState::sJobReady; //update number of entries in module histogram @@ -502,6 +521,11 @@ namespace evf { std::lock_guard lock(fmt_->monlock_); lumiStartTime_[newLumi] = lumiStartTime; + //reset all states 
to idle + if (tbbMonitoringMode_) + for (unsigned i = 0; i < nThreads_; i++) + if (tmicrostate_[i] == getmInvalid()) + tmicrostate_[i] = getmIdle(); } void FastMonitoringService::preGlobalEndLumi(edm::GlobalContext const& gc) { @@ -566,26 +590,8 @@ namespace evf { << " time = " << usecondsForLumi / 1000000 << " size = " << accuSize << " thr = " << throughput; delete lumiProcessedJptr; - //full global and stream merge&output for this lumi - - // create file name for slow monitoring file - bool output = sleepTime_ > 0; - if (filePerFwkStream_) { - std::stringstream slowFileNameStem; - slowFileNameStem << slowName_ << "_ls" << std::setfill('0') << std::setw(4) << lumi << "_pid" << std::setfill('0') - << std::setw(5) << getpid(); - std::filesystem::path slow = workingDirectory_; - slow /= slowFileNameStem.str(); - fmt_->jsonMonitor_->outputFullJSONs(slow.string(), ".jsn", lumi, output); - } else { - std::stringstream slowFileName; - slowFileName << slowName_ << "_ls" << std::setfill('0') << std::setw(4) << lumi << "_pid" << std::setfill('0') - << std::setw(5) << getpid() << ".jsn"; - std::filesystem::path slow = workingDirectory_; - slow /= slowFileName.str(); - //full global and stream merge and JSON write for this lumi - fmt_->jsonMonitor_->outputFullJSON(slow.string(), lumi, output); - } + //full global and stream merge (will be used by output modules), output from this service is deprecated + fmt_->jsonMonitor_->outputFullJSON("dummy", lumi, false); fmt_->jsonMonitor_->discardCollected(lumi); //we don't do further updates for this lumi } @@ -602,95 +608,91 @@ namespace evf { } void FastMonitoringService::preStreamBeginLumi(edm::StreamContext const& sc) { - unsigned int sid = sc.streamID().value(); - std::lock_guard lock(fmt_->monlock_); - fmt_->m_data.streamLumi_[sid] = sc.eventID().luminosityBlock(); + fmt_->m_data.streamLumi_[sc.streamID().value()] = sc.eventID().luminosityBlock(); //reset collected values for this stream - *(fmt_->m_data.processed_[sid]) = 0; + 
*(fmt_->m_data.processed_[sc.streamID().value()]) = 0; - fmt_->m_data.ministate_[sid] = &nopath_; - fmt_->m_data.microstate_[sid] = &reservedMicroStateNames[FastMonState::mBoL]; + microstate_[sc.streamID().value()] = getmBoL(); } void FastMonitoringService::postStreamBeginLumi(edm::StreamContext const& sc) { - fmt_->m_data.microstate_[sc.streamID().value()] = &reservedMicroStateNames[FastMonState::mIdle]; + microstate_[sc.streamID().value()] = getmIdle(); } void FastMonitoringService::preStreamEndLumi(edm::StreamContext const& sc) { - unsigned int sid = sc.streamID().value(); std::lock_guard lock(fmt_->monlock_); - //update processed count to be complete at this time //doStreamEOLSnapshot(sc.eventID().luminosityBlock(), sid); - fmt_->jsonMonitor_->snapStreamAtomic(sc.eventID().luminosityBlock(), sid); + fmt_->jsonMonitor_->snapStreamAtomic(sc.eventID().luminosityBlock(), sc.streamID().value()); //reset this in case stream does not get notified of next lumi (we keep processed events only) - fmt_->m_data.ministate_[sid] = &nopath_; - fmt_->m_data.microstate_[sid] = &reservedMicroStateNames[FastMonState::mEoL]; + microstate_[sc.streamID().value()] = getmEoL(); } + void FastMonitoringService::postStreamEndLumi(edm::StreamContext const& sc) { - fmt_->m_data.microstate_[sc.streamID().value()] = &reservedMicroStateNames[FastMonState::mFwkEoL]; + microstate_[sc.streamID().value()] = getmFwkEoL(); } - void FastMonitoringService::prePathEvent(edm::StreamContext const& sc, edm::PathContext const& pc) { - fmt_->m_data.ministate_[sc.streamID()] = &(pc.pathName()); + void FastMonitoringService::preEvent(edm::StreamContext const& sc) { + microstate_[sc.streamID().value()] = getmEvent(); } - void FastMonitoringService::preEvent(edm::StreamContext const& sc) {} - void FastMonitoringService::postEvent(edm::StreamContext const& sc) { - fmt_->m_data.microstate_[sc.streamID()] = &reservedMicroStateNames[FastMonState::mIdle]; - - fmt_->m_data.ministate_[sc.streamID()] = &nopath_; - - 
(*(fmt_->m_data.processed_[sc.streamID()]))++; - + (*(fmt_->m_data.processed_[sc.streamID().value()]))++; //fast path counter (events accumulated in a run) unsigned long res = totalEventsProcessed_.fetch_add(1, std::memory_order_relaxed); fmt_->m_data.fastPathProcessedJ_ = res + 1; + + microstate_[sc.streamID().value()] = getmIdle(); } void FastMonitoringService::preSourceEvent(edm::StreamID sid) { - fmt_->m_data.microstate_[sid.value()] = &reservedMicroStateNames[FastMonState::mInput]; + microstate_[getSID(sid)] = getmInput(); + if (!tbbMonitoringMode_) + return; + tmicrostate_[getTID()] = getmInput(); } void FastMonitoringService::postSourceEvent(edm::StreamID sid) { - fmt_->m_data.microstate_[sid.value()] = &reservedMicroStateNames[FastMonState::mFwkOvhSrc]; + microstate_[getSID(sid)] = getmFwkOvhSrc(); + if (!tbbMonitoringMode_) + return; + tmicrostate_[getTID()] = getmIdle(); } void FastMonitoringService::preModuleEventAcquire(edm::StreamContext const& sc, edm::ModuleCallingContext const& mcc) { - fmt_->m_data.microstate_[sc.streamID().value()] = (void*)(mcc.moduleDescription()); + microstate_[getSID(sc)] = (void*)(mcc.moduleDescription()); + microstateAcqFlag_[getSID(sc)] = 1; + if (!tbbMonitoringMode_) + return; + tmicrostate_[getTID()] = (void*)(mcc.moduleDescription()); + tmicrostateAcqFlag_[getTID()] = 1; } void FastMonitoringService::postModuleEventAcquire(edm::StreamContext const& sc, edm::ModuleCallingContext const& mcc) { - //fmt_->m_data.microstate_[sc.streamID().value()] = (void*)(mcc.moduleDescription()); - fmt_->m_data.microstateAcqFlag_[sc.streamID().value()] = 1; + microstate_[getSID(sc)] = getmFwkOvhMod(); + microstateAcqFlag_[getSID(sc)] = 0; + if (!tbbMonitoringMode_) + return; + tmicrostate_[getTID()] = getmIdle(); + tmicrostateAcqFlag_[getTID()] = 0; } void FastMonitoringService::preModuleEvent(edm::StreamContext const& sc, edm::ModuleCallingContext const& mcc) { - fmt_->m_data.microstate_[sc.streamID().value()] = 
(void*)(mcc.moduleDescription()); - fmt_->m_data.microstateAcqFlag_[sc.streamID().value()] = 0; + microstate_[getSID(sc)] = (void*)(mcc.moduleDescription()); + if (!tbbMonitoringMode_) + return; + tmicrostate_[getTID()] = (void*)(mcc.moduleDescription()); } void FastMonitoringService::postModuleEvent(edm::StreamContext const& sc, edm::ModuleCallingContext const& mcc) { - fmt_->m_data.microstate_[sc.streamID().value()] = &reservedMicroStateNames[FastMonState::mFwkOvhMod]; - } - - //FUNCTIONS CALLED FROM OUTSIDE - - //this is for old-fashioned service that is not thread safe and can block other streams - //(we assume the worst case - everything is blocked) - void FastMonitoringService::setMicroState(FastMonState::Microstate m) { - for (unsigned int i = 0; i < nStreams_; i++) - fmt_->m_data.microstate_[i] = &reservedMicroStateNames[m]; - } - - //this is for services that are multithreading-enabled or rarely blocks other streams - void FastMonitoringService::setMicroState(edm::StreamID sid, FastMonState::Microstate m) { - fmt_->m_data.microstate_[sid] = &reservedMicroStateNames[m]; + microstate_[getSID(sc)] = getmFwkOvhMod(); + if (!tbbMonitoringMode_) + return; + tmicrostate_[getTID()] = getmIdle(); } //from source @@ -749,6 +751,10 @@ namespace evf { lockStatsDuringLumi_[ls] = std::pair(waitTime, lockCount); } + void FastMonitoringService::setTMicrostate(FastMonState::Microstate m) { + tmicrostate_[tbb::this_task_arena::current_thread_index()] = &specialMicroStateNames[m]; + } + //for the output module unsigned int FastMonitoringService::getEventsProcessedForLumi(unsigned int lumi, bool* abortFlag) { std::lock_guard lock(fmt_->monlock_); @@ -793,28 +799,17 @@ namespace evf { doSnapshot(lastGlobalLumi_, false); - lastEnc.emplace_back(fmt_->m_data.ministateEncoded_); + lastEnc.emplace_back(fmt_->m_data.tmicrostateEncoded_); lastEnc.emplace_back(fmt_->m_data.microstateEncoded_); if (fastMonIntervals_ && (snapCounter_ % fastMonIntervals_) == 0) { - if (filePerFwkStream_) 
{ - std::vector CSVv; - for (unsigned int i = 0; i < nStreams_; i++) { - CSVv.push_back(fmt_->jsonMonitor_->getCSVString((int)i)); - } - // release mutex before writing out fast path file - lock.release()->unlock(); - for (unsigned int i = 0; i < nStreams_; i++) { - if (!CSVv[i].empty()) - fmt_->jsonMonitor_->outputCSV(fastPathList_[i], CSVv[i]); - } - } else { - std::string CSV = fmt_->jsonMonitor_->getCSVString(); - // release mutex before writing out fast path file - lock.release()->unlock(); - if (!CSV.empty()) - fmt_->jsonMonitor_->outputCSV(fastPath_, CSV); + std::vector CSVv; + for (unsigned int i = 0; i < nMonThreads_; i++) { + CSVv.push_back(fmt_->jsonMonitor_->getCSVString((int)i)); } + // release mutex before writing out fast path file + lock.release()->unlock(); + fmt_->jsonMonitor_->outputCSV(fastPath_, CSVv); } snapCounter_++; } @@ -822,10 +817,10 @@ namespace evf { if (verbose_) { edm::LogInfo msg("FastMonitoringService"); auto f = [&](std::vector const& p) { - for (unsigned int i = 0; i < nStreams_; i++) { + for (unsigned int i = 0; i < nMonThreads_; i++) { if (i == 0) msg << "[" << p[i] << ","; - else if (i <= nStreams_ - 1) + else if (i <= nMonThreads_ - 1) msg << p[i] << ","; else msg << p[i] << "]"; @@ -847,9 +842,10 @@ namespace evf { // update macrostate fmt_->m_data.fastMacrostateJ_ = fmt_->m_data.macrostate_; - std::vector microstateCopy(fmt_->m_data.microstate_.begin(), fmt_->m_data.microstate_.end()); - std::vector microstateAcqCopy(fmt_->m_data.microstateAcqFlag_.begin(), - fmt_->m_data.microstateAcqFlag_.end()); + std::vector microstateCopy(microstate_.begin(), microstate_.end()); + std::vector tmicrostateCopy(tmicrostate_.begin(), tmicrostate_.end()); + std::vector microstateAcqCopy(microstateAcqFlag_.begin(), microstateAcqFlag_.end()); + std::vector tmicrostateAcqCopy(tmicrostateAcqFlag_.begin(), tmicrostateAcqFlag_.end()); if (!isInitTransition_) { auto itd = avgLeadTime_.find(ls); @@ -874,8 +870,19 @@ namespace evf { } } + for 
(unsigned int i = 0; i < nThreads_; i++) { + if (tmicrostateCopy[i] == getmIdle() && ct_->isThreadActive(i)) { + //overhead if thread is running + tmicrostateCopy[i] = getmFwk(); + } + if (tmicrostateAcqCopy[i]) + fmt_->m_data.tmicrostateEncoded_[i] = + fmt_->m_data.microstateBins_ + fmt_->m_data.encModule_.encode(tmicrostateCopy[i]); + else + fmt_->m_data.tmicrostateEncoded_[i] = fmt_->m_data.encModule_.encode(tmicrostateCopy[i]); + } + for (unsigned int i = 0; i < nStreams_; i++) { - fmt_->m_data.ministateEncoded_[i] = fmt_->m_data.encPath_[i].encodeString(fmt_->m_data.ministate_[i]); if (microstateAcqCopy[i]) fmt_->m_data.microstateEncoded_[i] = fmt_->m_data.microstateBins_ + fmt_->m_data.encModule_.encode(microstateCopy[i]); @@ -987,20 +994,22 @@ namespace evf { } } else if (inputState_ == FastMonState::inNoRequest) { inputStatePerThread = true; - for (unsigned int i = 0; i < nStreams_; i++) { - if (microstateCopy[i] == &reservedMicroStateNames[FastMonState::mIdle]) + for (unsigned int i = 0; i < nMonThreads_; i++) { + if (i >= nStreams_) + fmt_->m_data.inputState_[i] = FastMonState::inIgnore; + else if (microstateCopy[i] == getmIdle()) fmt_->m_data.inputState_[i] = FastMonState::inNoRequestWithIdleThreads; - else if (microstateCopy[i] == &reservedMicroStateNames[FastMonState::mEoL] || - microstateCopy[i] == &reservedMicroStateNames[FastMonState::mFwkEoL]) + else if (microstateCopy[i] == getmEoL() || microstateCopy[i] == getmFwkEoL()) fmt_->m_data.inputState_[i] = FastMonState::inNoRequestWithEoLThreads; else fmt_->m_data.inputState_[i] = FastMonState::inNoRequest; } } else if (inputState_ == FastMonState::inNewLumi) { inputStatePerThread = true; - for (unsigned int i = 0; i < nStreams_; i++) { - if (microstateCopy[i] == &reservedMicroStateNames[FastMonState::mEoL] || - microstateCopy[i] == &reservedMicroStateNames[FastMonState::mFwkEoL]) + for (unsigned int i = 0; i < nMonThreads_; i++) { + if (i >= nStreams_) + fmt_->m_data.inputState_[i] = 
FastMonState::inIgnore; + else if (microstateCopy[i] == getmEoL() || microstateCopy[i] == getmFwkEoL()) fmt_->m_data.inputState_[i] = FastMonState::inNewLumi; } } else if (inputSupervisorState_ == FastMonState::inSupThrottled) { @@ -1011,7 +1020,7 @@ namespace evf { //this is same for all streams if (!inputStatePerThread) - for (unsigned int i = 1; i < nStreams_; i++) + for (unsigned int i = 1; i < nMonThreads_; i++) fmt_->m_data.inputState_[i] = fmt_->m_data.inputState_[0]; if (isGlobalEOL) { //only update global variables diff --git a/EventFilter/Utilities/src/FedRawDataInputSource.cc b/EventFilter/Utilities/src/FedRawDataInputSource.cc index 9a608dd182576..ad3b51661cd64 100644 --- a/EventFilter/Utilities/src/FedRawDataInputSource.cc +++ b/EventFilter/Utilities/src/FedRawDataInputSource.cc @@ -32,7 +32,7 @@ #include "EventFilter/Utilities/interface/FedRawDataInputSource.h" -#include "EventFilter/Utilities/interface/FastMonitoringService.h" +#include "EventFilter/Utilities/interface/SourceCommon.h" #include "EventFilter/Utilities/interface/DataPointDefinition.h" #include "EventFilter/Utilities/interface/FFFNamingSchema.h" @@ -362,11 +362,14 @@ inline evf::EvFDaqDirector::FileStatus FedRawDataInputSource::getNextEvent() { if (!currentFile_.get()) { evf::EvFDaqDirector::FileStatus status = evf::EvFDaqDirector::noFile; setMonState(inWaitInput); - if (!fileQueue_.try_pop(currentFile_)) { - //sleep until wakeup (only in single-buffer mode) or timeout - std::unique_lock lkw(mWakeup_); - if (cvWakeup_.wait_for(lkw, std::chrono::milliseconds(100)) == std::cv_status::timeout || !currentFile_.get()) - return evf::EvFDaqDirector::noFile; + { + IdleSourceSentry ids(fms_); + if (!fileQueue_.try_pop(currentFile_)) { + //sleep until wakeup (only in single-buffer mode) or timeout + std::unique_lock lkw(mWakeup_); + if (cvWakeup_.wait_for(lkw, std::chrono::milliseconds(100)) == std::cv_status::timeout || !currentFile_.get()) + return evf::EvFDaqDirector::noFile; + } } status = 
currentFile_->status_; if (status == evf::EvFDaqDirector::runEnded) { @@ -469,10 +472,13 @@ inline evf::EvFDaqDirector::FileStatus FedRawDataInputSource::getNextEvent() { if (singleBufferMode_) { //should already be there setMonState(inWaitChunk); - while (!currentFile_->waitForChunk(currentFile_->currentChunk_)) { - usleep(10000); - if (currentFile_->parent_->exceptionState() || setExceptionState_) - currentFile_->parent_->threadError(); + { + IdleSourceSentry ids(fms_); + while (!currentFile_->waitForChunk(currentFile_->currentChunk_)) { + usleep(10000); + if (currentFile_->parent_->exceptionState() || setExceptionState_) + currentFile_->parent_->threadError(); + } } setMonState(inChunkReceived); @@ -528,10 +534,13 @@ inline evf::EvFDaqDirector::FileStatus FedRawDataInputSource::getNextEvent() { else { //wait for the current chunk to become added to the vector setMonState(inWaitChunk); - while (!currentFile_->waitForChunk(currentFile_->currentChunk_)) { - usleep(10000); - if (setExceptionState_) - threadError(); + { + IdleSourceSentry ids(fms_); + while (!currentFile_->waitForChunk(currentFile_->currentChunk_)) { + usleep(10000); + if (setExceptionState_) + threadError(); + } } setMonState(inChunkReceived); @@ -575,9 +584,10 @@ inline evf::EvFDaqDirector::FileStatus FedRawDataInputSource::getNextEvent() { //copy event to a chunk start and move pointers setMonState(inWaitChunk); - - chunkEnd = currentFile_->advance(dataPosition, FRDHeaderVersionSize[detectedFRDversion_] + msgSize); - + { + IdleSourceSentry ids(fms_); + chunkEnd = currentFile_->advance(dataPosition, FRDHeaderVersionSize[detectedFRDversion_] + msgSize); + } setMonState(inChunkReceived); assert(chunkEnd); diff --git a/EventFilter/Utilities/test/FU_scouting.py b/EventFilter/Utilities/test/FU_scouting.py deleted file mode 100644 index 96f1af7d0a436..0000000000000 --- a/EventFilter/Utilities/test/FU_scouting.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import print_function -import 
FWCore.ParameterSet.Config as cms -import FWCore.ParameterSet.VarParsing as VarParsing -import os - -options = VarParsing.VarParsing ('analysis') - -options.register ('runNumber', -# 325175, # default value - 325172, # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.int, # string, int, or float - "Run Number") - -options.register ('buBaseDir', - 'ramdisk', # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.string, # string, int, or float - "BU base directory") - -options.register ('fuBaseDir', - 'data', # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.string, # string, int, or float - "BU base directory") - -options.register ('fffBaseDir', - '.', # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.string, # string, int, or float - "FFF base directory") - -options.register ('numThreads', - 4, # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.int, # string, int, or float - "Number of CMSSW threads") - -options.register ('numFwkStreams', - 4, # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.int, # string, int, or float - "Number of CMSSW streams") - - -options.parseArguments() - -cmsswbase = os.path.expandvars("$CMSSW_BASE/") - -process = cms.Process("TESTFU") -process.maxEvents.input - -1 - -process.options = dict( - numberOfThreads = options.numThreads, - numberOfStreams = options.numFwkStreams, -# numberOfConcurrentLuminosityBlocks = 1 -) - -process.MessageLogger = cms.Service("MessageLogger", - cout = cms.untracked.PSet(threshold = cms.untracked.string( "ERROR" )), - destinations = cms.untracked.vstring( 'cout' ) -) - -process.FastMonitoringService = cms.Service("FastMonitoringService", - sleepTime = cms.untracked.int32(1) -) - -process.EvFDaqDirector = cms.Service("EvFDaqDirector", - 
useFileBroker = cms.untracked.bool(False), - fileBrokerHostFromCfg = cms.untracked.bool(True), - fileBrokerHost = cms.untracked.string("htcp40.cern.ch"), - runNumber = cms.untracked.uint32(options.runNumber), - baseDir = cms.untracked.string(options.fffBaseDir+"/"+options.fuBaseDir), - buBaseDir = cms.untracked.string(options.fffBaseDir+"/"+options.buBaseDir), - directorIsBU = cms.untracked.bool(False), -) - -try: - os.makedirs(options.fffBaseDir+"/"+options.fuBaseDir+"/run"+str(options.runNumber).zfill(6)) -except Exception as ex: - print(str(ex)) - pass - -ram_dir_path=options.buBaseDir+"/run"+str(options.runNumber).zfill(6)+"/" - -process.source = cms.Source("DAQSource", - dataMode = cms.untracked.string("ScoutingRun2"), - verifyChecksum = cms.untracked.bool(True), - useL1EventID = cms.untracked.bool(False), - eventChunkSize = cms.untracked.uint32(8), - eventChunkBlock = cms.untracked.uint32(8), - numBuffers = cms.untracked.uint32(2), - maxBufferedFiles = cms.untracked.uint32(2), - fileListMode = cms.untracked.bool(True), - fileNames = cms.untracked.vstring( -# ram_dir_path+"run325175_ls0001_index000001.raw" - ram_dir_path+"run325172_ls0455_index000000.raw" - ) - -) - -process.output = cms.OutputModule("PoolOutputModule", - fileName = cms.untracked.string('file:PoolOutputTest.root'), - outputCommands = cms.untracked.vstring("drop *","keep *_rawDataCollector_*_*") - ) - - -process.ep = cms.EndPath( -# process.streamA -# + process.streamB -# + process.streamC -# + process.streamD - process.output -) diff --git a/EventFilter/Utilities/test/FU_scouting_2.py b/EventFilter/Utilities/test/FU_scouting_2.py deleted file mode 100644 index a45fb4194babc..0000000000000 --- a/EventFilter/Utilities/test/FU_scouting_2.py +++ /dev/null @@ -1,117 +0,0 @@ -from __future__ import print_function -import FWCore.ParameterSet.Config as cms -import FWCore.ParameterSet.VarParsing as VarParsing -import os - -options = VarParsing.VarParsing ('analysis') - -options.register ('runNumber', 
-# 325175, # default value - 325172, # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.int, # string, int, or float - "Run Number") - -options.register ('buBaseDir', - 'ramdisk', # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.string, # string, int, or float - "BU base directory") - -options.register ('fuBaseDir', - 'data', # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.string, # string, int, or float - "BU base directory") - -options.register ('fffBaseDir', - '.', # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.string, # string, int, or float - "FFF base directory") - -options.register ('numThreads', - 1, # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.int, # string, int, or float - "Number of CMSSW threads") - -options.register ('numFwkStreams', - 1, # default value - VarParsing.VarParsing.multiplicity.singleton, - VarParsing.VarParsing.varType.int, # string, int, or float - "Number of CMSSW streams") - - -options.parseArguments() - -cmsswbase = os.path.expandvars("$CMSSW_BASE/") - -process = cms.Process("TESTFU") -process.maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(-1) -) - -process.options = cms.untracked.PSet( - numberOfThreads = cms.untracked.uint32(options.numThreads), - numberOfStreams = cms.untracked.uint32(options.numFwkStreams), - numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1) # ShmStreamConsumer requires synchronization at LuminosityBlock boundaries -) -process.MessageLogger = cms.Service("MessageLogger", - cout = cms.untracked.PSet(threshold = cms.untracked.string( "INFO" )), - destinations = cms.untracked.vstring( 'cout' ) -) - -process.FastMonitoringService = cms.Service("FastMonitoringService", - sleepTime = cms.untracked.int32(1) -) - -process.EvFDaqDirector = 
cms.Service("EvFDaqDirector", - useFileBroker = cms.untracked.bool(False), - fileBrokerHostFromCfg = cms.untracked.bool(True), - fileBrokerHost = cms.untracked.string("htcp40.cern.ch"), - runNumber = cms.untracked.uint32(options.runNumber), - baseDir = cms.untracked.string(options.fffBaseDir+"/"+options.fuBaseDir), - buBaseDir = cms.untracked.string(options.fffBaseDir+"/"+options.buBaseDir), - directorIsBU = cms.untracked.bool(False), -) - -try: - os.makedirs(options.fffBaseDir+"/"+options.fuBaseDir+"/run"+str(options.runNumber).zfill(6)) -except Exception as ex: - print(str(ex)) - pass - -ram_dir_path=options.buBaseDir+"/run"+str(options.runNumber).zfill(6)+"/" - -process.source = cms.Source("DAQSource", -# dataMode = cms.untracked.string("ScoutingRun2Muon"), - dataMode = cms.untracked.string("ScoutingRun2Multi"), - verifyChecksum = cms.untracked.bool(True), - useL1EventID = cms.untracked.bool(False), - eventChunkSize = cms.untracked.uint32(8), - eventChunkBlock = cms.untracked.uint32(8), - numBuffers = cms.untracked.uint32(2), - maxBufferedFiles = cms.untracked.uint32(2), - fileListMode = cms.untracked.bool(True), - fileNames = cms.untracked.vstring( -# ram_dir_path+"run325175_ls0001_index000001.raw" - ram_dir_path+"run325172_ls0010_index000000.raw", - ram_dir_path+"run325172_ls0380_index000000.raw" - ) - -) - -process.output = cms.OutputModule("PoolOutputModule", - fileName = cms.untracked.string('file:PoolOutputTest.root'), - outputCommands = cms.untracked.vstring("drop *","keep *_rawDataCollector_*_*") - ) - - -process.ep = cms.EndPath( -# process.streamA -# + process.streamB -# + process.streamC -# + process.streamD - process.output -) diff --git a/EventFilter/Utilities/test/dumpMuonScouting.py b/EventFilter/Utilities/test/dumpMuonScouting.py deleted file mode 100644 index f09081855f5ab..0000000000000 --- a/EventFilter/Utilities/test/dumpMuonScouting.py +++ /dev/null @@ -1,30 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -process = cms.Process( "DUMP" ) 
- -process.MessageLogger = cms.Service("MessageLogger", - cerr = cms.untracked.PSet( - enable = cms.untracked.bool(False) - ), - cout = cms.untracked.PSet( - enable = cms.untracked.bool(True), - threshold = cms.untracked.string('INFO') - ) -) - - -process.source = cms.Source("PoolSource", - fileNames = cms.untracked.vstring("file:PoolOutputTest.root") -) - -process.dump = cms.EDAnalyzer("DumpMuonScouting", - muInputTag = cms.InputTag("rawDataCollector"), - minBx = cms.int32(0), - maxBx = cms.int32(4000) -) - - -process.p = cms.Path( - process.dump -) - diff --git a/EventFilter/Utilities/test/startFU.py b/EventFilter/Utilities/test/startFU.py index 7a72c69fc6a35..03cf3e1798af2 100644 --- a/EventFilter/Utilities/test/startFU.py +++ b/EventFilter/Utilities/test/startFU.py @@ -30,7 +30,7 @@ "FFF base directory") options.register ('numThreads', - 2, # default value + 3, # default value VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.int, # string, int, or float "Number of CMSSW threads") @@ -127,7 +127,7 @@ process.HLT_Physics = cms.Path(process.a*process.tcdsRawToDigi*process.filter1) process.HLT_Muon = cms.Path(process.b*process.filter2) -process.streamA = cms.OutputModule("EvFOutputModule", +process.streamA = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'HLT_Physics' )) ) diff --git a/EventFilter/Utilities/test/start_multiLS_FU.py b/EventFilter/Utilities/test/start_multiLS_FU.py index 1a81e5e00ee27..adf4d405b76df 100644 --- a/EventFilter/Utilities/test/start_multiLS_FU.py +++ b/EventFilter/Utilities/test/start_multiLS_FU.py @@ -30,7 +30,7 @@ "FFF base directory") options.register ('numThreads', - 2, # default value + 3, # default value VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.int, # string, int, or float "Number of CMSSW threads") @@ -123,11 +123,11 @@ process.p1 = cms.Path(process.a*process.filter1) process.p2 = 
cms.Path(process.b*process.filter2) -process.streamA = cms.OutputModule("EvFOutputModule", +process.streamA = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'p1' )) ) -process.streamB = cms.OutputModule("EvFOutputModule", +process.streamB = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'p2' )) ) diff --git a/EventFilter/Utilities/test/testScoutingRun3_unpackers.py b/EventFilter/Utilities/test/testScoutingRun3_unpackers.py new file mode 100644 index 0000000000000..b5e961df12de3 --- /dev/null +++ b/EventFilter/Utilities/test/testScoutingRun3_unpackers.py @@ -0,0 +1,160 @@ +from __future__ import print_function +import FWCore.ParameterSet.Config as cms +import FWCore.ParameterSet.VarParsing as VarParsing +import os, sys + +options = VarParsing.VarParsing ("analysis") + +options.register ("runNumber", + 368636, + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.int, + "Run Number") + +options.register ("daqSourceMode", + "ScoutingRun3", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "DAQ source data mode") + +options.register ("buBaseDir", + "/dev/shm", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "BU base directory") + +options.register ("fuBaseDir", + "/tmp/", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "BU base directory") + +options.register ("fffBaseDir", + "/dev/shm", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "FFF base directory") + +options.register ("numThreads", + 8, + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.int, + "Number of CMSSW threads") + +options.register ("numFwkStreams", + 8, + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.int, + "Number of CMSSW streams") + 
+options.parseArguments() + +cmsswbase = os.path.expandvars("$CMSSW_BASE/") + +process = cms.Process("SCPU") +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(-1) +) + +process.options = cms.untracked.PSet( + wantSummary = cms.untracked.bool(True), + numberOfThreads = cms.untracked.uint32(options.numThreads), + numberOfStreams = cms.untracked.uint32(options.numFwkStreams), + numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1) +) +process.MessageLogger = cms.Service("MessageLogger", + cout = cms.untracked.PSet( + threshold = cms.untracked.string( "WARNING" ) + ), + destinations = cms.untracked.vstring( "cout" ), +) + +process.FastMonitoringService = cms.Service("FastMonitoringService", + sleepTime = cms.untracked.int32(1) +) + +process.Timing = cms.Service("Timing", + summaryOnly = cms.untracked.bool(True), + useJobReport = cms.untracked.bool(True) +) + +process.EvFDaqDirector = cms.Service("EvFDaqDirector", + useFileBroker = cms.untracked.bool(False), + buBaseDirsAll = cms.untracked.vstring( + options.buBaseDir + ), + buBaseDirsNumStreams = cms.untracked.vint32( + 2 + ), + fileBrokerHostFromCfg = cms.untracked.bool(True), + fileBrokerHost = cms.untracked.string("htcp40.cern.ch"), + runNumber = cms.untracked.uint32(options.runNumber), + baseDir = cms.untracked.string(options.fffBaseDir+"/"+options.fuBaseDir), + buBaseDir = cms.untracked.string(options.fffBaseDir+"/"+options.buBaseDir), + directorIsBU = cms.untracked.bool(False), +) + +try: + os.makedirs(options.fffBaseDir+"/"+options.fuBaseDir+"/run"+str(options.runNumber).zfill(6)) +except Exception as ex: + print(str(ex)) + pass + +ram_dir_path=options.buBaseDir+"/run"+str(options.runNumber).zfill(6)+"/" +flist = [ + ram_dir_path + "run" + str(options.runNumber) + "_ls0340_index000028.raw" +] + +process.source = cms.Source("DAQSource", + testing = cms.untracked.bool(True), + dataMode = cms.untracked.string(options.daqSourceMode), + verifyChecksum = cms.untracked.bool(False), + 
useL1EventID = cms.untracked.bool(False), + eventChunkBlock = cms.untracked.uint32(64), + eventChunkSize = cms.untracked.uint32(128), + maxChunkSize = cms.untracked.uint32(256), + numBuffers = cms.untracked.uint32(2), + maxBufferedFiles = cms.untracked.uint32(2), + fileListMode = cms.untracked.bool(True), + fileNames = cms.untracked.vstring(*flist) + +) + +fuDir = options.fuBaseDir+("/run%06d" % options.runNumber) +buDir = options.buBaseDir+("/run%06d" % options.runNumber) +for d in fuDir, buDir, options.fuBaseDir, options.buBaseDir: + if not os.path.isdir(d): + os.makedirs(d) +os.system("touch " + buDir + "/" + "fu.lock") + +process.GmtUnpacker = cms.EDProducer('ScGMTRawToDigi', + srcInputTag = cms.InputTag('rawDataCollector'), + debug = cms.untracked.bool(False) +) + +process.CaloUnpacker = cms.EDProducer('ScCaloRawToDigi', + srcInputTag = cms.InputTag('rawDataCollector'), + enableAllSums = cms.untracked.bool(True), + debug = cms.untracked.bool(False) +) + +process.outputZB = cms.OutputModule("PoolOutputModule", + fileName = cms.untracked.string('file:/dev/shm/PoolOutputTest.root'), + outputCommands = cms.untracked.vstring( + "drop *", + "keep *_GmtUnpacker_*_*", + "keep *_CaloUnpacker_*_*" + ), + #compressionAlgorithm = cms.untracked.string("ZSTD"), + #compressionLevel = cms.untracked.int32(4) +) + +rawToDigiTask = cms.Task( + process.GmtUnpacker,process.CaloUnpacker +) + +process.p = cms.Path(rawToDigiTask) + +process.ep = cms.EndPath( + process.outputZB +) diff --git a/EventFilter/Utilities/test/unittest_FU.py b/EventFilter/Utilities/test/unittest_FU.py index 91a661228dbac..aa99c931a8b80 100644 --- a/EventFilter/Utilities/test/unittest_FU.py +++ b/EventFilter/Utilities/test/unittest_FU.py @@ -30,7 +30,7 @@ "FFF base directory") options.register ('numThreads', - 2, # default value + 3, # default value VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.int, # string, int, or float "Number of CMSSW threads") @@ -138,7 +138,7 @@ 
process.HLT_Physics = cms.Path(process.a*process.tcdsRawToDigi*process.filter1) process.HLT_Muon = cms.Path(process.b*process.filter2) -process.streamA = cms.OutputModule("EvFOutputModule", +process.streamA = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'HLT_Physics' )) ) diff --git a/EventFilter/Utilities/test/unittest_FU_daqsource.py b/EventFilter/Utilities/test/unittest_FU_daqsource.py index afbd801029eef..544df16429569 100644 --- a/EventFilter/Utilities/test/unittest_FU_daqsource.py +++ b/EventFilter/Utilities/test/unittest_FU_daqsource.py @@ -36,7 +36,7 @@ "FFF base directory") options.register ('numThreads', - 2, # default value + 3, # default value VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.int, # string, int, or float "Number of CMSSW threads") @@ -145,15 +145,15 @@ process.p1 = cms.Path(process.a*process.tcdsRawToDigi*process.filter1) process.p2 = cms.Path(process.b*process.filter2) -process.streamA = cms.OutputModule("EvFOutputModule", +process.streamA = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'p1' )) ) -process.streamB = cms.OutputModule("EvFOutputModule", +process.streamB = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'p2' )) ) -process.streamC = cms.OutputModule("EvFOutputModule", +process.streamC = cms.OutputModule("GlobalEvFOutputModule", SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring( 'p2' )) ) diff --git a/FWCore/Framework/interface/CallbackBase.h b/FWCore/Framework/interface/CallbackBase.h index 6854096c549aa..814001527bc1e 100644 --- a/FWCore/Framework/interface/CallbackBase.h +++ b/FWCore/Framework/interface/CallbackBase.h @@ -70,7 +70,7 @@ namespace edm { CallbackBase(T* iProd, std::shared_ptr iProduceFunc, unsigned int iID, const TDecorator& iDec) : proxyData_{}, producer_(iProd), - 
callingContext_(&iProd->description()), + callingContext_(&iProd->description(), iID), produceFunction_(std::move(iProduceFunc)), id_(iID), wasCalledForThisRecord_(false), diff --git a/FWCore/Framework/interface/EventForTransformer.h b/FWCore/Framework/interface/EventForTransformer.h index 6cddefe4263ed..4b627e6d2f861 100644 --- a/FWCore/Framework/interface/EventForTransformer.h +++ b/FWCore/Framework/interface/EventForTransformer.h @@ -16,6 +16,7 @@ #include "DataFormats/Common/interface/WrapperBase.h" #include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/ServiceRegistry/interface/ModuleCallingContext.h" #include "FWCore/Utilities/interface/TypeID.h" #include "FWCore/Utilities/interface/ProductResolverIndex.h" @@ -24,19 +25,20 @@ namespace edm { class EventPrincipal; - class ModuleCallingContext; class EventForTransformer { public: - EventForTransformer(EventPrincipal const&, ModuleCallingContext const*); + EventForTransformer(EventPrincipal const&, ModuleCallingContext); BasicHandle get(edm::TypeID const& iTypeID, ProductResolverIndex iIndex) const; void put(ProductResolverIndex index, std::unique_ptr edp, BasicHandle const& iGetHandle); + ModuleCallingContext const& moduleCallingContext() const { return mcc_; } + private: EventPrincipal const& eventPrincipal_; - ModuleCallingContext const* mcc_; + ModuleCallingContext mcc_; }; } // namespace edm #endif diff --git a/FWCore/Framework/interface/EventProcessor.h b/FWCore/Framework/interface/EventProcessor.h index 7740f0afbd2b2..a63d6299f7150 100644 --- a/FWCore/Framework/interface/EventProcessor.h +++ b/FWCore/Framework/interface/EventProcessor.h @@ -185,8 +185,9 @@ namespace edm { // The following functions are used by the code implementing // transition handling. 
- InputSource::ItemType nextTransitionType(); - InputSource::ItemType lastTransitionType() const { return lastSourceTransition_; } + InputSource::ItemTypeInfo nextTransitionType(); + InputSource::ItemTypeInfo lastTransitionType() const { return lastSourceTransition_; } + void nextTransitionTypeAsync(std::shared_ptr iRunStatus, WaitingTaskHolder nextTask); void readFile(); bool fileBlockValid() { return fb_.get() != nullptr; } @@ -294,6 +295,10 @@ namespace edm { void throwAboutModulesRequiringLuminosityBlockSynchronization() const; void warnAboutModulesRequiringRunSynchronization() const; void warnAboutLegacyModules() const; + + bool needToCallNext() const { return needToCallNext_; } + void setNeedToCallNext(bool val) { needToCallNext_ = val; } + //------------------------------------------------------------------ // // Data members below. @@ -311,7 +316,7 @@ namespace edm { edm::propagate_const> thinnedAssociationsHelper_; ServiceToken serviceToken_; edm::propagate_const> input_; - InputSource::ItemType lastSourceTransition_ = InputSource::IsInvalid; + InputSource::ItemTypeInfo lastSourceTransition_; edm::propagate_const> moduleTypeResolverMaker_; edm::propagate_const> espController_; edm::propagate_const> esp_; @@ -369,7 +374,7 @@ namespace edm { bool printDependencies_ = false; bool deleteNonConsumedUnscheduledModules_ = true; - bool firstItemAfterLumiMerge_ = true; + bool needToCallNext_ = true; }; // class EventProcessor //-------------------------------------------------------------------- diff --git a/FWCore/Framework/interface/InputSource.h b/FWCore/Framework/interface/InputSource.h index 82eefaf6612b8..859b2b27088d1 100644 --- a/FWCore/Framework/interface/InputSource.h +++ b/FWCore/Framework/interface/InputSource.h @@ -51,7 +51,36 @@ namespace edm { class InputSource { public: - enum ItemType { IsInvalid, IsStop, IsFile, IsRun, IsLumi, IsEvent, IsRepeat, IsSynchronize }; + enum class ItemType : char { IsInvalid, IsStop, IsFile, IsRun, IsLumi, IsEvent, 
IsRepeat, IsSynchronize }; + enum class ItemPosition : char { Invalid, LastItemToBeMerged, NotLastItemToBeMerged }; + + class ItemTypeInfo { + public: + constexpr ItemTypeInfo(ItemType type = ItemType::IsInvalid, ItemPosition position = ItemPosition::Invalid) + : type_(type), position_(position) {} + ItemType itemType() const { return type_; } + ItemPosition itemPosition() const { return position_; } + + // Note that conversion to ItemType is defined and often used to + // compare an ItemTypeInfo with an ItemType. + // operator== of two ItemTypeInfo's is intentionally NOT defined. + // The constructor also allows implicit conversion from ItemType and + // often assignment from ItemType to ItemTypeInfo occurs. + operator ItemType() const { return type_; } + + private: + ItemType type_; + + // position_ should always be Invalid if the itemType_ is not IsRun or IsLumi. + // Even for runs and lumis, it is OK to leave it Invalid because the + // Framework can figure this out based on the next item. Offline it is + // simplest to always leave it Invalid. For online sources, there are + // optimizations that the Framework can use when it knows that a run or + // lumi is the last to be merged before the following item is known. This + // is useful in cases where the function named getNextItemType + // might take a long time to return. 
+ ItemPosition position_; + }; enum ProcessingMode { Runs, RunsAndLumis, RunsLumisAndEvents }; @@ -70,7 +99,7 @@ namespace edm { static void prevalidate(ConfigurationDescriptions&); /// Advances the source to the next item - ItemType nextItemType(); + ItemTypeInfo nextItemType(); /// Read next event void readEvent(EventPrincipal& ep, StreamContext&); @@ -329,7 +358,7 @@ namespace edm { ProductRegistry& productRegistryUpdate() { return *productRegistry_; } ProcessHistoryRegistry& processHistoryRegistryForUpdate() { return *processHistoryRegistry_; } - ItemType state() const { return state_; } + ItemTypeInfo state() const { return state_; } void setRunAuxiliary(RunAuxiliary* rp) { runAuxiliary_.reset(rp); newRun_ = newLumi_ = true; @@ -349,7 +378,7 @@ namespace edm { void reset() const { resetLuminosityBlockAuxiliary(); resetRunAuxiliary(); - state_ = IsInvalid; + state_ = ItemTypeInfo(); } bool newRun() const { return newRun_; } void setNewRun() { newRun_ = true; } @@ -386,8 +415,8 @@ namespace edm { return false; } bool limitReached() const { return eventLimitReached() || lumiLimitReached(); } - virtual ItemType getNextItemType() = 0; - ItemType nextItemType_(); + virtual ItemTypeInfo getNextItemType() = 0; + ItemTypeInfo nextItemType_(); virtual std::shared_ptr readRunAuxiliary_() = 0; virtual std::shared_ptr readLuminosityBlockAuxiliary_() = 0; virtual void fillProcessBlockHelper_(); @@ -431,7 +460,7 @@ namespace edm { mutable bool newRun_; mutable bool newLumi_; bool eventCached_; - mutable ItemType state_; + mutable ItemTypeInfo state_; mutable std::shared_ptr runAuxiliary_; mutable std::shared_ptr lumiAuxiliary_; std::string statusFileName_; diff --git a/FWCore/Framework/interface/TransformerBase.h b/FWCore/Framework/interface/TransformerBase.h index bc893b4d9c078..3c5eb94ccec7d 100644 --- a/FWCore/Framework/interface/TransformerBase.h +++ b/FWCore/Framework/interface/TransformerBase.h @@ -28,6 +28,7 @@ namespace edm { class ModuleDescription; class 
WaitingTaskWithArenaHolder; class WaitingTaskHolder; + class ActivityRegistry; class TransformerBase { public: @@ -48,6 +49,7 @@ namespace edm { ProductResolverIndex prefetchImp(std::size_t iIndex) const { return transformInfo_.get(iIndex); } void transformImpAsync(WaitingTaskHolder iTask, std::size_t iIndex, + edm::ActivityRegistry* iAct, ProducerBase const& iBase, edm::EventForTransformer&) const; diff --git a/FWCore/Framework/interface/global/EDFilterBase.h b/FWCore/Framework/interface/global/EDFilterBase.h index 95c5d6e055f78..e3152c315c6d0 100644 --- a/FWCore/Framework/interface/global/EDFilterBase.h +++ b/FWCore/Framework/interface/global/EDFilterBase.h @@ -83,7 +83,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); //For now this is a placeholder /*virtual*/ void preActionBeforeRunEventAsync(WaitingTaskHolder iTask, @@ -162,6 +162,7 @@ namespace edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; virtual void clearInputProcessBlockCaches(); diff --git a/FWCore/Framework/interface/global/EDProducerBase.h b/FWCore/Framework/interface/global/EDProducerBase.h index 3156d85c777f2..0a8fda6f86af3 100644 --- a/FWCore/Framework/interface/global/EDProducerBase.h +++ b/FWCore/Framework/interface/global/EDProducerBase.h @@ -86,7 +86,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); void doPreallocate(PreallocationConfiguration const&); void doBeginJob(); @@ -165,6 +165,7 @@ namespace edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; virtual void 
clearInputProcessBlockCaches(); diff --git a/FWCore/Framework/interface/global/implementors.h b/FWCore/Framework/interface/global/implementors.h index 2ca8f3bd7bb4c..83cc2fbd1072c 100644 --- a/FWCore/Framework/interface/global/implementors.h +++ b/FWCore/Framework/interface/global/implementors.h @@ -50,6 +50,7 @@ namespace edm { class WaitingTaskWithArenaHolder; class ServiceWeakToken; + class ActivityRegistry; namespace global { namespace impl { @@ -519,8 +520,9 @@ namespace edm { void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const final { - return TransformerBase::transformImpAsync(std::move(iTask), iIndex, *this, iEvent); + return TransformerBase::transformImpAsync(std::move(iTask), iIndex, iAct, *this, iEvent); } void extendUpdateLookup(BranchType iBranchType, ProductResolverIndexHelper const& iHelper) override { if (iBranchType == InEvent) { diff --git a/FWCore/Framework/interface/limited/EDFilterBase.h b/FWCore/Framework/interface/limited/EDFilterBase.h index f044d5dd756ea..2496ff84d1674 100644 --- a/FWCore/Framework/interface/limited/EDFilterBase.h +++ b/FWCore/Framework/interface/limited/EDFilterBase.h @@ -83,7 +83,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); //For now this is a placeholder @@ -163,6 +163,7 @@ namespace edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; virtual void clearInputProcessBlockCaches(); diff --git a/FWCore/Framework/interface/limited/EDProducerBase.h b/FWCore/Framework/interface/limited/EDProducerBase.h index 9e1fc1554a9e3..c123c2125648b 100644 --- a/FWCore/Framework/interface/limited/EDProducerBase.h +++ 
b/FWCore/Framework/interface/limited/EDProducerBase.h @@ -86,7 +86,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); void doPreallocate(PreallocationConfiguration const&); void doBeginJob(); @@ -166,6 +166,7 @@ namespace edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; virtual void clearInputProcessBlockCaches(); diff --git a/FWCore/Framework/interface/limited/implementors.h b/FWCore/Framework/interface/limited/implementors.h index 1e5ffbb0036a3..efd5a1e18c473 100644 --- a/FWCore/Framework/interface/limited/implementors.h +++ b/FWCore/Framework/interface/limited/implementors.h @@ -47,6 +47,7 @@ // forward declarations namespace edm { class ServiceWeakToken; + class ActivityRegistry; namespace limited { namespace impl { @@ -507,8 +508,9 @@ namespace edm { void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const final { - return TransformerBase::transformImpAsync(std::move(iTask), iIndex, *this, iEvent); + return TransformerBase::transformImpAsync(std::move(iTask), iIndex, iAct, *this, iEvent); } void extendUpdateLookup(BranchType iBranchType, ProductResolverIndexHelper const& iHelper) override { if (iBranchType == InEvent) { diff --git a/FWCore/Framework/interface/moduleAbilityEnums.h b/FWCore/Framework/interface/moduleAbilityEnums.h index 9b2c2a4488c9b..323702643b34b 100644 --- a/FWCore/Framework/interface/moduleAbilityEnums.h +++ b/FWCore/Framework/interface/moduleAbilityEnums.h @@ -45,49 +45,13 @@ namespace edm { kOneSharedResources, kOneWatchRuns, kOneWatchLuminosityBlocks, + kStreamWatchRuns, + kStreamWatchLuminosityBlocks, kWatchInputFiles, kExternalWork, kAccumulator, kTransformer }; - - 
namespace AbilityBits { - enum Bits { - kGlobalCache = 1, - kStreamCache = 2, - kRunCache = 4, - kLuminosityBlockCache = 8, - kRunSummaryCache = 16, - kLuminosityBlockSummaryCache = 32, - kBeginRunProducer = 64, - kEndRunProducer = 128, - kOneSharedResources = 256, - kOneWatchRuns = 512, - kOneWatchLuminosityBlocks = 1024, - kWatchInputFiles = 2048 - }; - } - - namespace AbilityToTransitions { - enum Bits { - kBeginStream = AbilityBits::kStreamCache, - kEndStream = AbilityBits::kStreamCache, - - kGlobalBeginRun = AbilityBits::kRunCache | AbilityBits::kRunSummaryCache | AbilityBits::kOneWatchRuns, - kGlobalEndRun = AbilityBits::kRunCache | AbilityBits::kRunSummaryCache | AbilityBits::kEndRunProducer | - AbilityBits::kOneWatchRuns, - kStreamBeginRun = AbilityBits::kStreamCache, - kStreamEndRun = AbilityBits::kStreamCache | AbilityBits::kRunSummaryCache, - - kGlobalBeginLuminosityBlock = AbilityBits::kLuminosityBlockCache | AbilityBits::kLuminosityBlockSummaryCache | - AbilityBits::kOneWatchLuminosityBlocks, - kGlobalEndLuminosityBlock = AbilityBits::kLuminosityBlockCache | AbilityBits::kLuminosityBlockSummaryCache | - AbilityBits::kOneWatchLuminosityBlocks, - kStreamBeginLuminosityBlock = AbilityBits::kStreamCache | AbilityBits::kLuminosityBlockSummaryCache, - kStreamEndLuminosityBlock = AbilityBits::kStreamCache | AbilityBits::kLuminosityBlockSummaryCache - - }; - } } // namespace module } // namespace edm diff --git a/FWCore/Framework/interface/one/EDFilterBase.h b/FWCore/Framework/interface/one/EDFilterBase.h index 7f931db2e7e51..e9c0b0f1d9b54 100644 --- a/FWCore/Framework/interface/one/EDFilterBase.h +++ b/FWCore/Framework/interface/one/EDFilterBase.h @@ -80,7 +80,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); //For now this is a placeholder /*virtual*/ void preActionBeforeRunEventAsync(WaitingTaskHolder, @@ -139,6 +139,7 @@ namespace 
edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; virtual void clearInputProcessBlockCaches(); diff --git a/FWCore/Framework/interface/one/EDProducerBase.h b/FWCore/Framework/interface/one/EDProducerBase.h index 02f06bab6f5c1..0b2926f3e4543 100644 --- a/FWCore/Framework/interface/one/EDProducerBase.h +++ b/FWCore/Framework/interface/one/EDProducerBase.h @@ -85,7 +85,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); void doPreallocate(PreallocationConfiguration const&); virtual void preallocRuns(unsigned int); @@ -139,6 +139,7 @@ namespace edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; virtual void clearInputProcessBlockCaches(); diff --git a/FWCore/Framework/interface/one/implementors.h b/FWCore/Framework/interface/one/implementors.h index 1a4a05fa029f0..d2aaa0219015b 100644 --- a/FWCore/Framework/interface/one/implementors.h +++ b/FWCore/Framework/interface/one/implementors.h @@ -50,6 +50,7 @@ namespace edm { class SharedResourcesAcquirer; class WaitingTaskHolder; class ServiceWeakToken; + class ActivityRegistry; namespace one { namespace impl { @@ -399,8 +400,9 @@ namespace edm { void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const final { - return TransformerBase::transformImpAsync(std::move(iTask), iIndex, *this, iEvent); + return TransformerBase::transformImpAsync(std::move(iTask), iIndex, iAct, *this, iEvent); } void extendUpdateLookup(BranchType iBranchType, ProductResolverIndexHelper const& iHelper) override { if (iBranchType == InEvent) { diff --git 
a/FWCore/Framework/interface/stream/AbilityChecker.h b/FWCore/Framework/interface/stream/AbilityChecker.h index 7b74ba896fe02..58a58fc800436 100644 --- a/FWCore/Framework/interface/stream/AbilityChecker.h +++ b/FWCore/Framework/interface/stream/AbilityChecker.h @@ -22,6 +22,7 @@ // user include files #include "FWCore/Framework/interface/moduleAbilities.h" +#include "FWCore/Framework/interface/stream/moduleAbilities.h" // forward declarations namespace edm { @@ -112,6 +113,16 @@ namespace edm { static constexpr bool kAccumulator = true; }; + template + struct HasAbility : public HasAbility { + static constexpr bool kWatchLuminosityBlocks = true; + }; + + template + struct HasAbility : public HasAbility { + static constexpr bool kWatchRuns = true; + }; + template <> struct HasAbility { static constexpr bool kGlobalCache = false; @@ -130,6 +141,8 @@ namespace edm { static constexpr bool kExternalWork = false; static constexpr bool kAccumulator = false; static constexpr bool kTransformer = false; + static constexpr bool kWatchLuminosityBlocks = true; + static constexpr bool kWatchRuns = true; }; } // namespace impl template diff --git a/FWCore/Framework/interface/stream/AbilityToImplementor.h b/FWCore/Framework/interface/stream/AbilityToImplementor.h index bf63478c0fa5c..c956caec1ed33 100644 --- a/FWCore/Framework/interface/stream/AbilityToImplementor.h +++ b/FWCore/Framework/interface/stream/AbilityToImplementor.h @@ -22,6 +22,7 @@ // user include files #include "FWCore/Framework/interface/moduleAbilities.h" +#include "FWCore/Framework/interface/stream/moduleAbilities.h" #include "FWCore/Framework/interface/stream/implementors.h" // forward declarations @@ -113,6 +114,17 @@ namespace edm { struct AbilityToImplementor { using Type = edm::stream::impl::Accumulator; }; + + template <> + struct AbilityToImplementor { + using Type = edm::stream::impl::WatchRuns; + }; + + template <> + struct AbilityToImplementor { + using Type = edm::stream::impl::WatchLuminosityBlocks; + 
}; + } // namespace stream } // namespace edm diff --git a/FWCore/Framework/interface/stream/EDAnalyzerAdaptor.h b/FWCore/Framework/interface/stream/EDAnalyzerAdaptor.h index 2a4e28760f961..d13a364889d88 100644 --- a/FWCore/Framework/interface/stream/EDAnalyzerAdaptor.h +++ b/FWCore/Framework/interface/stream/EDAnalyzerAdaptor.h @@ -73,9 +73,11 @@ namespace edm { bool wantsProcessBlocks() const final { return T::HasAbility::kWatchProcessBlock; } bool wantsInputProcessBlocks() const final { return T::HasAbility::kInputProcessBlockCache; } bool wantsGlobalRuns() const final { return T::HasAbility::kRunCache or T::HasAbility::kRunSummaryCache; } + bool wantsStreamRuns() const final { return T::HasAbility::kWatchRuns; } bool wantsGlobalLuminosityBlocks() const final { return T::HasAbility::kLuminosityBlockCache or T::HasAbility::kLuminosityBlockSummaryCache; } + bool wantsStreamLuminosityBlocks() const final { return T::HasAbility::kWatchLuminosityBlocks; } private: using MyGlobal = CallGlobal; diff --git a/FWCore/Framework/interface/stream/EDAnalyzerAdaptorBase.h b/FWCore/Framework/interface/stream/EDAnalyzerAdaptorBase.h index ceaa19ea4b43f..b2cdea8816e8e 100644 --- a/FWCore/Framework/interface/stream/EDAnalyzerAdaptorBase.h +++ b/FWCore/Framework/interface/stream/EDAnalyzerAdaptorBase.h @@ -86,8 +86,8 @@ namespace edm { virtual bool wantsInputProcessBlocks() const = 0; virtual bool wantsGlobalRuns() const = 0; virtual bool wantsGlobalLuminosityBlocks() const = 0; - bool wantsStreamRuns() const { return true; } - bool wantsStreamLuminosityBlocks() const { return true; } + virtual bool wantsStreamRuns() const = 0; + virtual bool wantsStreamLuminosityBlocks() const = 0; std::string workerType() const { return "WorkerT"; } void registerProductsAndCallbacks(EDAnalyzerAdaptorBase const*, ProductRegistry* reg); diff --git a/FWCore/Framework/interface/stream/EDProducerBase.h b/FWCore/Framework/interface/stream/EDProducerBase.h index 64b97c42b5e20..3c825ee7125a1 100644 --- 
a/FWCore/Framework/interface/stream/EDProducerBase.h +++ b/FWCore/Framework/interface/stream/EDProducerBase.h @@ -80,6 +80,7 @@ namespace edm { virtual void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const; void setModuleDescriptionPtr(ModuleDescription const* iDesc) { moduleDescriptionPtr_ = iDesc; } diff --git a/FWCore/Framework/interface/stream/ProducingModuleAdaptor.h b/FWCore/Framework/interface/stream/ProducingModuleAdaptor.h index afef036dfbcb2..545c125638750 100644 --- a/FWCore/Framework/interface/stream/ProducingModuleAdaptor.h +++ b/FWCore/Framework/interface/stream/ProducingModuleAdaptor.h @@ -67,10 +67,13 @@ namespace edm { return T::HasAbility::kRunCache or T::HasAbility::kRunSummaryCache or T::HasAbility::kBeginRunProducer or T::HasAbility::kEndRunProducer; } + bool wantsStreamRuns() const final { return T::HasAbility::kWatchRuns; } + bool wantsGlobalLuminosityBlocks() const final { return T::HasAbility::kLuminosityBlockCache or T::HasAbility::kLuminosityBlockSummaryCache or T::HasAbility::kBeginLuminosityBlockProducer or T::HasAbility::kEndLuminosityBlockProducer; } + bool wantsStreamLuminosityBlocks() const final { return T::HasAbility::kWatchLuminosityBlocks; } bool hasAcquire() const final { return T::HasAbility::kExternalWork; } diff --git a/FWCore/Framework/interface/stream/ProducingModuleAdaptorBase.h b/FWCore/Framework/interface/stream/ProducingModuleAdaptorBase.h index f39840bbab26c..99c2581fbf288 100644 --- a/FWCore/Framework/interface/stream/ProducingModuleAdaptorBase.h +++ b/FWCore/Framework/interface/stream/ProducingModuleAdaptorBase.h @@ -93,8 +93,8 @@ namespace edm { virtual bool wantsGlobalLuminosityBlocks() const = 0; virtual bool hasAcquire() const = 0; virtual bool hasAccumulator() const = 0; - bool wantsStreamRuns() const { return true; } - bool wantsStreamLuminosityBlocks() const { return true; } + virtual bool 
wantsStreamRuns() const = 0; + virtual bool wantsStreamLuminosityBlocks() const = 0; void registerProductsAndCallbacks(ProducingModuleAdaptorBase const*, ProductRegistry* reg); @@ -134,7 +134,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const*, + ModuleCallingContext, ServiceWeakToken const&); protected: diff --git a/FWCore/Framework/interface/stream/implementors.h b/FWCore/Framework/interface/stream/implementors.h index aecc373b2d83d..64ffe5acc87cc 100644 --- a/FWCore/Framework/interface/stream/implementors.h +++ b/FWCore/Framework/interface/stream/implementors.h @@ -291,6 +291,27 @@ namespace edm { virtual void acquire(Event const&, edm::EventSetup const&, WaitingTaskWithArenaHolder) = 0; }; + class WatchLuminosityBlocks { + public: + WatchLuminosityBlocks() = default; + WatchLuminosityBlocks(WatchLuminosityBlocks const&) = delete; + WatchLuminosityBlocks& operator=(WatchLuminosityBlocks const&) = delete; + virtual ~WatchLuminosityBlocks() noexcept(false){}; + + // virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) = 0; + // virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) {} + }; + + class WatchRuns { + public: + WatchRuns() = default; + WatchRuns(WatchRuns const&) = delete; + WatchRuns& operator=(WatchRuns const&) = delete; + virtual ~WatchRuns() noexcept(false){}; + + // virtual void beginRun(edm::Run const&, edm::EventSetup const&) = 0; + // virtual void endRun(edm::Run const&, edm::EventSetup const&) {} + }; class Transformer : private TransformerBase, public EDProducerBase { public: Transformer() = default; @@ -353,8 +374,9 @@ namespace edm { void transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const final { - return TransformerBase::transformImpAsync(std::move(iTask), iIndex, *this, iEvent); + return 
TransformerBase::transformImpAsync(std::move(iTask), iIndex, iAct, *this, iEvent); } void extendUpdateLookup(BranchType iBranchType, ProductResolverIndexHelper const& iHelper) override { if (iBranchType == InEvent) { diff --git a/FWCore/Framework/interface/stream/moduleAbilities.h b/FWCore/Framework/interface/stream/moduleAbilities.h new file mode 100644 index 0000000000000..0d1a56408a53d --- /dev/null +++ b/FWCore/Framework/interface/stream/moduleAbilities.h @@ -0,0 +1,42 @@ +#ifndef FWCore_Framework_stream_moduleAbilities_h +#define FWCore_Framework_stream_moduleAbilities_h +// -*- C++ -*- +// +// Package: FWCore/Framework +// Class : moduleAbilities +// +/**\file moduleAbilities moduleAbilities.h "FWCore/Framework/interface/one/moduleAbilities.h" + + Description: Template arguments which only apply to stream::{Module} classes + + Usage: + + +*/ +// +// Original Author: Chris Jones +// Created: Fri, 22 Dec 2023 19:38:53 GMT +// + +// system include files + +// user include files +#include "FWCore/Framework/interface/moduleAbilities.h" + +// forward declarations + +namespace edm { + namespace stream { + struct WatchRuns { + static constexpr module::Abilities kAbilities = module::Abilities::kStreamWatchRuns; + using Type = module::Empty; + }; + + struct WatchLuminosityBlocks { + static constexpr module::Abilities kAbilities = module::Abilities::kStreamWatchLuminosityBlocks; + using Type = module::Empty; + }; + } // namespace stream +} // namespace edm + +#endif diff --git a/FWCore/Framework/src/ESSourceProductResolverBase.cc b/FWCore/Framework/src/ESSourceProductResolverBase.cc index 66b010e9941e9..fe2696bc1e8e1 100644 --- a/FWCore/Framework/src/ESSourceProductResolverBase.cc +++ b/FWCore/Framework/src/ESSourceProductResolverBase.cc @@ -38,7 +38,8 @@ void edm::eventsetup::ESSourceProductResolverBase::doPrefetchAndSignals( edm::eventsetup::EventSetupRecordImpl const& iRecord, edm::eventsetup::DataKey const& iKey, edm::ESParentContext const& iParent) { - 
edm::ESModuleCallingContext context(providerDescription(), ESModuleCallingContext::State::kRunning, iParent); + edm::ESModuleCallingContext context( + providerDescription(), reinterpret_cast(this), ESModuleCallingContext::State::kRunning, iParent); iRecord.activityRegistry()->preESModuleSignal_.emit(iRecord.key(), context); struct EndGuard { EndGuard(EventSetupRecordImpl const& iRecord, ESModuleCallingContext const& iContext) diff --git a/FWCore/Framework/src/EventForTransformer.cc b/FWCore/Framework/src/EventForTransformer.cc index 3f13f55ed8110..fcdb6f621e177 100644 --- a/FWCore/Framework/src/EventForTransformer.cc +++ b/FWCore/Framework/src/EventForTransformer.cc @@ -11,12 +11,12 @@ namespace edm { - EventForTransformer::EventForTransformer(EventPrincipal const& ep, ModuleCallingContext const* moduleCallingContext) + EventForTransformer::EventForTransformer(EventPrincipal const& ep, ModuleCallingContext moduleCallingContext) : eventPrincipal_{ep}, mcc_{moduleCallingContext} {} BasicHandle EventForTransformer::get(edm::TypeID const& iTypeID, ProductResolverIndex iIndex) const { bool amb = false; - return eventPrincipal_.getByToken(PRODUCT_TYPE, iTypeID, iIndex, false, amb, nullptr, mcc_); + return eventPrincipal_.getByToken(PRODUCT_TYPE, iTypeID, iIndex, false, amb, nullptr, &mcc_); } void EventForTransformer::put(ProductResolverIndex index, diff --git a/FWCore/Framework/src/EventPrincipal.cc b/FWCore/Framework/src/EventPrincipal.cc index 8e25e00bd5063..4919d9b821151 100644 --- a/FWCore/Framework/src/EventPrincipal.cc +++ b/FWCore/Framework/src/EventPrincipal.cc @@ -81,7 +81,7 @@ namespace edm { } else { provRetrieverPtr_->mergeParentProcessRetriever(provRetriever); } - if (wasBranchListIndexesChangedFromInput(branchListIndexes)) { + if (wasBranchListIndexesChangedFromInput(branchListIndexes) or branchListIndexes_.empty()) { if (branchIDListHelper_->hasProducedProducts()) { // Add index into BranchIDListRegistry for products produced this process 
branchListIndexes.push_back(branchIDListHelper_->producedBranchListIndex()); @@ -99,7 +99,7 @@ namespace edm { DelayedReader* reader) { eventSelectionIDs_ = std::move(eventSelectionIDs); - if (wasBranchListIndexesChangedFromInput(branchListIndexes)) { + if (wasBranchListIndexesChangedFromInput(branchListIndexes) or branchListIndexes_.empty()) { if (branchIDListHelper_->hasProducedProducts()) { // Add index into BranchIDListRegistry for products produced this process branchListIndexes.push_back(branchIDListHelper_->producedBranchListIndex()); diff --git a/FWCore/Framework/src/EventProcessor.cc b/FWCore/Framework/src/EventProcessor.cc index 76de43cb7a500..25c48d59e80c1 100644 --- a/FWCore/Framework/src/EventProcessor.cc +++ b/FWCore/Framework/src/EventProcessor.cc @@ -867,29 +867,53 @@ namespace edm { edm::ActivityRegistry& act_; }; } // namespace - InputSource::ItemType EventProcessor::nextTransitionType() { + + InputSource::ItemTypeInfo EventProcessor::nextTransitionType() { SendSourceTerminationSignalIfException sentry(actReg_.get()); - InputSource::ItemType itemType; + InputSource::ItemTypeInfo itemTypeInfo; { SourceNextGuard guard(*actReg_.get()); //For now, do nothing with InputSource::IsSynchronize do { - itemType = input_->nextItemType(); - } while (itemType == InputSource::IsSynchronize); + itemTypeInfo = input_->nextItemType(); + } while (itemTypeInfo == InputSource::ItemType::IsSynchronize); } - lastSourceTransition_ = itemType; + lastSourceTransition_ = itemTypeInfo; sentry.completedSuccessfully(); StatusCode returnCode = epSuccess; if (checkForAsyncStopRequest(returnCode)) { actReg_->preSourceEarlyTerminationSignal_(TerminationOrigin::ExternalSignal); - lastSourceTransition_ = InputSource::IsStop; + lastSourceTransition_ = InputSource::ItemType::IsStop; } return lastSourceTransition_; } + void EventProcessor::nextTransitionTypeAsync(std::shared_ptr iRunStatus, + WaitingTaskHolder nextTask) { + auto group = nextTask.group(); + 
sourceResourcesAcquirer_.serialQueueChain().push( + *group, [this, runStatus = std::move(iRunStatus), nextHolder = std::move(nextTask)]() mutable { + CMS_SA_ALLOW try { + ServiceRegistry::Operate operate(serviceToken_); + std::lock_guard guard(*(sourceMutex_.get())); + nextTransitionType(); + if (lastTransitionType() == InputSource::ItemType::IsRun && + runStatus->runPrincipal()->run() == input_->run() && + runStatus->runPrincipal()->reducedProcessHistoryID() == input_->reducedProcessHistoryID()) { + throw Exception(errors::LogicError) + << "InputSource claimed previous Run Entry was last to be merged in this file,\n" + << "but the next entry has the same run number and reduced ProcessHistoryID.\n" + << "This is probably a bug in the InputSource. Please report to the Core group.\n"; + } + } catch (...) { + nextHolder.doneWaiting(std::current_exception()); + } + }); + } + EventProcessor::StatusCode EventProcessor::runToCompletion() { beginJob(); //make sure this was called @@ -919,11 +943,11 @@ namespace edm { if (deferredExceptionPtrIsSet_.load()) { std::rethrow_exception(deferredExceptionPtr_); } - if (trans != InputSource::IsStop) { + if (trans != InputSource::ItemType::IsStop) { //problem with the source doErrorStuff(); - throw cms::Exception("BadTransition") << "Unexpected transition change " << trans; + throw cms::Exception("BadTransition") << "Unexpected transition change " << static_cast(trans); } } while (not endOfLoop()); }); // convertException::wrap @@ -1152,7 +1176,7 @@ namespace edm { InputSource::ItemType EventProcessor::processRuns() { FinalWaitingTask waitTask{taskGroup_}; - assert(lastTransitionType() == InputSource::IsRun); + assert(lastTransitionType() == InputSource::ItemType::IsRun); if (streamRunActive_ == 0) { assert(streamLumiActive_ == 0); @@ -1163,12 +1187,15 @@ namespace edm { auto runStatus = streamRunStatus_[0]; - while (lastTransitionType() == InputSource::IsRun and runStatus->runPrincipal()->run() == input_->run() and + while 
(lastTransitionType() == InputSource::ItemType::IsRun and + runStatus->runPrincipal()->run() == input_->run() and runStatus->runPrincipal()->reducedProcessHistoryID() == input_->reducedProcessHistoryID()) { readAndMergeRun(*runStatus); nextTransitionType(); } + setNeedToCallNext(false); + WaitingTaskHolder holder{taskGroup_, &waitTask}; runStatus->setHolderOfTaskInProcessRuns(holder); if (streamLumiActive_ > 0) { @@ -1261,7 +1288,13 @@ namespace edm { using namespace edm::waiting_task::chain; chain::first([this, status](auto nextTask) mutable { - CMS_SA_ALLOW try { readAndMergeRunEntriesAsync(std::move(status), nextTask); } catch (...) { + CMS_SA_ALLOW try { + if (lastTransitionType().itemPosition() != InputSource::ItemPosition::LastItemToBeMerged) { + readAndMergeRunEntriesAsync(status, nextTask); + } else { + setNeedToCallNext(true); + } + } catch (...) { status->setStopBeforeProcessingRun(true); nextTask.doneWaiting(std::current_exception()); } @@ -1465,7 +1498,7 @@ namespace edm { } }); - if (lastTransitionType() == InputSource::IsRun) { + if (lastTransitionType() == InputSource::ItemType::IsRun) { CMS_SA_ALLOW try { beginRunAsync(IOVSyncValue(EventID(input_->run(), 0, 0), input_->runAuxiliary()->beginTime()), nextTask); } catch (...) 
{ @@ -1625,7 +1658,7 @@ namespace edm { runStatus->setCleaningUpAfterException(cleaningUpAfterException); WaitingTaskHolder holder{taskGroup_, &waitTask}; runStatus->setHolderOfTaskInProcessRuns(holder); - lastSourceTransition_ = InputSource::IsStop; + lastSourceTransition_ = InputSource::ItemType::IsStop; endRunAsync(streamRunStatus_[0], std::move(holder)); waitTask.wait(); } @@ -1698,8 +1731,11 @@ namespace edm { using namespace edm::waiting_task::chain; chain::first([this, status](auto nextTask) mutable { - readAndMergeLumiEntriesAsync(std::move(status), std::move(nextTask)); - firstItemAfterLumiMerge_ = true; + if (lastTransitionType().itemPosition() != InputSource::ItemPosition::LastItemToBeMerged) { + readAndMergeLumiEntriesAsync(std::move(status), std::move(nextTask)); + } else { + setNeedToCallNext(true); + } }) | then([this, status, &es, &lumiPrincipal](auto nextTask) { LumiTransitionInfo transitionInfo(lumiPrincipal, es, &status->eventSetupImpls()); using Traits = OccurrenceTraits; @@ -1799,12 +1835,11 @@ namespace edm { auto status = streamLumiStatus_[0]; //read from streamLumiActive_ happened in calling routine status->setEventProcessingState(LuminosityBlockProcessingStatus::EventProcessingState::kProcessing); - while (lastTransitionType() == InputSource::IsLumi and + while (lastTransitionType() == InputSource::ItemType::IsLumi and status->lumiPrincipal()->luminosityBlock() == input_->luminosityBlock()) { readAndMergeLumi(*status); nextTransitionType(); } - firstItemAfterLumiMerge_ = true; }) | chain::then([this](auto nextTask) mutable { unsigned int streamIndex = 0; oneapi::tbb::task_arena arena{oneapi::tbb::task_arena::attach()}; @@ -2091,13 +2126,20 @@ namespace edm { std::lock_guard guard(*(sourceMutex_.get())); nextTransitionType(); - while (lastTransitionType() == InputSource::IsRun and status->runPrincipal()->run() == input_->run() and + setNeedToCallNext(false); + + while (lastTransitionType() == InputSource::ItemType::IsRun and + 
status->runPrincipal()->run() == input_->run() and status->runPrincipal()->reducedProcessHistoryID() == input_->reducedProcessHistoryID()) { if (status->holderOfTaskInProcessRuns().taskHasFailed()) { status->setStopBeforeProcessingRun(true); return; } readAndMergeRun(*status); + if (lastTransitionType().itemPosition() == InputSource::ItemPosition::LastItemToBeMerged) { + setNeedToCallNext(true); + return; + } nextTransitionType(); } } catch (...) { @@ -2118,9 +2160,15 @@ namespace edm { std::lock_guard guard(*(sourceMutex_.get())); nextTransitionType(); - while (lastTransitionType() == InputSource::IsLumi and + setNeedToCallNext(false); + + while (lastTransitionType() == InputSource::ItemType::IsLumi and iLumiStatus->lumiPrincipal()->luminosityBlock() == input_->luminosityBlock()) { readAndMergeLumi(*iLumiStatus); + if (lastTransitionType().itemPosition() == InputSource::ItemPosition::LastItemToBeMerged) { + setNeedToCallNext(true); + return; + } nextTransitionType(); } } catch (...) { @@ -2131,25 +2179,36 @@ namespace edm { void EventProcessor::handleNextItemAfterMergingRunEntries(std::shared_ptr iRunStatus, WaitingTaskHolder iHolder) { - if (lastTransitionType() == InputSource::IsFile) { - iRunStatus->holderOfTaskInProcessRuns().doneWaiting(std::exception_ptr{}); - iHolder.doneWaiting(std::exception_ptr{}); - } else if (lastTransitionType() == InputSource::IsLumi && !iHolder.taskHasFailed()) { - CMS_SA_ALLOW try { - beginLumiAsync(IOVSyncValue(EventID(input_->run(), input_->luminosityBlock(), 0), - input_->luminosityBlockAuxiliary()->beginTime()), - iRunStatus, - iHolder); - } catch (...) 
{ - WaitingTaskHolder copyHolder(iHolder); - iHolder.doneWaiting(std::current_exception()); - endRunAsync(std::move(iRunStatus), std::move(iHolder)); + chain::first([this, iRunStatus](auto nextTask) mutable { + if (needToCallNext()) { + nextTransitionTypeAsync(std::move(iRunStatus), std::move(nextTask)); + } + }) | chain::then([this, iRunStatus](std::exception_ptr const* iException, auto nextTask) { + ServiceRegistry::Operate operate(serviceToken_); + if (iException) { + WaitingTaskHolder copyHolder(nextTask); + copyHolder.doneWaiting(*iException); + } + if (lastTransitionType() == InputSource::ItemType::IsFile) { + iRunStatus->holderOfTaskInProcessRuns().doneWaiting(std::exception_ptr{}); + return; + } + if (lastTransitionType() == InputSource::ItemType::IsLumi && !nextTask.taskHasFailed()) { + CMS_SA_ALLOW try { + beginLumiAsync(IOVSyncValue(EventID(input_->run(), input_->luminosityBlock(), 0), + input_->luminosityBlockAuxiliary()->beginTime()), + iRunStatus, + nextTask); + return; + } catch (...) { + WaitingTaskHolder copyHolder(nextTask); + copyHolder.doneWaiting(std::current_exception()); + } } - } else { // Note that endRunAsync will call beginRunAsync for the following run // if appropriate. - endRunAsync(std::move(iRunStatus), std::move(iHolder)); - } + endRunAsync(iRunStatus, std::move(nextTask)); + }) | chain::runLast(std::move(iHolder)); } bool EventProcessor::readNextEventForStream(WaitingTaskHolder const& iTask, @@ -2175,7 +2234,7 @@ namespace edm { // Are output modules or the looper requesting we stop? if (shouldWeStop()) { - lastSourceTransition_ = InputSource::IsStop; + lastSourceTransition_ = InputSource::ItemType::IsStop; iStatus.setEventProcessingState(LuminosityBlockProcessingStatus::EventProcessingState::kStopLumi); return false; } @@ -2190,17 +2249,24 @@ namespace edm { // If we didn't already call nextTransitionType while merging lumis, call it here. // This asks the input source what is next and also checks for signals. 
- InputSource::ItemType itemType = firstItemAfterLumiMerge_ ? lastTransitionType() : nextTransitionType(); - firstItemAfterLumiMerge_ = false; + InputSource::ItemType itemType = needToCallNext() ? nextTransitionType() : lastTransitionType(); + setNeedToCallNext(true); - if (InputSource::IsEvent != itemType) { + if (InputSource::ItemType::IsEvent != itemType) { // IsFile may continue processing the lumi and // looper_ can cause the input source to declare a new IsRun which is actually // just a continuation of the previous run - if (InputSource::IsStop == itemType or InputSource::IsLumi == itemType or - (InputSource::IsRun == itemType and + if (InputSource::ItemType::IsStop == itemType or InputSource::ItemType::IsLumi == itemType or + (InputSource::ItemType::IsRun == itemType and (iStatus.lumiPrincipal()->run() != input_->run() or iStatus.lumiPrincipal()->runPrincipal().reducedProcessHistoryID() != input_->reducedProcessHistoryID()))) { + if (itemType == InputSource::ItemType::IsLumi && + iStatus.lumiPrincipal()->luminosityBlock() == input_->luminosityBlock()) { + throw Exception(errors::LogicError) + << "InputSource claimed previous Lumi Entry was last to be merged in this file,\n" + << "but the next lumi entry has the same lumi number.\n" + << "This is probably a bug in the InputSource. 
Please report to the Core group.\n"; + } iStatus.setEventProcessingState(LuminosityBlockProcessingStatus::EventProcessingState::kStopLumi); } else { iStatus.setEventProcessingState(LuminosityBlockProcessingStatus::EventProcessingState::kPauseForFileTransition); @@ -2241,7 +2307,7 @@ namespace edm { if (status->eventProcessingState() == LuminosityBlockProcessingStatus::EventProcessingState::kStopLumi) { if (not status->haveStartedNextLumiOrEndedRun()) { status->startNextLumiOrEndRun(); - if (lastTransitionType() == InputSource::IsLumi && !iTask.taskHasFailed()) { + if (lastTransitionType() == InputSource::ItemType::IsLumi && !iTask.taskHasFailed()) { CMS_SA_ALLOW try { beginLumiAsync(IOVSyncValue(EventID(input_->run(), input_->luminosityBlock(), 0), input_->luminosityBlockAuxiliary()->beginTime()), diff --git a/FWCore/Framework/src/InputSource.cc b/FWCore/Framework/src/InputSource.cc index 8303180b6c1c4..7f676089aa725 100644 --- a/FWCore/Framework/src/InputSource.cc +++ b/FWCore/Framework/src/InputSource.cc @@ -64,7 +64,7 @@ namespace edm { newRun_(true), newLumi_(true), eventCached_(false), - state_(IsInvalid), + state_(), runAuxiliary_(), lumiAuxiliary_(), statusFileName_(), @@ -133,61 +133,62 @@ namespace edm { // implement the skipping internally, so that the performance gain is realized. // If this is done for a source, the 'if' blocks in this function will never be entered // for that source. 
- InputSource::ItemType InputSource::nextItemType_() { - ItemType itemType = callWithTryCatchAndPrint([this]() { return getNextItemType(); }, - "Calling InputSource::getNextItemType"); + InputSource::ItemTypeInfo InputSource::nextItemType_() { + ItemTypeInfo itemTypeInfo = callWithTryCatchAndPrint([this]() { return getNextItemType(); }, + "Calling InputSource::getNextItemType"); - if (itemType == IsEvent && processingMode() != RunsLumisAndEvents) { + if (itemTypeInfo == ItemType::IsEvent && processingMode() != RunsLumisAndEvents) { skipEvents(1); return nextItemType_(); } - if (itemType == IsLumi && processingMode() == Runs) { + if (itemTypeInfo == ItemType::IsLumi && processingMode() == Runs) { // QQQ skipLuminosityBlock_(); return nextItemType_(); } - return itemType; + return itemTypeInfo; } - InputSource::ItemType InputSource::nextItemType() { - ItemType oldState = state_; + InputSource::ItemTypeInfo InputSource::nextItemType() { + ItemType oldType = state_.itemType(); if (eventLimitReached()) { // If the maximum event limit has been reached, stop. - state_ = IsStop; + state_ = ItemType::IsStop; } else if (lumiLimitReached()) { // If the maximum lumi limit has been reached, stop // when reaching a new file, run, or lumi. 
- if (oldState == IsInvalid || oldState == IsFile || oldState == IsRun || processingMode() != RunsLumisAndEvents) { - state_ = IsStop; + if (oldType == ItemType::IsInvalid || oldType == ItemType::IsFile || oldType == ItemType::IsRun || + processingMode() != RunsLumisAndEvents) { + state_ = ItemType::IsStop; } else { - ItemType newState = nextItemType_(); - if (newState == IsEvent) { + ItemTypeInfo newState = nextItemType_(); + if (newState == ItemType::IsEvent) { assert(processingMode() == RunsLumisAndEvents); - state_ = IsEvent; + state_ = ItemType::IsEvent; } else { - state_ = IsStop; + state_ = ItemType::IsStop; } } } else { - ItemType newState = nextItemType_(); - if (newState == IsStop) { - state_ = IsStop; - } else if (newState == IsSynchronize) { - state_ = IsSynchronize; - } else if (newState == IsFile || oldState == IsInvalid) { - state_ = IsFile; - } else if (newState == IsRun || oldState == IsFile) { + ItemTypeInfo newState = nextItemType_(); + if (newState == ItemType::IsStop) { + state_ = ItemType::IsStop; + } else if (newState == ItemType::IsSynchronize) { + state_ = ItemType::IsSynchronize; + } else if (newState == ItemType::IsFile || oldType == ItemType::IsInvalid) { + state_ = ItemType::IsFile; + } else if (newState == ItemType::IsRun || oldType == ItemType::IsFile) { runAuxiliary_ = readRunAuxiliary(); - state_ = IsRun; - } else if (newState == IsLumi || oldState == IsRun) { + state_ = (newState == ItemType::IsRun) ? newState : ItemTypeInfo(ItemType::IsRun); + } else if (newState == ItemType::IsLumi || oldType == ItemType::IsRun) { assert(processingMode() != Runs); lumiAuxiliary_ = readLuminosityBlockAuxiliary(); - state_ = IsLumi; + state_ = (newState == ItemType::IsLumi) ? 
newState : ItemTypeInfo(ItemType::IsLumi); } else { assert(processingMode() == RunsLumisAndEvents); - state_ = IsEvent; + state_ = ItemType::IsEvent; } } - if (state_ == IsStop) { + if (state_ == ItemType::IsStop) { lumiAuxiliary_.reset(); runAuxiliary_.reset(); } @@ -220,7 +221,7 @@ namespace edm { // Return a dummy file block. std::shared_ptr InputSource::readFile() { - assert(state_ == IsFile); + assert(state_ == ItemType::IsFile); assert(!limitReached()); return callWithTryCatchAndPrint >([this]() { return readFile_(); }, "Calling InputSource::readFile_"); @@ -299,7 +300,7 @@ namespace edm { } void InputSource::readEvent(EventPrincipal& ep, StreamContext& streamContext) { - assert(state_ == IsEvent); + assert(state_ == ItemType::IsEvent); assert(!eventLimitReached()); { // block scope, in order to issue the PostSourceEvent signal before calling postRead and issueReports @@ -345,7 +346,7 @@ namespace edm { } void InputSource::rewind() { - state_ = IsInvalid; + state_ = ItemTypeInfo(); remainingEvents_ = maxEvents_; setNewRun(); setNewLumi(); diff --git a/FWCore/Framework/src/Path.cc b/FWCore/Framework/src/Path.cc index b9498a03ff23c..ba6462aa6f3e1 100644 --- a/FWCore/Framework/src/Path.cc +++ b/FWCore/Framework/src/Path.cc @@ -311,11 +311,13 @@ namespace edm { if (pathStatusInserter_) { // pathStatusInserter is null for EndPaths pathStatusInserter_->setPathStatus(streamID, status); } - std::exception_ptr jException = - pathStatusInserterWorker_->runModuleDirectly>( - iInfo, streamID, ParentContext(iContext), iContext); - if (jException && not iException) { - iException = jException; + if (pathStatusInserterWorker_) { + std::exception_ptr jException = + pathStatusInserterWorker_->runModuleDirectly>( + iInfo, streamID, ParentContext(iContext), iContext); + if (jException && not iException) { + iException = jException; + } } actReg_->postPathEventSignal_(*iContext, pathContext_, status); } catch (...) 
{ diff --git a/FWCore/Framework/src/PathsAndConsumesOfModules.cc b/FWCore/Framework/src/PathsAndConsumesOfModules.cc index 343015af93415..89981a576fab0 100644 --- a/FWCore/Framework/src/PathsAndConsumesOfModules.cc +++ b/FWCore/Framework/src/PathsAndConsumesOfModules.cc @@ -328,6 +328,28 @@ namespace edm { return allDependenciesRan; } + + void findAllDependenciesForModule(unsigned int iModID, + std::vector const& iStatus, + std::vector>& oDependencies) { + auto const& dependsOn = iStatus[iModID].dependsOn_; + if (dependsOn.empty() or !oDependencies[iModID].empty()) { + return; + } + oDependencies[iModID].insert(dependsOn.begin(), dependsOn.end()); + for (auto dep : dependsOn) { + findAllDependenciesForModule(dep, iStatus, oDependencies); + oDependencies[iModID].merge(oDependencies[dep]); + } + } + std::vector> findAllDependenciesForModules( + std::vector const& iStatus) { + std::vector> ret(iStatus.size()); + for (unsigned int id = 0; id < iStatus.size(); ++id) { + findAllDependenciesForModule(id, iStatus, ret); + } + return ret; + } } // namespace void checkForModuleDependencyCorrectness(edm::PathsAndConsumesOfModulesBase const& iPnC, bool iPrintDependencies) { constexpr auto kInvalidIndex = std::numeric_limits::max(); @@ -425,12 +447,14 @@ namespace edm { statusOfModules[mod->id()].pathsOn_.push_back(p + offset); } } - status.nModules_ = uniqueModules.size() + 1; + status.nModules_ = uniqueModules.size(); //add the EndPathStatusInserter at the end auto found = pathStatusInserterModuleLabelToModuleID.find(iPnC.endPaths()[p]); - assert(found != pathStatusInserterModuleLabelToModuleID.end()); - status.modulesOnPath_.push_back(found->second); + if (found != pathStatusInserterModuleLabelToModuleID.end()) { + status.modulesOnPath_.push_back(found->second); + ++status.nModules_; + } } } @@ -450,6 +474,11 @@ namespace edm { } unsigned int nPathsFinished = 0; + for (auto const& status : statusOfPaths) { + if (status.nModules_ == 0) { + ++nPathsFinished; + } + } //if a 
circular dependency exception happens, stackTrace has the info std::vector stackTrace; @@ -636,26 +665,6 @@ namespace edm { //NOTE: although the following conditions are not needed for safe running, they are // policy choices the collaboration has made. - //Check to see if for each path if the order of the modules is correct based on dependencies - for (auto& p : statusOfPaths) { - for (unsigned long int i = 0; p.nModules_ > 0 and i < p.nModules_ - 1; ++i) { - auto moduleID = p.modulesOnPath_[i]; - if (not statusOfModules[moduleID].dependsOn_.empty()) { - for (unsigned long int j = i + 1; j < p.nModules_; ++j) { - auto testModuleID = p.modulesOnPath_[j]; - for (auto depModuleID : statusOfModules[moduleID].dependsOn_) { - if (depModuleID == testModuleID) { - throw edm::Exception(edm::errors::ScheduleExecutionFailure, "Unrunnable schedule\n") - << "Dependent module later on Path\n" - << " module '" << moduleIndexToNames[moduleID] << "' depends on '" - << moduleIndexToNames[depModuleID] << "' which is later on path " << pathName(p); - } - } - } - } - } - } - //HLT wants all paths to be equivalent. If a path has a module A that needs data from module B and module B appears on one path // as module A then B must appear on ALL paths that have A. 
unsigned int modIndex = 0; @@ -704,5 +713,24 @@ namespace edm { } ++modIndex; } + + //Check to see if for each path if the order of the modules is correct based on dependencies + auto allDependencies = findAllDependenciesForModules(statusOfModules); + for (auto& p : statusOfPaths) { + for (unsigned long int i = 0; p.nModules_ > 0 and i < p.nModules_ - 1; ++i) { + auto moduleID = p.modulesOnPath_[i]; + if (not allDependencies[moduleID].empty()) { + for (unsigned long int j = i + 1; j < p.nModules_; ++j) { + auto testModuleID = p.modulesOnPath_[j]; + if (allDependencies[moduleID].find(testModuleID) != allDependencies[moduleID].end()) { + throw edm::Exception(edm::errors::ScheduleExecutionFailure, "Unrunnable schedule\n") + << "Dependent module later on Path\n" + << " module '" << moduleIndexToNames[moduleID] << "' depends on '" + << moduleIndexToNames[testModuleID] << "' which is later on path " << pathName(p); + } + } + } + } + } } } // namespace edm diff --git a/FWCore/Framework/src/ProductResolvers.cc b/FWCore/Framework/src/ProductResolvers.cc index 61285158f5c9b..0346563353373 100644 --- a/FWCore/Framework/src/ProductResolvers.cc +++ b/FWCore/Framework/src/ProductResolvers.cc @@ -159,13 +159,12 @@ namespace edm { } namespace { - cms::Exception& extendException(cms::Exception& e, BranchDescription const& bd, ModuleCallingContext const* mcc) { + void extendException(cms::Exception& e, BranchDescription const& bd, ModuleCallingContext const* mcc) { e.addContext(std::string("While reading from source ") + bd.className() + " " + bd.moduleLabel() + " '" + bd.productInstanceName() + "' " + bd.processName()); if (mcc) { edm::exceptionContext(e, *mcc); } - return e; } } // namespace ProductResolverBase::Resolution DelayedReaderInputProductResolver::resolveProduct_( @@ -197,10 +196,12 @@ namespace edm { //another thread could have beaten us here setProduct(reader->getProduct(branchDescription().branchID(), &principal, mcc)); } catch (cms::Exception& e) { - throw 
extendException(e, branchDescription(), mcc); + extendException(e, branchDescription(), mcc); + throw; } catch (std::exception const& e) { auto newExcept = edm::Exception(errors::StdException) << e.what(); - throw extendException(newExcept, branchDescription(), mcc); + extendException(newExcept, branchDescription(), mcc); + throw newExcept; } } } @@ -299,10 +300,12 @@ namespace edm { //another thread could have finished this while we were waiting setProduct(reader->getProduct(branchDescription().branchID(), &principal, mcc)); } catch (cms::Exception& e) { - throw extendException(e, branchDescription(), mcc); + extendException(e, branchDescription(), mcc); + throw; } catch (std::exception const& e) { auto newExcept = edm::Exception(errors::StdException) << e.what(); - throw extendException(newExcept, branchDescription(), mcc); + extendException(newExcept, branchDescription(), mcc); + throw newExcept; } } } @@ -566,7 +569,8 @@ namespace edm { //This gives a lifetime greater than this call ParentContext parent(mcc); - mcc_ = ModuleCallingContext(worker_->description(), ModuleCallingContext::State::kPrefetching, parent, nullptr); + mcc_ = ModuleCallingContext( + worker_->description(), index_ + 1, ModuleCallingContext::State::kPrefetching, parent, nullptr); EventTransitionInfo const& info = aux_->eventTransitionInfo(); worker_->doTransformAsync(WaitingTaskHolder(*waitTask.group(), t), diff --git a/FWCore/Framework/src/Schedule.cc b/FWCore/Framework/src/Schedule.cc index 6c32305da7be5..da81825479246 100644 --- a/FWCore/Framework/src/Schedule.cc +++ b/FWCore/Framework/src/Schedule.cc @@ -510,13 +510,16 @@ namespace edm { processConfiguration, std::string("PathStatusInserter")); - makePathStatusInserters(endPathStatusInserters_, - *endPathNames_, - prealloc, - preg, - areg, - processConfiguration, - std::string("EndPathStatusInserter")); + if (endPathNames_->size() > 1) { + //NOTE: FinalPaths are a type of EndPath + makePathStatusInserters(endPathStatusInserters_, + 
*endPathNames_, + prealloc, + preg, + areg, + processConfiguration, + std::string("EndPathStatusInserter")); + } assert(0 < prealloc.numberOfStreams()); streamSchedules_.reserve(prealloc.numberOfStreams()); diff --git a/FWCore/Framework/src/StreamSchedule.cc b/FWCore/Framework/src/StreamSchedule.cc index 433f60b78c5c2..d1a4d1df2a393 100644 --- a/FWCore/Framework/src/StreamSchedule.cc +++ b/FWCore/Framework/src/StreamSchedule.cc @@ -1027,13 +1027,16 @@ namespace edm { return; } } - for (int empty_end_path : empty_end_paths_) { - std::exception_ptr except = endPathStatusInserterWorkers_[empty_end_path] - ->runModuleDirectly>( - info, streamID_, ParentContext(&streamContext_), &streamContext_); - if (except) { - iTask.doneWaiting(except); - return; + if (not endPathStatusInserterWorkers_.empty()) { + for (int empty_end_path : empty_end_paths_) { + std::exception_ptr except = + endPathStatusInserterWorkers_[empty_end_path] + ->runModuleDirectly>( + info, streamID_, ParentContext(&streamContext_), &streamContext_); + if (except) { + iTask.doneWaiting(except); + return; + } } } diff --git a/FWCore/Framework/src/TransformerBase.cc b/FWCore/Framework/src/TransformerBase.cc index dd06093b6212b..8e5e9e20a5f69 100644 --- a/FWCore/Framework/src/TransformerBase.cc +++ b/FWCore/Framework/src/TransformerBase.cc @@ -7,8 +7,51 @@ #include "DataFormats/Provenance/interface/BranchDescription.h" #include "DataFormats/Provenance/interface/ModuleDescription.h" +#include "FWCore/ServiceRegistry/interface/ActivityRegistry.h" +#include "FWCore/ServiceRegistry/interface/ModuleCallingContext.h" #include +namespace { + class TransformSignalSentry { + public: + TransformSignalSentry(edm::ActivityRegistry* a, edm::StreamContext const& sc, edm::ModuleCallingContext const& mcc) + : a_(a), sc_(sc), mcc_(mcc) { + if (a_) + a_->preModuleTransformSignal_(sc_, mcc_); + } + ~TransformSignalSentry() { + if (a_) + a_->postModuleTransformSignal_(sc_, mcc_); + } + + private: + edm::ActivityRegistry* a_; // 
We do not use propagate_const because the registry itself is mutable. + edm::StreamContext const& sc_; + edm::ModuleCallingContext const& mcc_; + }; + + class TransformAcquiringSignalSentry { + public: + TransformAcquiringSignalSentry(edm::ActivityRegistry* a, + edm::StreamContext const& sc, + edm::ModuleCallingContext const& mcc) + : a_(a), sc_(sc), mcc_(mcc) { + if (a_) + a_->preModuleTransformAcquiringSignal_(sc_, mcc_); + } + ~TransformAcquiringSignalSentry() { + if (a_) + a_->postModuleTransformAcquiringSignal_(sc_, mcc_); + } + + private: + edm::ActivityRegistry* a_; // We do not use propagate_const because the registry itself is mutable. + edm::StreamContext const& sc_; + edm::ModuleCallingContext const& mcc_; + }; + +} // namespace + namespace edm { void TransformerBase::registerTransformImp( ProducerBase& iBase, EDPutToken iToken, const TypeID& id, std::string instanceName, TransformFunction iFunc) { @@ -65,11 +108,15 @@ namespace edm { void TransformerBase::transformImpAsync(edm::WaitingTaskHolder iHolder, std::size_t iIndex, + edm::ActivityRegistry* iAct, ProducerBase const& iBase, edm::EventForTransformer& iEvent) const { + auto const& mcc = iEvent.moduleCallingContext(); if (transformInfo_.get(iIndex)) { std::optional(iIndex), transformInfo_.get(iIndex)))> handle; + //transform acquiring signal + TransformAcquiringSignalSentry sentry(iAct, *mcc.getStreamContext(), mcc); CMS_SA_ALLOW try { handle = iEvent.get(transformInfo_.get(iIndex), transformInfo_.get(iIndex)); } catch (...) 
{ @@ -79,11 +126,14 @@ namespace edm { if (handle->wrapper()) { auto cache = std::make_shared(); auto nextTask = - edm::make_waiting_task([holder = iHolder, cache, iIndex, this, &iBase, handle = *handle, iEvent]( + edm::make_waiting_task([holder = iHolder, cache, iIndex, this, &iBase, handle = *handle, iEvent, iAct]( std::exception_ptr const* iPtr) mutable { if (iPtr) { holder.doneWaiting(*iPtr); } else { + //transform signal + auto mcc = iEvent.moduleCallingContext(); + TransformSignalSentry sentry(iAct, *mcc.getStreamContext(), mcc); iEvent.put(iBase.putTokenIndexToProductResolverIndex()[transformInfo_.get(iIndex).index()], transformInfo_.get(iIndex)(std::move(*cache)), handle); @@ -102,6 +152,8 @@ namespace edm { if (handle.wrapper()) { std::any v = handle.wrapper(); + //transform signal + TransformSignalSentry sentry(iAct, *mcc.getStreamContext(), mcc); iEvent.put(iBase.putTokenIndexToProductResolverIndex()[transformInfo_.get(iIndex).index()], transformInfo_.get(iIndex)(std::move(v)), handle); diff --git a/FWCore/Framework/src/TransitionProcessors.icc b/FWCore/Framework/src/TransitionProcessors.icc index 5757def9e30c3..dc819428738ea 100644 --- a/FWCore/Framework/src/TransitionProcessors.icc +++ b/FWCore/Framework/src/TransitionProcessors.icc @@ -101,17 +101,17 @@ public: edm::InputSource::ItemType processFiles(EventProcessor& iEP) { bool finished = false; - auto nextTransition = iEP.nextTransitionType(); - if (nextTransition != edm::InputSource::IsFile) + edm::InputSource::ItemType nextTransition = iEP.nextTransitionType(); + if (nextTransition != edm::InputSource::ItemType::IsFile) return nextTransition; do { switch (nextTransition) { - case edm::InputSource::IsFile: { + case edm::InputSource::ItemType::IsFile: { processFile(iEP); nextTransition = iEP.nextTransitionType(); break; } - case edm::InputSource::IsRun: { + case edm::InputSource::ItemType::IsRun: { nextTransition = runs_.processRuns(iEP); break; } diff --git a/FWCore/Framework/src/Worker.cc 
b/FWCore/Framework/src/Worker.cc index 50cd6ced86ccc..ced3d34d54937 100644 --- a/FWCore/Framework/src/Worker.cc +++ b/FWCore/Framework/src/Worker.cc @@ -252,23 +252,26 @@ namespace edm { ServiceWeakToken weakToken = iToken; //Need to make the services available early so other services can see them - auto task = make_waiting_task([this, iTask, weakToken, &iPrincipal, iTransformIndex, parent = mcc.parent()]( - std::exception_ptr const* iExcept) mutable { - if (iExcept) { - iTask.doneWaiting(*iExcept); - return; - } - implDoTransformAsync(iTask, iTransformIndex, iPrincipal, parent, weakToken); - }); + auto task = make_waiting_task( + [this, iTask, weakToken, &iPrincipal, iTransformIndex, mcc](std::exception_ptr const* iExcept) mutable { + //post prefetch signal + actReg_->postModuleTransformPrefetchingSignal_.emit(*mcc.getStreamContext(), mcc); + if (iExcept) { + iTask.doneWaiting(*iExcept); + return; + } + implDoTransformAsync(iTask, iTransformIndex, iPrincipal, mcc.parent(), weakToken); + }); - //NOTE: need different ModuleCallingContext. The ProductResolver will copy the context in order to get - // a longer lifetime than this function call. 
+ //pre prefetch signal + actReg_->preModuleTransformPrefetchingSignal_.emit(*mcc.getStreamContext(), mcc); iPrincipal.prefetchAsync( WaitingTaskHolder(*iTask.group(), task), itemToGetForTransform(iTransformIndex), false, iToken, &mcc); } void Worker::resetModuleDescription(ModuleDescription const* iDesc) { ModuleCallingContext temp(iDesc, + 0, moduleCallingContext_.state(), moduleCallingContext_.parent(), moduleCallingContext_.previousModuleOnThread()); diff --git a/FWCore/Framework/src/WorkerT.cc b/FWCore/Framework/src/WorkerT.cc index a208b112e193b..3c4fb9ee238d6 100644 --- a/FWCore/Framework/src/WorkerT.cc +++ b/FWCore/Framework/src/WorkerT.cc @@ -252,8 +252,8 @@ namespace edm { ServiceRegistry::Operate guard(weakToken.lock()); ModuleCallingContext mcc( - &module_->moduleDescription(), ModuleCallingContext::State::kPrefetching, iParent, nullptr); - module_->doTransformAsync(iTask, iTransformIndex, iEvent, activityRegistry(), &mcc, weakToken); + &module_->moduleDescription(), iTransformIndex + 1, ModuleCallingContext::State::kRunning, iParent, nullptr); + module_->doTransformAsync(iTask, iTransformIndex, iEvent, activityRegistry(), mcc, weakToken); } catch (...) 
{ iTask.doneWaiting(std::current_exception()); return; diff --git a/FWCore/Framework/src/global/EDFilterBase.cc b/FWCore/Framework/src/global/EDFilterBase.cc index bbf9fb931c7fc..fa2c23a87c149 100644 --- a/FWCore/Framework/src/global/EDFilterBase.cc +++ b/FWCore/Framework/src/global/EDFilterBase.cc @@ -87,11 +87,11 @@ namespace edm { void EDFilterBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - transformAsync_(iTask, iTransformIndex, ev, iToken); + transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } size_t EDFilterBase::transformIndex_(edm::BranchDescription const& iBranch) const { return -1; } @@ -99,6 +99,7 @@ namespace edm { void EDFilterBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void EDFilterBase::doPreallocate(PreallocationConfiguration const& iPrealloc) { diff --git a/FWCore/Framework/src/global/EDProducerBase.cc b/FWCore/Framework/src/global/EDProducerBase.cc index 1ef072ad3782d..1cbc8285383c1 100644 --- a/FWCore/Framework/src/global/EDProducerBase.cc +++ b/FWCore/Framework/src/global/EDProducerBase.cc @@ -93,11 +93,11 @@ namespace edm { void EDProducerBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - transformAsync_(iTask, iTransformIndex, ev, iToken); + transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } size_t EDProducerBase::transformIndex_(edm::BranchDescription const& iBranch) const { return -1; } @@ -105,6 +105,7 @@ namespace edm { void 
EDProducerBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void EDProducerBase::doPreallocate(PreallocationConfiguration const& iPrealloc) { diff --git a/FWCore/Framework/src/limited/EDFilterBase.cc b/FWCore/Framework/src/limited/EDFilterBase.cc index e0e657a659387..738c85d5435da 100644 --- a/FWCore/Framework/src/limited/EDFilterBase.cc +++ b/FWCore/Framework/src/limited/EDFilterBase.cc @@ -71,11 +71,11 @@ namespace edm { void EDFilterBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - transformAsync_(iTask, iTransformIndex, ev, iToken); + transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } size_t EDFilterBase::transformIndex_(edm::BranchDescription const& iBranch) const { return -1; } @@ -83,6 +83,7 @@ namespace edm { void EDFilterBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void EDFilterBase::doPreallocate(PreallocationConfiguration const& iPrealloc) { diff --git a/FWCore/Framework/src/limited/EDProducerBase.cc b/FWCore/Framework/src/limited/EDProducerBase.cc index b8ad9ab897985..251310b24af87 100644 --- a/FWCore/Framework/src/limited/EDProducerBase.cc +++ b/FWCore/Framework/src/limited/EDProducerBase.cc @@ -71,11 +71,11 @@ namespace edm { void EDProducerBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - transformAsync_(iTask, iTransformIndex, 
ev, iToken); + transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } size_t EDProducerBase::transformIndex_(edm::BranchDescription const& iBranch) const { return -1; } @@ -83,6 +83,7 @@ namespace edm { void EDProducerBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void EDProducerBase::doPreallocate(PreallocationConfiguration const& iPrealloc) { diff --git a/FWCore/Framework/src/one/EDFilterBase.cc b/FWCore/Framework/src/one/EDFilterBase.cc index 0771cac1d33ee..c10120425a0fd 100644 --- a/FWCore/Framework/src/one/EDFilterBase.cc +++ b/FWCore/Framework/src/one/EDFilterBase.cc @@ -83,11 +83,11 @@ namespace edm { void EDFilterBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - transformAsync_(iTask, iTransformIndex, ev, iToken); + transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } size_t EDFilterBase::transformIndex_(edm::BranchDescription const& iBranch) const { return -1; } @@ -95,6 +95,7 @@ namespace edm { void EDFilterBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void EDFilterBase::doPreallocate(PreallocationConfiguration const& iPrealloc) { diff --git a/FWCore/Framework/src/one/EDProducerBase.cc b/FWCore/Framework/src/one/EDProducerBase.cc index ec8a29acb8df4..3a3654bc2ca2b 100644 --- a/FWCore/Framework/src/one/EDProducerBase.cc +++ b/FWCore/Framework/src/one/EDProducerBase.cc @@ -74,11 +74,11 @@ namespace edm { void EDProducerBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext 
const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - transformAsync_(iTask, iTransformIndex, ev, iToken); + transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } size_t EDProducerBase::transformIndex_(edm::BranchDescription const& iBranch) const { return -1; } @@ -86,6 +86,7 @@ namespace edm { void EDProducerBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void EDProducerBase::doBeginJob() { diff --git a/FWCore/Framework/src/stream/EDProducerAdaptorBase.cc b/FWCore/Framework/src/stream/EDProducerAdaptorBase.cc index 5fb48a55b6c59..2ddba34bfa4e6 100644 --- a/FWCore/Framework/src/stream/EDProducerAdaptorBase.cc +++ b/FWCore/Framework/src/stream/EDProducerAdaptorBase.cc @@ -46,11 +46,11 @@ namespace edm { void ProducingModuleAdaptorBase::doTransformAsync(WaitingTaskHolder iTask, size_t iTransformIndex, EventPrincipal const& iEvent, - ActivityRegistry*, - ModuleCallingContext const* iMCC, + ActivityRegistry* iAct, + ModuleCallingContext iMCC, ServiceWeakToken const& iToken) { EventForTransformer ev(iEvent, iMCC); - m_streamModules[iEvent.streamID()]->transformAsync_(iTask, iTransformIndex, ev, iToken); + m_streamModules[iEvent.streamID()]->transformAsync_(iTask, iTransformIndex, ev, iAct, iToken); } // diff --git a/FWCore/Framework/src/stream/EDProducerBase.cc b/FWCore/Framework/src/stream/EDProducerBase.cc index f3f166e766279..90eb17f274c6f 100644 --- a/FWCore/Framework/src/stream/EDProducerBase.cc +++ b/FWCore/Framework/src/stream/EDProducerBase.cc @@ -74,6 +74,7 @@ edm::ProductResolverIndex EDProducerBase::transformPrefetch_(std::size_t iIndex) void EDProducerBase::transformAsync_(WaitingTaskHolder iTask, std::size_t iIndex, edm::EventForTransformer& iEvent, + edm::ActivityRegistry* iAct, ServiceWeakToken const& iToken) const {} void 
EDProducerBase::prevalidate(ConfigurationDescriptions& iConfig) { edmodule_mightGet_config(iConfig); } diff --git a/FWCore/Framework/src/stream/ProducingModuleAdaptorBase.cc b/FWCore/Framework/src/stream/ProducingModuleAdaptorBase.cc index eeebb42d7efa8..cecde033791d7 100644 --- a/FWCore/Framework/src/stream/ProducingModuleAdaptorBase.cc +++ b/FWCore/Framework/src/stream/ProducingModuleAdaptorBase.cc @@ -196,7 +196,7 @@ namespace edm { size_t iTransformIndex, EventPrincipal const& iEvent, ActivityRegistry*, - ModuleCallingContext const* iMCC, + ModuleCallingContext iMCC, ServiceWeakToken const&) {} template diff --git a/FWCore/Framework/test/BuildFile.xml b/FWCore/Framework/test/BuildFile.xml index 03296780a276b..e4f8b00603f68 100644 --- a/FWCore/Framework/test/BuildFile.xml +++ b/FWCore/Framework/test/BuildFile.xml @@ -367,6 +367,7 @@ + diff --git a/FWCore/Framework/test/MockEventProcessor.cc b/FWCore/Framework/test/MockEventProcessor.cc index 5747c48a15e30..2854873ebabb9 100644 --- a/FWCore/Framework/test/MockEventProcessor.cc +++ b/FWCore/Framework/test/MockEventProcessor.cc @@ -48,7 +48,7 @@ namespace edm { token t; if (not(input_ >> t)) { reachedEndOfInput_ = true; - return lastTransition_ = InputSource::IsStop; + return lastTransition_ = InputSource::ItemType::IsStop; } char ch = t.id; @@ -57,11 +57,11 @@ namespace edm { if (ch == 'r') { output_ << " *** nextItemType: Run " << t.value << " ***\n"; nextRun_ = static_cast(t.value); - return lastTransition_ = InputSource::IsRun; + return lastTransition_ = InputSource::ItemType::IsRun; } else if (ch == 'l') { output_ << " *** nextItemType: Lumi " << t.value << " ***\n"; nextLumi_ = static_cast(t.value); - return lastTransition_ = InputSource::IsLumi; + return lastTransition_ = InputSource::ItemType::IsLumi; } else if (ch == 'e') { output_ << " *** nextItemType: Event ***\n"; // a special value for test purposes only @@ -71,7 +71,7 @@ namespace edm { } else { shouldWeStop_ = false; } - return lastTransition_ = 
InputSource::IsEvent; + return lastTransition_ = InputSource::ItemType::IsEvent; } else if (ch == 'f') { output_ << " *** nextItemType: File " << t.value << " ***\n"; // a special value for test purposes only @@ -79,7 +79,7 @@ namespace edm { shouldWeCloseOutput_ = false; else shouldWeCloseOutput_ = true; - return lastTransition_ = InputSource::IsFile; + return lastTransition_ = InputSource::ItemType::IsFile; } else if (ch == 's') { output_ << " *** nextItemType: Stop " << t.value << " ***\n"; // a special value for test purposes only @@ -87,17 +87,17 @@ namespace edm { shouldWeEndLoop_ = false; else shouldWeEndLoop_ = true; - return lastTransition_ = InputSource::IsStop; + return lastTransition_ = InputSource::ItemType::IsStop; } else if (ch == 'x') { output_ << " *** nextItemType: Restart " << t.value << " ***\n"; shouldWeEndLoop_ = t.value; - return lastTransition_ = InputSource::IsStop; + return lastTransition_ = InputSource::ItemType::IsStop; } else if (ch == 't') { output_ << " *** nextItemType: Throw " << t.value << " ***\n"; shouldThrow_ = true; return nextTransitionType(); } - return lastTransition_ = InputSource::IsInvalid; + return lastTransition_ = InputSource::ItemType::IsInvalid; } InputSource::ItemType MockEventProcessor::lastTransitionType() const { return lastTransition_; } @@ -112,9 +112,9 @@ namespace edm { } readAndProcessEvent(); if (shouldWeStop()) { - return InputSource::IsEvent; + return InputSource::ItemType::IsEvent; } - } while (nextTransitionType() == InputSource::IsEvent); + } while (nextTransitionType() == InputSource::ItemType::IsEvent); return lastTransitionType(); } @@ -137,7 +137,7 @@ namespace edm { fp.normalEnd(); - if (trans != InputSource::IsStop) { + if (trans != InputSource::ItemType::IsStop) { //problem with the source doErrorStuff(); break; @@ -188,15 +188,15 @@ namespace edm { InputSource::ItemType MockEventProcessor::processRuns() { bool finished = false; - auto nextTransition = edm::InputSource::IsRun; + auto 
nextTransition = edm::InputSource::ItemType::IsRun; do { switch (nextTransition) { - case edm::InputSource::IsRun: { + case edm::InputSource::ItemType::IsRun: { processRun(); nextTransition = nextTransitionType(); break; } - case edm::InputSource::IsLumi: { + case edm::InputSource::ItemType::IsLumi: { nextTransition = processLumis(); break; } @@ -225,10 +225,10 @@ namespace edm { InputSource::ItemType MockEventProcessor::processLumis() { if (lumiStatus_ and currentLumiNumber_ == nextLumi_) { readAndMergeLumi(); - if (nextTransitionType() == InputSource::IsEvent) { + if (nextTransitionType() == InputSource::ItemType::IsEvent) { readAndProcessEvents(); if (shouldWeStop()) { - return edm::InputSource::IsStop; + return edm::InputSource::ItemType::IsStop; } } } else { @@ -239,10 +239,10 @@ namespace edm { throwIfNeeded(); didGlobalBeginLumiSucceed_ = true; //Need to do event processing here - if (nextTransitionType() == InputSource::IsEvent) { + if (nextTransitionType() == InputSource::ItemType::IsEvent) { readAndProcessEvents(); if (shouldWeStop()) { - return edm::InputSource::IsStop; + return edm::InputSource::ItemType::IsStop; } } } diff --git a/FWCore/Framework/test/MockEventProcessor.h b/FWCore/Framework/test/MockEventProcessor.h index 2768bc821f422..87c41793c0e0a 100644 --- a/FWCore/Framework/test/MockEventProcessor.h +++ b/FWCore/Framework/test/MockEventProcessor.h @@ -118,7 +118,7 @@ namespace edm { bool lumiStatus_ = false; LuminosityBlockNumber_t currentLumiNumber_ = 0; bool didGlobalBeginLumiSucceed_ = false; - InputSource::ItemType lastTransition_ = InputSource::IsInvalid; + InputSource::ItemType lastTransition_ = InputSource::ItemType::IsInvalid; bool currentRun_ = false; RunNumber_t currentRunNumber_ = 0; diff --git a/FWCore/Framework/test/stubs/TestStreamAnalyzers.cc b/FWCore/Framework/test/stubs/TestStreamAnalyzers.cc index 71f84e4469ea6..ae6a0f6357751 100644 --- a/FWCore/Framework/test/stubs/TestStreamAnalyzers.cc +++ 
b/FWCore/Framework/test/stubs/TestStreamAnalyzers.cc @@ -105,7 +105,7 @@ namespace edmtest { } }; - class RunIntAnalyzer : public edm::stream::EDAnalyzer> { + class RunIntAnalyzer : public edm::stream::EDAnalyzer, edm::stream::WatchRuns> { public: static std::atomic m_count; unsigned int trans_; @@ -163,7 +163,8 @@ namespace edmtest { } }; - class LumiIntAnalyzer : public edm::stream::EDAnalyzer> { + class LumiIntAnalyzer + : public edm::stream::EDAnalyzer, edm::stream::WatchLuminosityBlocks> { public: static std::atomic m_count; unsigned int trans_; @@ -236,8 +237,9 @@ namespace edmtest { } }; - class RunSummaryIntAnalyzer - : public edm::stream::EDAnalyzer, edm::RunSummaryCache> { + class RunSummaryIntAnalyzer : public edm::stream::EDAnalyzer, + edm::RunSummaryCache, + edm::stream::WatchRuns> { public: static std::atomic m_count; unsigned int trans_; @@ -321,7 +323,8 @@ namespace edmtest { }; class LumiSummaryIntAnalyzer : public edm::stream::EDAnalyzer, - edm::LuminosityBlockSummaryCache> { + edm::LuminosityBlockSummaryCache, + edm::stream::WatchLuminosityBlocks> { public: static std::atomic m_count; unsigned int trans_; diff --git a/FWCore/Framework/test/stubs/TestStreamFilters.cc b/FWCore/Framework/test/stubs/TestStreamFilters.cc index 6f773b8dc8cd8..3d8122fb9a567 100644 --- a/FWCore/Framework/test/stubs/TestStreamFilters.cc +++ b/FWCore/Framework/test/stubs/TestStreamFilters.cc @@ -106,7 +106,7 @@ namespace edmtest { } }; - class RunIntFilter : public edm::stream::EDFilter> { + class RunIntFilter : public edm::stream::EDFilter, edm::stream::WatchRuns> { public: static std::atomic m_count; unsigned int trans_; @@ -163,7 +163,8 @@ namespace edmtest { } }; - class LumiIntFilter : public edm::stream::EDFilter> { + class LumiIntFilter + : public edm::stream::EDFilter, edm::stream::WatchLuminosityBlocks> { public: static std::atomic m_count; unsigned int trans_; @@ -235,7 +236,8 @@ namespace edmtest { } }; - class RunSummaryIntFilter : public edm::stream::EDFilter, 
edm::RunSummaryCache> { + class RunSummaryIntFilter + : public edm::stream::EDFilter, edm::RunSummaryCache, edm::stream::WatchRuns> { public: static std::atomic m_count; unsigned int trans_; @@ -321,8 +323,9 @@ namespace edmtest { } }; - class LumiSummaryIntFilter - : public edm::stream::EDFilter, edm::LuminosityBlockSummaryCache> { + class LumiSummaryIntFilter : public edm::stream::EDFilter, + edm::LuminosityBlockSummaryCache, + edm::stream::WatchLuminosityBlocks> { public: static std::atomic m_count; unsigned int trans_; diff --git a/FWCore/Framework/test/stubs/TestStreamProducers.cc b/FWCore/Framework/test/stubs/TestStreamProducers.cc index 148b4abeec2fd..254ebfe2ed048 100644 --- a/FWCore/Framework/test/stubs/TestStreamProducers.cc +++ b/FWCore/Framework/test/stubs/TestStreamProducers.cc @@ -105,7 +105,7 @@ namespace edmtest { } }; - class RunIntProducer : public edm::stream::EDProducer> { + class RunIntProducer : public edm::stream::EDProducer, edm::stream::WatchRuns> { public: static std::atomic m_count; unsigned int trans_; @@ -160,7 +160,8 @@ namespace edmtest { } }; - class LumiIntProducer : public edm::stream::EDProducer> { + class LumiIntProducer + : public edm::stream::EDProducer, edm::stream::WatchLuminosityBlocks> { public: static std::atomic m_count; unsigned int trans_; @@ -229,8 +230,9 @@ namespace edmtest { } }; - class RunSummaryIntProducer - : public edm::stream::EDProducer, edm::RunSummaryCache> { + class RunSummaryIntProducer : public edm::stream::EDProducer, + edm::RunSummaryCache, + edm::stream::WatchRuns> { public: static std::atomic m_count; unsigned int trans_; @@ -315,7 +317,8 @@ namespace edmtest { }; class LumiSummaryIntProducer : public edm::stream::EDProducer, - edm::LuminosityBlockSummaryCache> { + edm::LuminosityBlockSummaryCache, + edm::stream::WatchLuminosityBlocks> { public: static std::atomic m_count; unsigned int trans_; diff --git a/FWCore/Framework/test/test_bad_schedule_exception_message_cfg.py 
b/FWCore/Framework/test/test_bad_schedule_exception_message_cfg.py index bd5d0995b32b1..cdc135313670d 100644 --- a/FWCore/Framework/test/test_bad_schedule_exception_message_cfg.py +++ b/FWCore/Framework/test/test_bad_schedule_exception_message_cfg.py @@ -98,4 +98,14 @@ process.p1 = cms.Path(process.a, cms.Task(process.b, process.c)) - +elif mod == 8: + #cycle with filter on other path + process.a = cms.EDProducer("IntProducer", ivalue = cms.int32(10)) + process.b = cms.EDProducer("AddIntsProducer", labels = cms.VInputTag("c")) + process.c = process.b.clone(labels=["a"]) + process.dependingFilter = cms.EDFilter("IntProductFilter", label=cms.InputTag("b"), threshold=cms.int32(1000)) + process.rejectingFilter = cms.EDFilter("TestFilterModule", acceptValue = cms.untracked.int32(-1)) + + process.p1 = cms.Path(process.c) + process.p2 = cms.Path(process.b + process.dependingFilter + process.a) + process.p3 = cms.Path(process.rejectingFilter + process.a) diff --git a/FWCore/Framework/test/unit_test_outputs/test_bad_schedule_8.log b/FWCore/Framework/test/unit_test_outputs/test_bad_schedule_8.log new file mode 100644 index 0000000000000..a01ba08a1f058 --- /dev/null +++ b/FWCore/Framework/test/unit_test_outputs/test_bad_schedule_8.log @@ -0,0 +1,6 @@ +An exception of category 'ScheduleExecutionFailure' occurred while + [0] Calling beginJob +Exception Message: +Unrunnable schedule +Dependent module later on Path + module 'dependingFilter' depends on 'a' which is later on path p2 diff --git a/FWCore/Framework/test/unit_test_outputs/test_deepCall_unscheduled.log b/FWCore/Framework/test/unit_test_outputs/test_deepCall_unscheduled.log index cffed5d4487cd..666665139e80b 100644 --- a/FWCore/Framework/test/unit_test_outputs/test_deepCall_unscheduled.log +++ b/FWCore/Framework/test/unit_test_outputs/test_deepCall_unscheduled.log @@ -80,9 +80,6 @@ ModuleCallingContext state = Running ++++ finished: source run ++++ starting: global begin run 1 : time = 1000000 ++++ finished: global 
begin run 1 : time = 1000000 -++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 -++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 -++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 1 time = 1000000 ++++++ starting: begin run for module: stream = 0 label = 'one' id = 4 StreamContext: StreamID = 0 transition = BeginRun @@ -109,6 +106,9 @@ ModuleCallingContext state = Running ProcessContext: TEST cf65ff121f7d0ed0d094247b80407269 ++++ finished: begin run: stream = 0 run = 1 time = 1000000 +++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 +++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 +++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: source lumi ++++ finished: source lumi ++++ starting: global begin lumi: run = 1 lumi = 1 time = 1000000 diff --git a/FWCore/Framework/test/unit_test_outputs/test_onPath_unscheduled.log b/FWCore/Framework/test/unit_test_outputs/test_onPath_unscheduled.log index 5f7598a2fd68c..7313caaa3ae8a 100644 --- a/FWCore/Framework/test/unit_test_outputs/test_onPath_unscheduled.log +++ b/FWCore/Framework/test/unit_test_outputs/test_onPath_unscheduled.log @@ -48,15 +48,15 @@ ++++ finished: source run ++++ starting: global begin run 1 : time = 1000000 ++++ finished: global begin run 1 : time = 1000000 -++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 -++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 -++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 1 time = 1000000 ++++++ starting: begin run for module: stream = 0 label = 'two' id = 5 ++++++ finished: begin run for module: stream = 0 label = 'two' id = 5 ++++++ starting: begin run for module: stream = 0 label = 'one' id = 3 ++++++ finished: begin run for module: stream = 0 label = 'one' id = 3 ++++ finished: begin run: stream = 0 run = 1 time = 1000000 +++++ queuing: EventSetup synchronization 
run: 1 lumi: 1 event: 0 +++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 +++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: source lumi ++++ finished: source lumi ++++ starting: global begin lumi: run = 1 lumi = 1 time = 1000000 diff --git a/FWCore/Integration/plugins/PutOrMergeTestSource.cc b/FWCore/Integration/plugins/PutOrMergeTestSource.cc index b7f654e3ab458..2e57b5e81d1a5 100644 --- a/FWCore/Integration/plugins/PutOrMergeTestSource.cc +++ b/FWCore/Integration/plugins/PutOrMergeTestSource.cc @@ -30,7 +30,7 @@ namespace edmtest { void registerProducts() final; private: - ItemType getNextItemType() final; + ItemTypeInfo getNextItemType() final; std::shared_ptr readRunAuxiliary_() final; std::shared_ptr readLuminosityBlockAuxiliary_() final; std::shared_ptr readFile_() final; @@ -109,21 +109,21 @@ void PutOrMergeTestSource::registerProducts() { productRegistryUpdate().copyProduct(thingWithEqualDesc_); } -InputSource::ItemType PutOrMergeTestSource::getNextItemType() { +InputSource::ItemTypeInfo PutOrMergeTestSource::getNextItemType() { switch (stage_) { case 0: { - return IsFile; + return ItemType::IsFile; } case 1: { - return IsRun; + return ItemType::IsRun; } case 2: { - return IsRun; + return ItemType::IsRun; } default: - return IsStop; + return ItemType::IsStop; } - return IsInvalid; + return ItemType::IsInvalid; } std::shared_ptr PutOrMergeTestSource::readRunAuxiliary_() { diff --git a/FWCore/Integration/plugins/SourceWithWaits.cc b/FWCore/Integration/plugins/SourceWithWaits.cc index 3175c84458f04..22b6009f8b394 100644 --- a/FWCore/Integration/plugins/SourceWithWaits.cc +++ b/FWCore/Integration/plugins/SourceWithWaits.cc @@ -63,14 +63,19 @@ namespace edmtest { static void fillDescriptions(edm::ConfigurationDescriptions&); private: - edm::InputSource::ItemType getNextItemType() override; + edm::InputSource::ItemTypeInfo getNextItemType() override; std::shared_ptr readRunAuxiliary_() override; std::shared_ptr 
readLuminosityBlockAuxiliary_() override; void readEvent_(edm::EventPrincipal&) override; - unsigned int timePerLumi_; // seconds + double timePerLumi_; // seconds + double sleepAfterStartOfRun_; // seconds std::vector eventsPerLumi_; unsigned int lumisPerRun_; + unsigned int multipleEntriesForRun_; + unsigned int multipleEntriesForLumi_; + bool declareLast_; + bool declareAllLast_; edm::EventNumber_t currentEvent_ = 0; edm::LuminosityBlockNumber_t currentLumi_ = 0; @@ -78,66 +83,148 @@ namespace edmtest { unsigned int currentFile_ = 0; unsigned int eventInCurrentLumi_ = 0; unsigned int lumiInCurrentRun_ = 0; + bool startedNewRun_ = false; + bool lastEventOfLumi_ = false; + bool noEventsInLumi_ = false; }; SourceWithWaits::SourceWithWaits(edm::ParameterSet const& pset, edm::InputSourceDescription const& desc) : edm::InputSource(pset, desc), - timePerLumi_(pset.getUntrackedParameter("timePerLumi")), + timePerLumi_(pset.getUntrackedParameter("timePerLumi")), + sleepAfterStartOfRun_(pset.getUntrackedParameter("sleepAfterStartOfRun")), eventsPerLumi_(pset.getUntrackedParameter>("eventsPerLumi")), - lumisPerRun_(pset.getUntrackedParameter("lumisPerRun")) {} + lumisPerRun_(pset.getUntrackedParameter("lumisPerRun")), + multipleEntriesForRun_(pset.getUntrackedParameter("multipleEntriesForRun")), + multipleEntriesForLumi_(pset.getUntrackedParameter("multipleEntriesForLumi")), + declareLast_(pset.getUntrackedParameter("declareLast")), + declareAllLast_(pset.getUntrackedParameter("declareAllLast")) {} SourceWithWaits::~SourceWithWaits() {} void SourceWithWaits::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; - desc.addUntracked("timePerLumi"); + desc.addUntracked("timePerLumi"); + desc.addUntracked("sleepAfterStartOfRun"); desc.addUntracked>("eventsPerLumi"); desc.addUntracked("lumisPerRun"); + desc.addUntracked("multipleEntriesForRun", 0); + desc.addUntracked("multipleEntriesForLumi", 0); + desc.addUntracked("declareLast", 
false); + desc.addUntracked("declareAllLast", false); descriptions.add("source", desc); } - edm::InputSource::ItemType SourceWithWaits::getNextItemType() { + edm::InputSource::ItemTypeInfo SourceWithWaits::getNextItemType() { constexpr unsigned int secondsToMicroseconds = 1000000; + if (startedNewRun_) { + usleep(secondsToMicroseconds * sleepAfterStartOfRun_); + startedNewRun_ = false; + } + + if (lastEventOfLumi_ || noEventsInLumi_) { + usleep(secondsToMicroseconds * timePerLumi_ / (eventsPerLumi_[currentLumi_ - 1] + 1)); + lastEventOfLumi_ = false; + noEventsInLumi_ = false; + } + // First three cases are for the initial file, run, and lumi transitions // Note that there will always be at exactly one file and at least // one run from this test source. if (currentFile_ == 0u) { ++currentFile_; - return edm::InputSource::IsFile; - } else if (currentRun_ == 0u) { + return ItemType::IsFile; + } + // First Run + else if (currentRun_ == 0u) { ++currentRun_; - return edm::InputSource::IsRun; - } else if (currentLumi_ == 0u) { + if (currentRun_ != multipleEntriesForRun_) { + startedNewRun_ = true; + auto const position = + (declareLast_ || declareAllLast_) ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsRun, position); + } else { + // declareAllLast_ with multipleEntriesForRun_ or multipleEntriesForLumi_ is an intentional bug, used to test + // if the Framework detects the potential InputSource bug and throws an exception. + auto const position = declareAllLast_ ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsRun, position); + } + } + // If configured, a second Entry for the same run number and reduced ProcessHistoryID + else if (currentRun_ == multipleEntriesForRun_) { + multipleEntriesForRun_ = 0; + startedNewRun_ = true; + auto const position = + (declareLast_ || declareAllLast_) ? 
ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsRun, position); + } + // First lumi + else if (currentLumi_ == 0u && lumisPerRun_ != 0) { ++currentLumi_; ++lumiInCurrentRun_; // The job will stop when we hit the end of the eventsPerLumi vector // unless maxEvents stopped it earlier. if ((currentLumi_ - 1) >= eventsPerLumi_.size()) { - return edm::InputSource::IsStop; + return ItemType::IsStop; } - return edm::InputSource::IsLumi; + if (currentLumi_ != multipleEntriesForLumi_) { + if (eventsPerLumi_[currentLumi_ - 1] == 0) { + noEventsInLumi_ = true; + } + auto const position = + (declareLast_ || declareAllLast_) ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsLumi, position); + } else { + // declareAllLast_ with multipleEntriesForRun_ or multipleEntriesForLumi_ is an intentional bug, used to test + // if the Framework detects the potential InputSource bug and throws an exception. + auto const position = declareAllLast_ ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsLumi, position); + } + } + // If configured, a second Entry for the same lumi number in the same run + else if (currentLumi_ == multipleEntriesForLumi_ && lumisPerRun_ != 0) { + multipleEntriesForLumi_ = 0; + if (eventsPerLumi_[currentLumi_ - 1] == 0) { + noEventsInLumi_ = true; + } + auto const position = + (declareLast_ || declareAllLast_) ? 
ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsLumi, position); } - // Handle more events in the current lumi - else if (eventInCurrentLumi_ < eventsPerLumi_[currentLumi_ - 1]) { + // Handle events in the current lumi + else if (eventInCurrentLumi_ < eventsPerLumi_[currentLumi_ - 1] && lumisPerRun_ != 0) { // note the argument to usleep is microseconds, timePerLumi_ is in seconds usleep(secondsToMicroseconds * timePerLumi_ / (eventsPerLumi_[currentLumi_ - 1] + 1)); ++eventInCurrentLumi_; ++currentEvent_; - return edm::InputSource::IsEvent; + if (eventInCurrentLumi_ == eventsPerLumi_[currentLumi_ - 1]) { + lastEventOfLumi_ = true; + } + return ItemType::IsEvent; } // Next lumi else if (lumiInCurrentRun_ < lumisPerRun_) { - usleep(secondsToMicroseconds * timePerLumi_ / (eventsPerLumi_[currentLumi_ - 1] + 1)); ++currentLumi_; ++lumiInCurrentRun_; // The job will stop when we hit the end of the eventsPerLumi vector // unless maxEvents stopped it earlier. if ((currentLumi_ - 1) >= eventsPerLumi_.size()) { - return edm::InputSource::IsStop; + return ItemType::IsStop; } eventInCurrentLumi_ = 0; - return edm::InputSource::IsLumi; + if (currentLumi_ != multipleEntriesForLumi_) { + if (eventsPerLumi_[currentLumi_ - 1] == 0) { + noEventsInLumi_ = true; + } + auto const position = + (declareLast_ || declareAllLast_) ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsLumi, position); + } else { + // declareAllLast_ with multipleEntriesForRun_ or multipleEntriesForLumi_ is an intentional bug, used to test + // if the Framework detects the potential InputSource bug and throws an exception. + auto const position = declareAllLast_ ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsLumi, position); + } } // Next run else { @@ -145,16 +232,30 @@ namespace edmtest { // unless maxEvents stopped it earlier. 
Don't start the run if // it will end with no lumis in it. if (currentLumi_ >= eventsPerLumi_.size()) { - return edm::InputSource::IsStop; + return ItemType::IsStop; } ++currentRun_; + // Avoid infinite job if lumisPerRun_ is 0 + if (currentRun_ > 100) { + return ItemType::IsStop; + } lumiInCurrentRun_ = 0; - return edm::InputSource::IsRun; + if (currentRun_ != multipleEntriesForRun_) { + startedNewRun_ = true; + auto const position = + (declareLast_ || declareAllLast_) ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsRun, position); + } else { + // declareAllLast_ with multipleEntriesForRun_ or multipleEntriesForLumi_ is an intentional bug, used to test + // if the Framework detects the potential InputSource bug and throws an exception. + auto const position = declareAllLast_ ? ItemPosition::LastItemToBeMerged : ItemPosition::NotLastItemToBeMerged; + return ItemTypeInfo(ItemType::IsRun, position); + } } // Should be impossible to get here assert(false); // return something so it will compile - return edm::InputSource::IsStop; + return ItemType::IsStop; } std::shared_ptr SourceWithWaits::readRunAuxiliary_() { diff --git a/FWCore/Integration/test/BuildFile.xml b/FWCore/Integration/test/BuildFile.xml index 123e0cb0cf339..9a56a76ce0497 100644 --- a/FWCore/Integration/test/BuildFile.xml +++ b/FWCore/Integration/test/BuildFile.xml @@ -40,6 +40,7 @@ + @@ -78,6 +79,7 @@ + @@ -95,6 +97,8 @@ + + diff --git a/FWCore/Integration/test/check_empty_event_cfg.py b/FWCore/Integration/test/check_empty_event_cfg.py new file mode 100644 index 0000000000000..bb0c69925e27a --- /dev/null +++ b/FWCore/Integration/test/check_empty_event_cfg.py @@ -0,0 +1,14 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("EMPTY") + +process.test = cms.EDAnalyzer("EventContentAnalyzer", listPathStatus = cms.untracked.bool(True)) + +process.e = cms.EndPath(process.test) + +#process.out = cms.OutputModule("AsciiOutputModule") 
+#process.e2 = cms.EndPath(process.out) + +process.source = cms.Source("EmptySource") + +process.maxEvents.input = 1 diff --git a/FWCore/Integration/test/inputSourceTest.sh b/FWCore/Integration/test/inputSourceTest.sh index 5e58120c58178..a2dc21bb77753 100755 --- a/FWCore/Integration/test/inputSourceTest.sh +++ b/FWCore/Integration/test/inputSourceTest.sh @@ -5,3 +5,20 @@ function die { echo $1: status $2 ; exit $2; } cmsRun ${SCRAM_TEST_PATH}/inputSourceTest_cfg.py || die 'Failed in inputSourceTest_cfg.py' $? cmsRun ${SCRAM_TEST_PATH}/testLateLumiClosure_cfg.py || die 'Failed in testLateLumiClosure_cfg.py' $? + +# The following demonstrates declaring the last run or lumi entry to be merged eliminates the delay +# before globalBeginRun and globalBeginLumi while waiting for the next thing to arrive +# to know that the last entry to be merged has already arrived. (Note the previous test is very similar +# and shows the delay, running without enableDeclareLast will also demonstrate it). + +cmsRun ${SCRAM_TEST_PATH}/testDeclareLastEntryForMerge_cfg.py --enableDeclareLast --multipleEntriesForRun 2 --multipleEntriesForLumi 4 || die 'Failed in testDeclareLastEntryForMerge_cfg.py' $? + +# The next two cmsRun processes should throw an exception (intentional) +# These two tests show the Framework will detect a buggy InputSource that +# declares something last that is NOT last. 
+ +cmsRun ${SCRAM_TEST_PATH}/testDeclareLastEntryForMerge_cfg.py --enableDeclareAllLast --multipleEntriesForRun 1 && die 'Failed in testDeclareLastEntryForMerge_cfg.py, last run source bug not detected' 1 + +cmsRun ${SCRAM_TEST_PATH}/testDeclareLastEntryForMerge_cfg.py --enableDeclareAllLast --multipleEntriesForLumi 2 && die 'Failed in testDeclareLastEntryForMerge_cfg.py, last lumi source bug not detected' 1 + +exit 0 diff --git a/FWCore/Integration/test/makeEmptyRootFile.py b/FWCore/Integration/test/makeEmptyRootFile.py new file mode 100644 index 0000000000000..21e395362cb1d --- /dev/null +++ b/FWCore/Integration/test/makeEmptyRootFile.py @@ -0,0 +1,13 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("WRITE") + +process.source = cms.Source("EmptySource") + +process.maxEvents.input = 10 + +process.out = cms.OutputModule("PoolOutputModule", + outputCommands = cms.untracked.vstring("drop *"), + fileName = cms.untracked.string("empty.root")) + +process.o = cms.EndPath(process.out) diff --git a/FWCore/Integration/test/run_TestEmptyRootFile.sh b/FWCore/Integration/test/run_TestEmptyRootFile.sh new file mode 100755 index 0000000000000..a5c4ec3e9bcdf --- /dev/null +++ b/FWCore/Integration/test/run_TestEmptyRootFile.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +function die { echo Failure $1: status $2 ; exit $2 ; } + +LOCAL_TEST_DIR=${SCRAM_TEST_PATH} + +echo "write empty file" +cmsRun ${LOCAL_TEST_DIR}/makeEmptyRootFile.py || die "cmsRun makeEmptyRootFile.py" $? + +echo "read empty file" +cmsRun ${LOCAL_TEST_DIR}/useEmptyRootFile.py || die "cmsRun useEmptyRootFile.py" $? 
+ +exit 0 diff --git a/FWCore/Integration/test/testDeclareLastEntryForMerge_cfg.py b/FWCore/Integration/test/testDeclareLastEntryForMerge_cfg.py new file mode 100644 index 0000000000000..58bedbbb7dbe0 --- /dev/null +++ b/FWCore/Integration/test/testDeclareLastEntryForMerge_cfg.py @@ -0,0 +1,57 @@ + +import FWCore.ParameterSet.Config as cms +import sys +import argparse + +parser = argparse.ArgumentParser(prog=sys.argv[0], description='Test InputSource Declaring last run or lumi entry for merge') + +parser.add_argument("--enableDeclareLast", action="store_true", help="Declare last entry for merge") +parser.add_argument("--enableDeclareAllLast", action="store_true", help="Declare all entries as last for merge (force intentional source bug)") +parser.add_argument("--multipleEntriesForRun", type=int) +parser.add_argument("--multipleEntriesForLumi", type=int) + +args = parser.parse_args() + +process = cms.Process("PROD") + +process.options = dict( + numberOfThreads = 2, + numberOfStreams = 2, + numberOfConcurrentRuns = 1, + numberOfConcurrentLuminosityBlocks = 2 +) + +process.Tracer = cms.Service("Tracer", + printTimestamps = cms.untracked.bool(True) +) + +process.source = cms.Source("SourceWithWaits", + timePerLumi = cms.untracked.double(1), + sleepAfterStartOfRun = cms.untracked.double(0.25), + eventsPerLumi = cms.untracked.vuint32(4,0,5,4,0,5), + lumisPerRun = cms.untracked.uint32(3), + declareLast = cms.untracked.bool(False), + declareAllLast = cms.untracked.bool(False), + multipleEntriesForLumi = cms.untracked.uint32(0), + multipleEntriesForRun = cms.untracked.uint32(0) +) + +if args.enableDeclareLast: + process.source.declareLast = True + +if args.enableDeclareAllLast: + process.source.declareAllLast = True + +if args.multipleEntriesForLumi is not None: + process.source.multipleEntriesForLumi = args.multipleEntriesForLumi + +if args.multipleEntriesForRun is not None: + process.source.multipleEntriesForRun = args.multipleEntriesForRun + +process.sleepingProducer = 
cms.EDProducer("timestudy::SleepingProducer", + ivalue = cms.int32(1), + consumes = cms.VInputTag(), + eventTimes = cms.vdouble(0.1, 0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1) +) + +process.p = cms.Path(process.sleepingProducer) diff --git a/FWCore/Integration/test/testLateLumiClosure_cfg.py b/FWCore/Integration/test/testLateLumiClosure_cfg.py index 8b01cd99c132e..3589d7ac62277 100644 --- a/FWCore/Integration/test/testLateLumiClosure_cfg.py +++ b/FWCore/Integration/test/testLateLumiClosure_cfg.py @@ -56,7 +56,8 @@ ) process.source = cms.Source("SourceWithWaits", - timePerLumi = cms.untracked.uint32(1), + timePerLumi = cms.untracked.double(1), + sleepAfterStartOfRun = cms.untracked.double(0.25), eventsPerLumi = cms.untracked.vuint32(4,0,5), lumisPerRun = cms.untracked.uint32(100) ) diff --git a/FWCore/Integration/test/unit_test_outputs/testGetBy1.log b/FWCore/Integration/test/unit_test_outputs/testGetBy1.log index 0af9db0bd10ae..1b5454140ad28 100644 --- a/FWCore/Integration/test/unit_test_outputs/testGetBy1.log +++ b/FWCore/Integration/test/unit_test_outputs/testGetBy1.log @@ -6,73 +6,69 @@ Module type=IntSource, Module label=source, Parameter Set ID=6f0b3d3a362a6270c80 ++++ finished: constructing module with label 'TriggerResults' id = 1 ++++ starting: constructing module with label 'p' id = 2 ++++ finished: constructing module with label 'p' id = 2 -++++ starting: constructing module with label 'e' id = 3 -++++ finished: constructing module with label 'e' id = 3 -++++ starting: constructing module with label 'intProducer' id = 4 -++++ finished: constructing module with label 'intProducer' id = 4 -++++ starting: constructing module with label 'a1' id = 5 +++++ starting: constructing module with label 'intProducer' id = 3 +++++ finished: constructing module with label 'intProducer' id = 3 +++++ starting: constructing module with label 'a1' id = 4 Module type=TestFindProduct, Module label=a1, Parameter Set ID=a7caa43fcf5ef35dee69e4bd85169d4c -++++ finished: constructing 
module with label 'a1' id = 5 +++++ finished: constructing module with label 'a1' id = 4 Module type=TestFindProduct, Module label=a1, Parameter Set ID=a7caa43fcf5ef35dee69e4bd85169d4c -++++ starting: constructing module with label 'a2' id = 6 -++++ finished: constructing module with label 'a2' id = 6 -++++ starting: constructing module with label 'a3' id = 7 -++++ finished: constructing module with label 'a3' id = 7 -++++ starting: constructing module with label 'out' id = 8 -++++ finished: constructing module with label 'out' id = 8 -++++ starting: constructing module with label 'intProducerA' id = 9 +++++ starting: constructing module with label 'a2' id = 5 +++++ finished: constructing module with label 'a2' id = 5 +++++ starting: constructing module with label 'a3' id = 6 +++++ finished: constructing module with label 'a3' id = 6 +++++ starting: constructing module with label 'out' id = 7 +++++ finished: constructing module with label 'out' id = 7 +++++ starting: constructing module with label 'intProducerA' id = 8 Module type=IntProducer, Module label=intProducerA, Parameter Set ID=56ea7c8bbb02df4e1c3b945954838318 -++++ finished: constructing module with label 'intProducerA' id = 9 +++++ finished: constructing module with label 'intProducerA' id = 8 Module type=IntProducer, Module label=intProducerA, Parameter Set ID=56ea7c8bbb02df4e1c3b945954838318 -++++ starting: constructing module with label 'intProducerB' id = 10 -++++ finished: constructing module with label 'intProducerB' id = 10 -++++ starting: constructing module with label 'intProducerBeginProcessBlock' id = 11 -++++ finished: constructing module with label 'intProducerBeginProcessBlock' id = 11 -++++ starting: constructing module with label 'intProducerEndProcessBlock' id = 12 -++++ finished: constructing module with label 'intProducerEndProcessBlock' id = 12 -++++ starting: constructing module with label 'intProducerU' id = 13 -++++ finished: constructing module with label 'intProducerU' id = 13 
-++++ starting: constructing module with label 'intVectorProducer' id = 14 -++++ finished: constructing module with label 'intVectorProducer' id = 14 +++++ starting: constructing module with label 'intProducerB' id = 9 +++++ finished: constructing module with label 'intProducerB' id = 9 +++++ starting: constructing module with label 'intProducerBeginProcessBlock' id = 10 +++++ finished: constructing module with label 'intProducerBeginProcessBlock' id = 10 +++++ starting: constructing module with label 'intProducerEndProcessBlock' id = 11 +++++ finished: constructing module with label 'intProducerEndProcessBlock' id = 11 +++++ starting: constructing module with label 'intProducerU' id = 12 +++++ finished: constructing module with label 'intProducerU' id = 12 +++++ starting: constructing module with label 'intVectorProducer' id = 13 +++++ finished: constructing module with label 'intVectorProducer' id = 13 ++ preallocate: 1 concurrent runs, 1 concurrent luminosity sections, 1 streams ++ starting: begin job -++++ starting: begin job for module with label 'intProducerA' id = 9 +++++ starting: begin job for module with label 'intProducerA' id = 8 Module type=IntProducer, Module label=intProducerA, Parameter Set ID=56ea7c8bbb02df4e1c3b945954838318 -++++ finished: begin job for module with label 'intProducerA' id = 9 +++++ finished: begin job for module with label 'intProducerA' id = 8 Module type=IntProducer, Module label=intProducerA, Parameter Set ID=56ea7c8bbb02df4e1c3b945954838318 -++++ starting: begin job for module with label 'intProducerB' id = 10 -++++ finished: begin job for module with label 'intProducerB' id = 10 -++++ starting: begin job for module with label 'intProducerBeginProcessBlock' id = 11 -++++ finished: begin job for module with label 'intProducerBeginProcessBlock' id = 11 -++++ starting: begin job for module with label 'intProducerEndProcessBlock' id = 12 -++++ finished: begin job for module with label 'intProducerEndProcessBlock' id = 12 -++++ 
starting: begin job for module with label 'intProducerU' id = 13 -++++ finished: begin job for module with label 'intProducerU' id = 13 -++++ starting: begin job for module with label 'intVectorProducer' id = 14 -++++ finished: begin job for module with label 'intVectorProducer' id = 14 -++++ starting: begin job for module with label 'intProducer' id = 4 -++++ finished: begin job for module with label 'intProducer' id = 4 -++++ starting: begin job for module with label 'a1' id = 5 +++++ starting: begin job for module with label 'intProducerB' id = 9 +++++ finished: begin job for module with label 'intProducerB' id = 9 +++++ starting: begin job for module with label 'intProducerBeginProcessBlock' id = 10 +++++ finished: begin job for module with label 'intProducerBeginProcessBlock' id = 10 +++++ starting: begin job for module with label 'intProducerEndProcessBlock' id = 11 +++++ finished: begin job for module with label 'intProducerEndProcessBlock' id = 11 +++++ starting: begin job for module with label 'intProducerU' id = 12 +++++ finished: begin job for module with label 'intProducerU' id = 12 +++++ starting: begin job for module with label 'intVectorProducer' id = 13 +++++ finished: begin job for module with label 'intVectorProducer' id = 13 +++++ starting: begin job for module with label 'intProducer' id = 3 +++++ finished: begin job for module with label 'intProducer' id = 3 +++++ starting: begin job for module with label 'a1' id = 4 Module type=TestFindProduct, Module label=a1, Parameter Set ID=a7caa43fcf5ef35dee69e4bd85169d4c -++++ finished: begin job for module with label 'a1' id = 5 +++++ finished: begin job for module with label 'a1' id = 4 Module type=TestFindProduct, Module label=a1, Parameter Set ID=a7caa43fcf5ef35dee69e4bd85169d4c -++++ starting: begin job for module with label 'a2' id = 6 -++++ finished: begin job for module with label 'a2' id = 6 -++++ starting: begin job for module with label 'a3' id = 7 -++++ finished: begin job for module with 
label 'a3' id = 7 -++++ starting: begin job for module with label 'out' id = 8 -++++ finished: begin job for module with label 'out' id = 8 +++++ starting: begin job for module with label 'a2' id = 5 +++++ finished: begin job for module with label 'a2' id = 5 +++++ starting: begin job for module with label 'a3' id = 6 +++++ finished: begin job for module with label 'a3' id = 6 +++++ starting: begin job for module with label 'out' id = 7 +++++ finished: begin job for module with label 'out' id = 7 ++++ starting: begin job for module with label 'TriggerResults' id = 1 ++++ finished: begin job for module with label 'TriggerResults' id = 1 ++++ starting: begin job for module with label 'p' id = 2 ++++ finished: begin job for module with label 'p' id = 2 -++++ starting: begin job for module with label 'e' id = 3 -++++ finished: begin job for module with label 'e' id = 3 ++ starting: begin job ++ finished: begin job -++++ starting: begin stream for module: stream = 0 label = 'intProducer' id = 4 -++++ finished: begin stream for module: stream = 0 label = 'intProducer' id = 4 -++++ starting: begin stream for module: stream = 0 label = 'a1' id = 5 +++++ starting: begin stream for module: stream = 0 label = 'intProducer' id = 3 +++++ finished: begin stream for module: stream = 0 label = 'intProducer' id = 3 +++++ starting: begin stream for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = BeginStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -84,7 +80,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ finished: begin stream for module: stream = 0 label = 'a1' id = 5 +++++ finished: begin stream for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = BeginStream run: 0 lumi: 0 event: 0 runIndex = 
4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -96,19 +92,17 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ starting: begin stream for module: stream = 0 label = 'a2' id = 6 -++++ finished: begin stream for module: stream = 0 label = 'a2' id = 6 -++++ starting: begin stream for module: stream = 0 label = 'a3' id = 7 -++++ finished: begin stream for module: stream = 0 label = 'a3' id = 7 +++++ starting: begin stream for module: stream = 0 label = 'a2' id = 5 +++++ finished: begin stream for module: stream = 0 label = 'a2' id = 5 +++++ starting: begin stream for module: stream = 0 label = 'a3' id = 6 +++++ finished: begin stream for module: stream = 0 label = 'a3' id = 6 ++++ starting: begin stream for module: stream = 0 label = 'TriggerResults' id = 1 ++++ finished: begin stream for module: stream = 0 label = 'TriggerResults' id = 1 -++++ starting: begin stream for module: stream = 0 label = 'out' id = 8 -++++ finished: begin stream for module: stream = 0 label = 'out' id = 8 +++++ starting: begin stream for module: stream = 0 label = 'out' id = 7 +++++ finished: begin stream for module: stream = 0 label = 'out' id = 7 ++++ starting: begin stream for module: stream = 0 label = 'p' id = 2 ++++ finished: begin stream for module: stream = 0 label = 'p' id = 2 -++++ starting: begin stream for module: stream = 0 label = 'e' id = 3 -++++ finished: begin stream for module: stream = 0 label = 'e' id = 3 -++++ starting: begin stream for module: stream = 0 label = 'intProducerA' id = 9 +++++ starting: begin stream for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = BeginStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -120,7 +114,7 @@ ModuleCallingContext state = Running runIndex = 
4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ finished: begin stream for module: stream = 0 label = 'intProducerA' id = 9 +++++ finished: begin stream for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = BeginStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -132,25 +126,25 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ starting: begin stream for module: stream = 0 label = 'intProducerB' id = 10 -++++ finished: begin stream for module: stream = 0 label = 'intProducerB' id = 10 -++++ starting: begin stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 11 -++++ finished: begin stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 11 -++++ starting: begin stream for module: stream = 0 label = 'intProducerEndProcessBlock' id = 12 -++++ finished: begin stream for module: stream = 0 label = 'intProducerEndProcessBlock' id = 12 -++++ starting: begin stream for module: stream = 0 label = 'intProducerU' id = 13 -++++ finished: begin stream for module: stream = 0 label = 'intProducerU' id = 13 -++++ starting: begin stream for module: stream = 0 label = 'intVectorProducer' id = 14 -++++ finished: begin stream for module: stream = 0 label = 'intVectorProducer' id = 14 +++++ starting: begin stream for module: stream = 0 label = 'intProducerB' id = 9 +++++ finished: begin stream for module: stream = 0 label = 'intProducerB' id = 9 +++++ starting: begin stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 10 +++++ finished: begin stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 10 +++++ starting: begin stream for module: stream = 0 label = 
'intProducerEndProcessBlock' id = 11 +++++ finished: begin stream for module: stream = 0 label = 'intProducerEndProcessBlock' id = 11 +++++ starting: begin stream for module: stream = 0 label = 'intProducerU' id = 12 +++++ finished: begin stream for module: stream = 0 label = 'intProducerU' id = 12 +++++ starting: begin stream for module: stream = 0 label = 'intVectorProducer' id = 13 +++++ finished: begin stream for module: stream = 0 label = 'intVectorProducer' id = 13 ++++ starting: begin process block GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'a3' id = 7 -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'a2' id = 6 -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'a3' id = 6 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'a2' id = 5 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'a1' id = 4 GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -162,12 +156,12 @@ ModuleCallingContext state = Prefetching runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 12 -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++++ finished: prefetching before 
processing begin ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++ starting: begin process block for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++ finished: begin process block for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 11 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++ starting: begin process block for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++ finished: begin process block for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'a1' id = 4 GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -179,7 +173,7 @@ ModuleCallingContext state = Prefetching runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: begin process block for module: label = 'a1' id = 5 +++++++ starting: begin process block for module: label = 'a1' id = 4 GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -191,7 +185,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: begin process block for module: label = 'a1' 
id = 5 +++++++ finished: begin process block for module: label = 'a1' id = 4 GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -203,15 +197,15 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 12 -++++++ starting: begin process block for module: label = 'intProducerEndProcessBlock' id = 12 -++++++ finished: begin process block for module: label = 'intProducerEndProcessBlock' id = 12 -++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'a2' id = 6 -++++++ starting: begin process block for module: label = 'a2' id = 6 -++++++ finished: begin process block for module: label = 'a2' id = 6 -++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'a3' id = 7 -++++++ starting: begin process block for module: label = 'a3' id = 7 -++++++ finished: begin process block for module: label = 'a3' id = 7 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 11 +++++++ starting: begin process block for module: label = 'intProducerEndProcessBlock' id = 11 +++++++ finished: begin process block for module: label = 'intProducerEndProcessBlock' id = 11 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'a2' id = 5 +++++++ starting: begin process block for module: label = 'a2' id = 5 +++++++ finished: begin process block for module: label = 'a2' id = 5 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'a3' id = 6 +++++++ starting: begin process block for module: label = 'a3' id = 6 +++++++ finished: begin 
process block for module: label = 'a3' id = 6 ++++ finished: begin process block GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 @@ -243,9 +237,9 @@ GlobalContext: transition = BeginRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing global begin Run for module: label = 'a3' id = 7 -++++++++ starting: prefetching before processing global begin Run for module: label = 'a2' id = 6 -++++++++ starting: prefetching before processing global begin Run for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing global begin Run for module: label = 'a3' id = 6 +++++++++ starting: prefetching before processing global begin Run for module: label = 'a2' id = 5 +++++++++ starting: prefetching before processing global begin Run for module: label = 'a1' id = 4 GlobalContext: transition = BeginRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -257,7 +251,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global begin Run for module: label = 'a1' id = 5 +++++++++ finished: prefetching before processing global begin Run for module: label = 'a1' id = 4 GlobalContext: transition = BeginRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -269,7 +263,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: global begin run for module: label = 'a1' id = 5 +++++++ starting: global begin run for module: label = 'a1' id = 4 GlobalContext: transition = BeginRun 
run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -281,7 +275,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: global begin run for module: label = 'a1' id = 5 +++++++ finished: global begin run for module: label = 'a1' id = 4 GlobalContext: transition = BeginRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -293,12 +287,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global begin Run for module: label = 'a2' id = 6 -++++++ starting: global begin run for module: label = 'a2' id = 6 -++++++ finished: global begin run for module: label = 'a2' id = 6 -++++++++ finished: prefetching before processing global begin Run for module: label = 'a3' id = 7 -++++++ starting: global begin run for module: label = 'a3' id = 7 -++++++ finished: global begin run for module: label = 'a3' id = 7 +++++++++ finished: prefetching before processing global begin Run for module: label = 'a2' id = 5 +++++++ starting: global begin run for module: label = 'a2' id = 5 +++++++ finished: global begin run for module: label = 'a2' id = 5 +++++++++ finished: prefetching before processing global begin Run for module: label = 'a3' id = 6 +++++++ starting: global begin run for module: label = 'a3' id = 6 +++++++ finished: global begin run for module: label = 'a3' id = 6 ++++ finished: global begin run 1 : time = 1 GlobalContext: transition = BeginRun run: 1 luminosityBlock: 0 @@ -319,20 +313,17 @@ GlobalContext: transition = BeginRun ProcessContext: COPY 6f4335e86de793448be83fe14f12dcb7 parent ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ 
queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 -++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 -++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 1 time = 1 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: begin run for module: stream = 0 label = 'intProducerU' id = 13 -++++++ finished: begin run for module: stream = 0 label = 'intProducerU' id = 13 -++++++ starting: begin run for module: stream = 0 label = 'intProducerB' id = 10 -++++++ finished: begin run for module: stream = 0 label = 'intProducerB' id = 10 -++++++ starting: begin run for module: stream = 0 label = 'intProducerA' id = 9 +++++++ starting: begin run for module: stream = 0 label = 'intProducerU' id = 12 +++++++ finished: begin run for module: stream = 0 label = 'intProducerU' id = 12 +++++++ starting: begin run for module: stream = 0 label = 'intProducerB' id = 9 +++++++ finished: begin run for module: stream = 0 label = 'intProducerB' id = 9 +++++++ starting: begin run for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -344,7 +335,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: begin run for module: stream = 0 label = 'intProducerA' id = 9 +++++++ finished: begin run for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -356,8 +347,8 @@ ModuleCallingContext state = Running runIndex = 0 
luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: begin run for module: stream = 0 label = 'intProducer' id = 4 -++++++ finished: begin run for module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: begin run for module: stream = 0 label = 'intProducer' id = 3 +++++++ finished: begin run for module: stream = 0 label = 'intProducer' id = 3 ++++ finished: begin run: stream = 0 run = 1 time = 1 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 @@ -378,6 +369,9 @@ StreamContext: StreamID = 0 transition = BeginRun ProcessContext: COPY 6f4335e86de793448be83fe14f12dcb7 parent ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 +++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 +++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 +++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: source lumi ++++ finished: source lumi ++++ starting: global begin lumi: run = 1 lumi = 1 time = 1 @@ -386,9 +380,9 @@ GlobalContext: transition = BeginLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'a3' id = 7 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'a2' id = 6 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'a3' id = 6 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'a2' id = 5 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'a1' id = 4 GlobalContext: transition = BeginLuminosityBlock run: 1 luminosityBlock: 1 
runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -400,7 +394,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'a1' id = 5 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'a1' id = 4 GlobalContext: transition = BeginLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -412,7 +406,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: global begin lumi for module: label = 'a1' id = 5 +++++++ starting: global begin lumi for module: label = 'a1' id = 4 GlobalContext: transition = BeginLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -424,7 +418,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: global begin lumi for module: label = 'a1' id = 5 +++++++ finished: global begin lumi for module: label = 'a1' id = 4 GlobalContext: transition = BeginLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -436,12 +430,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'a2' id = 6 -++++++ starting: global begin lumi for module: label = 'a2' id = 6 -++++++ finished: global begin lumi for module: label = 
'a2' id = 6 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'a3' id = 7 -++++++ starting: global begin lumi for module: label = 'a3' id = 7 -++++++ finished: global begin lumi for module: label = 'a3' id = 7 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'a2' id = 5 +++++++ starting: global begin lumi for module: label = 'a2' id = 5 +++++++ finished: global begin lumi for module: label = 'a2' id = 5 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'a3' id = 6 +++++++ starting: global begin lumi for module: label = 'a3' id = 6 +++++++ finished: global begin lumi for module: label = 'a3' id = 6 ++++ finished: global begin lumi: run = 1 lumi = 1 time = 1 GlobalContext: transition = BeginLuminosityBlock run: 1 luminosityBlock: 1 @@ -468,11 +462,11 @@ StreamContext: StreamID = 0 transition = BeginLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: begin lumi for module: stream = 0 label = 'intProducerU' id = 13 -++++++ finished: begin lumi for module: stream = 0 label = 'intProducerU' id = 13 -++++++ starting: begin lumi for module: stream = 0 label = 'intProducerB' id = 10 -++++++ finished: begin lumi for module: stream = 0 label = 'intProducerB' id = 10 -++++++ starting: begin lumi for module: stream = 0 label = 'intProducerA' id = 9 +++++++ starting: begin lumi for module: stream = 0 label = 'intProducerU' id = 12 +++++++ finished: begin lumi for module: stream = 0 label = 'intProducerU' id = 12 +++++++ starting: begin lumi for module: stream = 0 label = 'intProducerB' id = 9 +++++++ finished: begin lumi for module: stream = 0 label = 'intProducerB' id = 9 +++++++ starting: begin lumi for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = BeginLuminosityBlock run: 1 
lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -484,7 +478,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: begin lumi for module: stream = 0 label = 'intProducerA' id = 9 +++++++ finished: begin lumi for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = BeginLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -496,8 +490,8 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: begin lumi for module: stream = 0 label = 'intProducer' id = 4 -++++++ finished: begin lumi for module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: begin lumi for module: stream = 0 label = 'intProducer' id = 3 +++++++ finished: begin lumi for module: stream = 0 label = 'intProducer' id = 3 ++++ finished: begin lumi: stream = 0 run = 1 lumi = 1 time = 1 StreamContext: StreamID = 0 transition = BeginLuminosityBlock run: 1 lumi: 1 event: 0 @@ -548,8 +542,8 @@ PathContext: pathName = p pathID = 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a3' id = 7 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a3' id = 6 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 
microsecondOffset = 5000001 @@ -565,8 +559,8 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -580,11 +574,11 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event 
run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -598,7 +592,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -612,7 +606,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ finished: processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -626,7 +620,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -642,7 +636,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ starting: processing event 
for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -658,7 +652,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -674,12 +668,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ starting: processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ finished: processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ starting: processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ finished: processing event for module: stream = 0 label = 'a2' id = 6 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ starting: processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ finished: processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ starting: processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ finished: processing event for module: stream = 0 label = 'a2' id = 5 ++++++++ starting: processing event for 
module: stream = 0 label = 'p' id = 2 ++++++++ finished: processing event for module: stream = 0 label = 'p' id = 2 ++++++ finished: processing path 'p' : stream = 0 @@ -695,24 +689,22 @@ PathContext: pathName = p pathID = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 1 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 1 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 8 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 8 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 8 -++++++++ finished: processing event for module: stream = 0 
label = 'out' id = 8 -++++++++ starting: processing event for module: stream = 0 label = 'e' id = 3 -++++++++ finished: processing event for module: stream = 0 label = 'e' id = 3 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 7 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 7 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 7 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 7 ++++++ finished: processing path 'e' : stream = 0 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 @@ -774,8 +766,8 @@ PathContext: pathName = p pathID = 0 runIndex = 0 luminosityBlockIndex 
= 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a3' id = 7 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a3' id = 6 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -791,8 +783,8 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -806,11 +798,11 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: processing event for module: stream = 0 label = 'intProducer' 
id = 4 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -824,7 +816,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -838,7 +830,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ finished: processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -852,7 +844,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -868,7 +860,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -884,7 +876,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -900,12 +892,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ starting: processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ finished: processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ starting: processing event for module: stream = 
0 label = 'a2' id = 6 -++++++++ finished: processing event for module: stream = 0 label = 'a2' id = 6 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ starting: processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ finished: processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ starting: processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ finished: processing event for module: stream = 0 label = 'a2' id = 5 ++++++++ starting: processing event for module: stream = 0 label = 'p' id = 2 ++++++++ finished: processing event for module: stream = 0 label = 'p' id = 2 ++++++ finished: processing path 'p' : stream = 0 @@ -921,24 +913,22 @@ PathContext: pathName = p pathID = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 1 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 1 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 8 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ starting: processing event for module: stream = 0 
label = 'intProducerU' id = 13 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 8 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 8 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 8 -++++++++ starting: processing event for module: stream = 0 label = 'e' id = 3 -++++++++ finished: processing event for module: stream = 0 label = 'e' id = 3 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 7 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerB' id 
= 9 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 7 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 7 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 7 ++++++ finished: processing path 'e' : stream = 0 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 @@ -1000,8 +990,8 @@ PathContext: pathName = p pathID = 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a3' id = 7 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a3' id = 6 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1017,8 +1007,8 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 
runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1032,11 +1022,11 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 4 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 3 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1050,7 +1040,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ starting: processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1064,7 +1054,7 @@ ModuleCallingContext state = Running runIndex = 0 
luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: processing event for module: stream = 0 label = 'a1' id = 5 +++++++++ finished: processing event for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1078,7 +1068,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1094,7 +1084,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1110,7 +1100,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerA' id = 9 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 
luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1126,12 +1116,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ starting: processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ finished: processing event for module: stream = 0 label = 'a3' id = 7 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ starting: processing event for module: stream = 0 label = 'a2' id = 6 -++++++++ finished: processing event for module: stream = 0 label = 'a2' id = 6 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ starting: processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ finished: processing event for module: stream = 0 label = 'a3' id = 6 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ starting: processing event for module: stream = 0 label = 'a2' id = 5 +++++++++ finished: processing event for module: stream = 0 label = 'a2' id = 5 ++++++++ starting: processing event for module: stream = 0 label = 'p' id = 2 ++++++++ finished: processing event for module: stream = 0 label = 'p' id = 2 ++++++ finished: processing path 'p' : stream = 0 @@ -1147,24 +1137,22 @@ PathContext: pathName = p pathID = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 1 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 1 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 8 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ starting: prefetching 
before processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 14 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 13 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerB' id = 10 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 8 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 8 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 8 -++++++++ starting: processing event for module: stream = 0 label = 'e' id = 3 -++++++++ finished: processing event for module: stream = 0 label = 'e' id = 3 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 7 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: prefetching 
before processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 13 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 12 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerB' id = 9 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 7 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 7 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 7 ++++++ finished: processing path 'e' : stream = 0 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 @@ -1205,11 +1193,11 @@ StreamContext: StreamID = 0 transition = EndLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: end lumi for module: stream = 0 label = 'intProducerU' id = 13 -++++++ finished: end lumi for module: stream = 0 label = 'intProducerU' id = 13 -++++++ starting: end lumi for module: stream = 0 label = 'intProducerB' id = 10 -++++++ finished: end lumi for module: stream = 0 label = 'intProducerB' id = 10 -++++++ starting: end lumi for module: stream = 0 label = 'intProducerA' id = 9 +++++++ starting: end lumi for module: stream = 0 label = 'intProducerU' id = 12 +++++++ finished: end lumi for module: 
stream = 0 label = 'intProducerU' id = 12 +++++++ starting: end lumi for module: stream = 0 label = 'intProducerB' id = 9 +++++++ finished: end lumi for module: stream = 0 label = 'intProducerB' id = 9 +++++++ starting: end lumi for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = EndLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1221,7 +1209,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: end lumi for module: stream = 0 label = 'intProducerA' id = 9 +++++++ finished: end lumi for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = EndLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -1233,8 +1221,8 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: end lumi for module: stream = 0 label = 'intProducer' id = 4 -++++++ finished: end lumi for module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: end lumi for module: stream = 0 label = 'intProducer' id = 3 +++++++ finished: end lumi for module: stream = 0 label = 'intProducer' id = 3 ++++ finished: end lumi: stream = 0 run = 1 lumi = 1 time = 15000001 StreamContext: StreamID = 0 transition = EndLuminosityBlock run: 1 lumi: 1 event: 0 @@ -1261,9 +1249,9 @@ GlobalContext: transition = EndLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'a3' id = 7 -++++++++ starting: prefetching before processing global end 
LuminosityBlock for module: label = 'a2' id = 6 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'a3' id = 6 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'a2' id = 5 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'a1' id = 4 GlobalContext: transition = EndLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -1275,7 +1263,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'a1' id = 5 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'a1' id = 4 GlobalContext: transition = EndLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -1287,7 +1275,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: global end lumi for module: label = 'a1' id = 5 +++++++ starting: global end lumi for module: label = 'a1' id = 4 GlobalContext: transition = EndLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -1299,7 +1287,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: global end lumi for module: label = 'a1' id = 5 +++++++ finished: global end lumi for module: label = 'a1' id = 4 GlobalContext: transition 
= EndLuminosityBlock run: 1 luminosityBlock: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -1311,12 +1299,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'a2' id = 6 -++++++ starting: global end lumi for module: label = 'a2' id = 6 -++++++ finished: global end lumi for module: label = 'a2' id = 6 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'a3' id = 7 -++++++ starting: global end lumi for module: label = 'a3' id = 7 -++++++ finished: global end lumi for module: label = 'a3' id = 7 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'a2' id = 5 +++++++ starting: global end lumi for module: label = 'a2' id = 5 +++++++ finished: global end lumi for module: label = 'a2' id = 5 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'a3' id = 6 +++++++ starting: global end lumi for module: label = 'a3' id = 6 +++++++ finished: global end lumi for module: label = 'a3' id = 6 ++++ finished: global end lumi: run = 1 lumi = 1 time = 1 GlobalContext: transition = EndLuminosityBlock run: 1 luminosityBlock: 1 @@ -1343,8 +1331,8 @@ GlobalContext: transition = WriteLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: write lumi for module: label = 'out' id = 8 -++++++ finished: write lumi for module: label = 'out' id = 8 +++++++ starting: write lumi for module: label = 'out' id = 7 +++++++ finished: write lumi for module: label = 'out' id = 7 ++++ finished: global write lumi: run = 1 lumi = 1 time = 1 GlobalContext: transition = WriteLuminosityBlock run: 1 luminosityBlock: 1 @@ -1371,11 
+1359,11 @@ StreamContext: StreamID = 0 transition = EndRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: end run for module: stream = 0 label = 'intProducerU' id = 13 -++++++ finished: end run for module: stream = 0 label = 'intProducerU' id = 13 -++++++ starting: end run for module: stream = 0 label = 'intProducerB' id = 10 -++++++ finished: end run for module: stream = 0 label = 'intProducerB' id = 10 -++++++ starting: end run for module: stream = 0 label = 'intProducerA' id = 9 +++++++ starting: end run for module: stream = 0 label = 'intProducerU' id = 12 +++++++ finished: end run for module: stream = 0 label = 'intProducerU' id = 12 +++++++ starting: end run for module: stream = 0 label = 'intProducerB' id = 9 +++++++ finished: end run for module: stream = 0 label = 'intProducerB' id = 9 +++++++ starting: end run for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = EndRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -1387,7 +1375,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: end run for module: stream = 0 label = 'intProducerA' id = 9 +++++++ finished: end run for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = EndRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -1399,8 +1387,8 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: end run for module: stream = 0 label = 'intProducer' id = 4 -++++++ finished: end run for 
module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: end run for module: stream = 0 label = 'intProducer' id = 3 +++++++ finished: end run for module: stream = 0 label = 'intProducer' id = 3 ++++ finished: end run: stream = 0 run = 1 time = 15000001 StreamContext: StreamID = 0 transition = EndRun run: 1 lumi: 0 event: 0 @@ -1427,9 +1415,9 @@ GlobalContext: transition = EndRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing global end Run for module: label = 'a3' id = 7 -++++++++ starting: prefetching before processing global end Run for module: label = 'a2' id = 6 -++++++++ starting: prefetching before processing global end Run for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing global end Run for module: label = 'a3' id = 6 +++++++++ starting: prefetching before processing global end Run for module: label = 'a2' id = 5 +++++++++ starting: prefetching before processing global end Run for module: label = 'a1' id = 4 GlobalContext: transition = EndRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -1441,7 +1429,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global end Run for module: label = 'a1' id = 5 +++++++++ finished: prefetching before processing global end Run for module: label = 'a1' id = 4 GlobalContext: transition = EndRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -1453,7 +1441,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 
cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: global end run for module: label = 'a1' id = 5 +++++++ starting: global end run for module: label = 'a1' id = 4 GlobalContext: transition = EndRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -1465,7 +1453,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: global end run for module: label = 'a1' id = 5 +++++++ finished: global end run for module: label = 'a1' id = 4 GlobalContext: transition = EndRun run: 1 luminosityBlock: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -1477,12 +1465,12 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing global end Run for module: label = 'a2' id = 6 -++++++ starting: global end run for module: label = 'a2' id = 6 -++++++ finished: global end run for module: label = 'a2' id = 6 -++++++++ finished: prefetching before processing global end Run for module: label = 'a3' id = 7 -++++++ starting: global end run for module: label = 'a3' id = 7 -++++++ finished: global end run for module: label = 'a3' id = 7 +++++++++ finished: prefetching before processing global end Run for module: label = 'a2' id = 5 +++++++ starting: global end run for module: label = 'a2' id = 5 +++++++ finished: global end run for module: label = 'a2' id = 5 +++++++++ finished: prefetching before processing global end Run for module: label = 'a3' id = 6 +++++++ starting: global end run for module: label = 'a3' id = 6 +++++++ finished: global end run for module: label = 'a3' id = 6 ++++ finished: global end run 1 : time = 15000001 GlobalContext: transition = EndRun run: 
1 luminosityBlock: 0 @@ -1509,8 +1497,8 @@ GlobalContext: transition = WriteRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: write run for module: label = 'out' id = 8 -++++++ finished: write run for module: label = 'out' id = 8 +++++++ starting: write run for module: label = 'out' id = 7 +++++++ finished: write run for module: label = 'out' id = 7 ++++ finished: global write run 1 : time = 15000001 GlobalContext: transition = WriteRun run: 1 luminosityBlock: 0 @@ -1537,9 +1525,9 @@ GlobalContext: transition = EndProcessBlock runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'a3' id = 7 -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'a2' id = 6 -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'a3' id = 6 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'a2' id = 5 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'a1' id = 4 GlobalContext: transition = EndProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1551,15 +1539,15 @@ ModuleCallingContext state = Prefetching runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 12 -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 
'intProducerBeginProcessBlock' id = 11 -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++ starting: end process block for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++ finished: end process block for module: label = 'intProducerBeginProcessBlock' id = 11 -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 12 -++++++ starting: end process block for module: label = 'intProducerEndProcessBlock' id = 12 -++++++ finished: end process block for module: label = 'intProducerEndProcessBlock' id = 12 -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'a1' id = 5 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 11 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++ starting: end process block for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++ finished: end process block for module: label = 'intProducerBeginProcessBlock' id = 10 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'intProducerEndProcessBlock' id = 11 +++++++ starting: end process block for module: label = 'intProducerEndProcessBlock' id = 11 +++++++ finished: end process block for module: label = 'intProducerEndProcessBlock' id = 11 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'a1' id = 4 GlobalContext: transition = EndProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1571,7 +1559,7 @@ ModuleCallingContext state = Prefetching runIndex = 4294967295 
luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: end process block for module: label = 'a1' id = 5 +++++++ starting: end process block for module: label = 'a1' id = 4 GlobalContext: transition = EndProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1583,7 +1571,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ finished: end process block for module: label = 'a1' id = 5 +++++++ finished: end process block for module: label = 'a1' id = 4 GlobalContext: transition = EndProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1595,12 +1583,12 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'a2' id = 6 -++++++ starting: end process block for module: label = 'a2' id = 6 -++++++ finished: end process block for module: label = 'a2' id = 6 -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'a3' id = 7 -++++++ starting: end process block for module: label = 'a3' id = 7 -++++++ finished: end process block for module: label = 'a3' id = 7 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'a2' id = 5 +++++++ starting: end process block for module: label = 'a2' id = 5 +++++++ finished: end process block for module: label = 'a2' id = 5 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'a3' id = 6 +++++++ starting: end process block for module: label = 'a3' id = 
6 +++++++ finished: end process block for module: label = 'a3' id = 6 ++++ finished: end process block GlobalContext: transition = EndProcessBlock run: 0 luminosityBlock: 0 @@ -1655,8 +1643,8 @@ GlobalContext: transition = WriteProcessBlock runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++++ starting: write process block for module: label = 'out' id = 8 -++++++ finished: write process block for module: label = 'out' id = 8 +++++++ starting: write process block for module: label = 'out' id = 7 +++++++ finished: write process block for module: label = 'out' id = 7 ++++ finished: write process block GlobalContext: transition = WriteProcessBlock run: 0 luminosityBlock: 0 @@ -1677,9 +1665,9 @@ GlobalContext: transition = WriteProcessBlock ProcessContext: COPY 6f4335e86de793448be83fe14f12dcb7 parent ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ starting: end stream for module: stream = 0 label = 'intProducer' id = 4 -++++ finished: end stream for module: stream = 0 label = 'intProducer' id = 4 -++++ starting: end stream for module: stream = 0 label = 'a1' id = 5 +++++ starting: end stream for module: stream = 0 label = 'intProducer' id = 3 +++++ finished: end stream for module: stream = 0 label = 'intProducer' id = 3 +++++ starting: end stream for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = EndStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1691,7 +1679,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ finished: end stream for module: stream = 0 label = 'a1' id = 5 +++++ finished: end stream for module: stream = 0 label = 'a1' id = 4 StreamContext: StreamID = 0 transition = EndStream run: 0 lumi: 0 
event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1703,19 +1691,17 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ starting: end stream for module: stream = 0 label = 'a2' id = 6 -++++ finished: end stream for module: stream = 0 label = 'a2' id = 6 -++++ starting: end stream for module: stream = 0 label = 'a3' id = 7 -++++ finished: end stream for module: stream = 0 label = 'a3' id = 7 +++++ starting: end stream for module: stream = 0 label = 'a2' id = 5 +++++ finished: end stream for module: stream = 0 label = 'a2' id = 5 +++++ starting: end stream for module: stream = 0 label = 'a3' id = 6 +++++ finished: end stream for module: stream = 0 label = 'a3' id = 6 ++++ starting: end stream for module: stream = 0 label = 'TriggerResults' id = 1 ++++ finished: end stream for module: stream = 0 label = 'TriggerResults' id = 1 -++++ starting: end stream for module: stream = 0 label = 'out' id = 8 -++++ finished: end stream for module: stream = 0 label = 'out' id = 8 +++++ starting: end stream for module: stream = 0 label = 'out' id = 7 +++++ finished: end stream for module: stream = 0 label = 'out' id = 7 ++++ starting: end stream for module: stream = 0 label = 'p' id = 2 ++++ finished: end stream for module: stream = 0 label = 'p' id = 2 -++++ starting: end stream for module: stream = 0 label = 'e' id = 3 -++++ finished: end stream for module: stream = 0 label = 'e' id = 3 -++++ starting: end stream for module: stream = 0 label = 'intProducerA' id = 9 +++++ starting: end stream for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = EndStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1727,7 +1713,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 
luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ finished: end stream for module: stream = 0 label = 'intProducerA' id = 9 +++++ finished: end stream for module: stream = 0 label = 'intProducerA' id = 8 StreamContext: StreamID = 0 transition = EndStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -1739,49 +1725,47 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD1 cf8fb4a5e3c9a108eac33826b2a17969 -++++ starting: end stream for module: stream = 0 label = 'intProducerB' id = 10 -++++ finished: end stream for module: stream = 0 label = 'intProducerB' id = 10 -++++ starting: end stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 11 -++++ finished: end stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 11 -++++ starting: end stream for module: stream = 0 label = 'intProducerEndProcessBlock' id = 12 -++++ finished: end stream for module: stream = 0 label = 'intProducerEndProcessBlock' id = 12 -++++ starting: end stream for module: stream = 0 label = 'intProducerU' id = 13 -++++ finished: end stream for module: stream = 0 label = 'intProducerU' id = 13 -++++ starting: end stream for module: stream = 0 label = 'intVectorProducer' id = 14 -++++ finished: end stream for module: stream = 0 label = 'intVectorProducer' id = 14 -++++ starting: end job for module with label 'intProducerA' id = 9 +++++ starting: end stream for module: stream = 0 label = 'intProducerB' id = 9 +++++ finished: end stream for module: stream = 0 label = 'intProducerB' id = 9 +++++ starting: end stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 10 +++++ finished: end stream for module: stream = 0 label = 'intProducerBeginProcessBlock' id = 10 +++++ starting: end stream for module: 
stream = 0 label = 'intProducerEndProcessBlock' id = 11 +++++ finished: end stream for module: stream = 0 label = 'intProducerEndProcessBlock' id = 11 +++++ starting: end stream for module: stream = 0 label = 'intProducerU' id = 12 +++++ finished: end stream for module: stream = 0 label = 'intProducerU' id = 12 +++++ starting: end stream for module: stream = 0 label = 'intVectorProducer' id = 13 +++++ finished: end stream for module: stream = 0 label = 'intVectorProducer' id = 13 +++++ starting: end job for module with label 'intProducerA' id = 8 Module type=IntProducer, Module label=intProducerA, Parameter Set ID=56ea7c8bbb02df4e1c3b945954838318 -++++ finished: end job for module with label 'intProducerA' id = 9 +++++ finished: end job for module with label 'intProducerA' id = 8 Module type=IntProducer, Module label=intProducerA, Parameter Set ID=56ea7c8bbb02df4e1c3b945954838318 -++++ starting: end job for module with label 'intProducerB' id = 10 -++++ finished: end job for module with label 'intProducerB' id = 10 -++++ starting: end job for module with label 'intProducerBeginProcessBlock' id = 11 -++++ finished: end job for module with label 'intProducerBeginProcessBlock' id = 11 -++++ starting: end job for module with label 'intProducerEndProcessBlock' id = 12 -++++ finished: end job for module with label 'intProducerEndProcessBlock' id = 12 -++++ starting: end job for module with label 'intProducerU' id = 13 -++++ finished: end job for module with label 'intProducerU' id = 13 -++++ starting: end job for module with label 'intVectorProducer' id = 14 -++++ finished: end job for module with label 'intVectorProducer' id = 14 -++++ starting: end job for module with label 'intProducer' id = 4 -++++ finished: end job for module with label 'intProducer' id = 4 -++++ starting: end job for module with label 'a1' id = 5 +++++ starting: end job for module with label 'intProducerB' id = 9 +++++ finished: end job for module with label 'intProducerB' id = 9 +++++ starting: 
end job for module with label 'intProducerBeginProcessBlock' id = 10 +++++ finished: end job for module with label 'intProducerBeginProcessBlock' id = 10 +++++ starting: end job for module with label 'intProducerEndProcessBlock' id = 11 +++++ finished: end job for module with label 'intProducerEndProcessBlock' id = 11 +++++ starting: end job for module with label 'intProducerU' id = 12 +++++ finished: end job for module with label 'intProducerU' id = 12 +++++ starting: end job for module with label 'intVectorProducer' id = 13 +++++ finished: end job for module with label 'intVectorProducer' id = 13 +++++ starting: end job for module with label 'intProducer' id = 3 +++++ finished: end job for module with label 'intProducer' id = 3 +++++ starting: end job for module with label 'a1' id = 4 Module type=TestFindProduct, Module label=a1, Parameter Set ID=a7caa43fcf5ef35dee69e4bd85169d4c TestFindProduct sum = 530021 -++++ finished: end job for module with label 'a1' id = 5 +++++ finished: end job for module with label 'a1' id = 4 Module type=TestFindProduct, Module label=a1, Parameter Set ID=a7caa43fcf5ef35dee69e4bd85169d4c -++++ starting: end job for module with label 'a2' id = 6 +++++ starting: end job for module with label 'a2' id = 5 TestFindProduct sum = 300 -++++ finished: end job for module with label 'a2' id = 6 -++++ starting: end job for module with label 'a3' id = 7 +++++ finished: end job for module with label 'a2' id = 5 +++++ starting: end job for module with label 'a3' id = 6 TestFindProduct sum = 300 -++++ finished: end job for module with label 'a3' id = 7 -++++ starting: end job for module with label 'out' id = 8 -++++ finished: end job for module with label 'out' id = 8 +++++ finished: end job for module with label 'a3' id = 6 +++++ starting: end job for module with label 'out' id = 7 +++++ finished: end job for module with label 'out' id = 7 ++++ starting: end job for module with label 'TriggerResults' id = 1 ++++ finished: end job for module with 
label 'TriggerResults' id = 1 ++++ starting: end job for module with label 'p' id = 2 ++++ finished: end job for module with label 'p' id = 2 -++++ starting: end job for module with label 'e' id = 3 -++++ finished: end job for module with label 'e' id = 3 ++ finished: end job diff --git a/FWCore/Integration/test/unit_test_outputs/testGetBy2.log b/FWCore/Integration/test/unit_test_outputs/testGetBy2.log index d3109fc8b8098..1f5691790acf9 100644 --- a/FWCore/Integration/test/unit_test_outputs/testGetBy2.log +++ b/FWCore/Integration/test/unit_test_outputs/testGetBy2.log @@ -8,38 +8,34 @@ Module type=PoolSource, Module label=source, Parameter Set ID=e26370152bd22c8709 ++++ finished: constructing module with label 'TriggerResults' id = 1 ++++ starting: constructing module with label 'p' id = 2 ++++ finished: constructing module with label 'p' id = 2 -++++ starting: constructing module with label 'e' id = 3 -++++ finished: constructing module with label 'e' id = 3 -++++ starting: constructing module with label 'intProducer' id = 4 +++++ starting: constructing module with label 'intProducer' id = 3 Module type=IntProducer, Module label=intProducer, Parameter Set ID=b4b90439a3015d748c803aa5c60a25d3 -++++ finished: constructing module with label 'intProducer' id = 4 +++++ finished: constructing module with label 'intProducer' id = 3 Module type=IntProducer, Module label=intProducer, Parameter Set ID=b4b90439a3015d748c803aa5c60a25d3 -++++ starting: constructing module with label 'out' id = 5 -++++ finished: constructing module with label 'out' id = 5 -++++ starting: constructing module with label 'intProducerU' id = 6 -++++ finished: constructing module with label 'intProducerU' id = 6 -++++ starting: constructing module with label 'intVectorProducer' id = 7 -++++ finished: constructing module with label 'intVectorProducer' id = 7 +++++ starting: constructing module with label 'out' id = 4 +++++ finished: constructing module with label 'out' id = 4 +++++ starting: 
constructing module with label 'intProducerU' id = 5 +++++ finished: constructing module with label 'intProducerU' id = 5 +++++ starting: constructing module with label 'intVectorProducer' id = 6 +++++ finished: constructing module with label 'intVectorProducer' id = 6 ++ preallocate: 1 concurrent runs, 1 concurrent luminosity sections, 1 streams ++ starting: begin job -++++ starting: begin job for module with label 'intProducerU' id = 6 -++++ finished: begin job for module with label 'intProducerU' id = 6 -++++ starting: begin job for module with label 'intVectorProducer' id = 7 -++++ finished: begin job for module with label 'intVectorProducer' id = 7 -++++ starting: begin job for module with label 'intProducer' id = 4 +++++ starting: begin job for module with label 'intProducerU' id = 5 +++++ finished: begin job for module with label 'intProducerU' id = 5 +++++ starting: begin job for module with label 'intVectorProducer' id = 6 +++++ finished: begin job for module with label 'intVectorProducer' id = 6 +++++ starting: begin job for module with label 'intProducer' id = 3 Module type=IntProducer, Module label=intProducer, Parameter Set ID=b4b90439a3015d748c803aa5c60a25d3 -++++ finished: begin job for module with label 'intProducer' id = 4 +++++ finished: begin job for module with label 'intProducer' id = 3 Module type=IntProducer, Module label=intProducer, Parameter Set ID=b4b90439a3015d748c803aa5c60a25d3 -++++ starting: begin job for module with label 'out' id = 5 -++++ finished: begin job for module with label 'out' id = 5 +++++ starting: begin job for module with label 'out' id = 4 +++++ finished: begin job for module with label 'out' id = 4 ++++ starting: begin job for module with label 'TriggerResults' id = 1 ++++ finished: begin job for module with label 'TriggerResults' id = 1 ++++ starting: begin job for module with label 'p' id = 2 ++++ finished: begin job for module with label 'p' id = 2 -++++ starting: begin job for module with label 'e' id = 3 -++++ 
finished: begin job for module with label 'e' id = 3 ++ finished: begin job -++++ starting: begin stream for module: stream = 0 label = 'intProducer' id = 4 +++++ starting: begin stream for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = BeginStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -51,7 +47,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++ finished: begin stream for module: stream = 0 label = 'intProducer' id = 4 +++++ finished: begin stream for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = BeginStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -65,16 +61,14 @@ ModuleCallingContext state = Running ++++ starting: begin stream for module: stream = 0 label = 'TriggerResults' id = 1 ++++ finished: begin stream for module: stream = 0 label = 'TriggerResults' id = 1 -++++ starting: begin stream for module: stream = 0 label = 'out' id = 5 -++++ finished: begin stream for module: stream = 0 label = 'out' id = 5 +++++ starting: begin stream for module: stream = 0 label = 'out' id = 4 +++++ finished: begin stream for module: stream = 0 label = 'out' id = 4 ++++ starting: begin stream for module: stream = 0 label = 'p' id = 2 ++++ finished: begin stream for module: stream = 0 label = 'p' id = 2 -++++ starting: begin stream for module: stream = 0 label = 'e' id = 3 -++++ finished: begin stream for module: stream = 0 label = 'e' id = 3 -++++ starting: begin stream for module: stream = 0 label = 'intProducerU' id = 6 -++++ finished: begin stream for module: stream = 0 label = 'intProducerU' id = 6 -++++ starting: begin stream for module: stream = 0 label = 'intVectorProducer' id = 7 -++++ 
finished: begin stream for module: stream = 0 label = 'intVectorProducer' id = 7 +++++ starting: begin stream for module: stream = 0 label = 'intProducerU' id = 5 +++++ finished: begin stream for module: stream = 0 label = 'intProducerU' id = 5 +++++ starting: begin stream for module: stream = 0 label = 'intVectorProducer' id = 6 +++++ finished: begin stream for module: stream = 0 label = 'intVectorProducer' id = 6 ++++ starting: begin process block GlobalContext: transition = BeginProcessBlock run: 0 luminosityBlock: 0 @@ -107,8 +101,8 @@ GlobalContext: transition = WriteProcessBlock runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: write process block for module: label = 'out' id = 5 -++++++ finished: write process block for module: label = 'out' id = 5 +++++++ starting: write process block for module: label = 'out' id = 4 +++++++ finished: write process block for module: label = 'out' id = 4 ++++ finished: write process block GlobalContext: transition = WriteProcessBlock run: 0 luminosityBlock: 0 @@ -132,18 +126,15 @@ GlobalContext: transition = BeginRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 -++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 -++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 1 time = 1 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: begin run for module: stream = 0 label = 'intProducerU' id = 6 -++++++ finished: begin run for module: stream = 0 label = 'intProducerU' id = 6 -++++++ starting: begin run for module: stream = 0 label = 
'intProducer' id = 4 +++++++ starting: begin run for module: stream = 0 label = 'intProducerU' id = 5 +++++++ finished: begin run for module: stream = 0 label = 'intProducerU' id = 5 +++++++ starting: begin run for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -155,7 +146,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ finished: begin run for module: stream = 0 label = 'intProducer' id = 4 +++++++ finished: begin run for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = BeginRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 @@ -173,6 +164,9 @@ StreamContext: StreamID = 0 transition = BeginRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 +++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 +++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 +++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: source lumi ++++ finished: source lumi ++++ starting: global begin lumi: run = 1 lumi = 1 time = 1 @@ -193,9 +187,9 @@ StreamContext: StreamID = 0 transition = BeginLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: begin lumi for module: stream = 0 label = 'intProducerU' id = 6 -++++++ finished: begin lumi for module: stream = 0 label = 'intProducerU' id = 6 -++++++ starting: begin lumi for module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: begin lumi for module: stream = 0 label = 'intProducerU' id = 5 +++++++ finished: begin 
lumi for module: stream = 0 label = 'intProducerU' id = 5 +++++++ starting: begin lumi for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = BeginLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -207,7 +201,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ finished: begin lumi for module: stream = 0 label = 'intProducer' id = 4 +++++++ finished: begin lumi for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = BeginLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 @@ -255,7 +249,7 @@ PathContext: pathName = p pathID = 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -269,7 +263,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -283,7 +277,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 
luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -297,7 +291,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 5000001 @@ -326,36 +320,34 @@ PathContext: pathName = p pathID = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 1 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 1 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 5 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: processing event for 
module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 5 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 5 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 5 -++++++++ starting: processing event for module: stream = 0 label = 'e' id = 3 -++++++++ finished: processing event for module: stream = 0 label = 'e' id = 3 +++++++++ starting: prefetching before processing event for 
module: stream = 0 label = 'out' id = 4 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 
+++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 4 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 4 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 4 ++++++ finished: processing path 'e' : stream = 0 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 1 @@ -403,7 +395,7 @@ PathContext: pathName = p pathID = 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -417,7 +409,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -431,7 +423,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ starting: processing event for 
module: stream = 0 label = 'intProducer' id = 4 +++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -445,7 +437,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 10000001 @@ -474,36 +466,34 @@ PathContext: pathName = p pathID = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 1 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 1 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 5 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ 
starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 5 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 5 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 5 -++++++++ starting: processing event for module: stream = 0 label = 'e' id = 3 -++++++++ finished: processing event for module: stream = 0 label = 'e' id = 3 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 4 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 5 
+++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 
'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 4 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 4 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 4 ++++++ finished: processing path 'e' : stream = 0 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 2 @@ -551,7 +541,7 @@ PathContext: pathName = p pathID = 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -565,7 +555,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -579,7 +569,7 @@ ModuleCallingContext state = Prefetching runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ starting: processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: 
StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -593,7 +583,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 4 +++++++++ finished: processing event for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -622,36 +612,34 @@ PathContext: pathName = p pathID = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 1 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 1 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 5 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 7 -++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 6 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 
5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 5 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 5 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 5 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 5 -++++++++ starting: processing event for module: stream = 0 label = 'e' id = 3 -++++++++ finished: processing event for module: stream = 0 label = 'e' id = 3 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 4 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: prefetching before processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ starting: processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: processing event for module: stream = 0 label = 'intVectorProducer' id = 6 +++++++++++ finished: prefetching before processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ finished: processing event for module: stream = 0 label = 'intProducerU' id = 5 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ starting: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++++ finished: event delayed read from source: stream = 0 label = 'out' id = 4 +++++++++ finished: prefetching before processing event for 
module: stream = 0 label = 'out' id = 4 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 4 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 4 ++++++ finished: processing path 'e' : stream = 0 StreamContext: StreamID = 0 transition = Event run: 1 lumi: 1 event: 3 @@ -678,9 +666,9 @@ StreamContext: StreamID = 0 transition = EndLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: end lumi for module: stream = 0 label = 'intProducerU' id = 6 -++++++ finished: end lumi for module: stream = 0 label = 'intProducerU' id = 6 -++++++ starting: end lumi for module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: end lumi for module: stream = 0 label = 'intProducerU' id = 5 +++++++ finished: end lumi for module: stream = 0 label = 'intProducerU' id = 5 +++++++ starting: end lumi for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = EndLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -692,7 +680,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ finished: end lumi for module: stream = 0 label = 'intProducer' id = 4 +++++++ finished: end lumi for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = EndLuminosityBlock run: 1 lumi: 1 event: 0 runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 15000001 @@ -728,8 +716,8 @@ GlobalContext: transition = WriteLuminosityBlock runIndex = 0 luminosityBlockIndex = 0 unixTime = 0 microsecondOffset = 1 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: write lumi for module: label = 'out' id = 5 -++++++ finished: write lumi for module: label 
= 'out' id = 5 +++++++ starting: write lumi for module: label = 'out' id = 4 +++++++ finished: write lumi for module: label = 'out' id = 4 ++++ finished: global write lumi: run = 1 lumi = 1 time = 1 GlobalContext: transition = WriteLuminosityBlock run: 1 luminosityBlock: 1 @@ -742,9 +730,9 @@ StreamContext: StreamID = 0 transition = EndRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: end run for module: stream = 0 label = 'intProducerU' id = 6 -++++++ finished: end run for module: stream = 0 label = 'intProducerU' id = 6 -++++++ starting: end run for module: stream = 0 label = 'intProducer' id = 4 +++++++ starting: end run for module: stream = 0 label = 'intProducerU' id = 5 +++++++ finished: end run for module: stream = 0 label = 'intProducerU' id = 5 +++++++ starting: end run for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = EndRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -756,7 +744,7 @@ ModuleCallingContext state = Running runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ finished: end run for module: stream = 0 label = 'intProducer' id = 4 +++++++ finished: end run for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = EndRun run: 1 lumi: 0 event: 0 runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 @@ -792,8 +780,8 @@ GlobalContext: transition = WriteRun runIndex = 0 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 15000001 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: write run for module: label = 'out' id = 5 -++++++ finished: write run for module: label = 'out' id = 5 +++++++ starting: write run for module: 
label = 'out' id = 4 +++++++ finished: write run for module: label = 'out' id = 4 ++++ finished: global write run 1 : time = 15000001 GlobalContext: transition = WriteRun run: 1 luminosityBlock: 0 @@ -820,15 +808,15 @@ GlobalContext: transition = WriteProcessBlock runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++++ starting: write process block for module: label = 'out' id = 5 -++++++ finished: write process block for module: label = 'out' id = 5 +++++++ starting: write process block for module: label = 'out' id = 4 +++++++ finished: write process block for module: label = 'out' id = 4 ++++ finished: write process block GlobalContext: transition = WriteProcessBlock run: 0 luminosityBlock: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++ starting: end stream for module: stream = 0 label = 'intProducer' id = 4 +++++ starting: end stream for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = EndStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -840,7 +828,7 @@ ModuleCallingContext state = Running runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 ProcessContext: PROD2 9a42d7e4995aeb2a3c9d04a3ef0f3879 -++++ finished: end stream for module: stream = 0 label = 'intProducer' id = 4 +++++ finished: end stream for module: stream = 0 label = 'intProducer' id = 3 StreamContext: StreamID = 0 transition = EndStream run: 0 lumi: 0 event: 0 runIndex = 4294967295 luminosityBlockIndex = 4294967295 unixTime = 0 microsecondOffset = 0 @@ -854,30 +842,26 @@ ModuleCallingContext state = Running ++++ starting: end stream for module: stream = 0 label = 'TriggerResults' id = 1 ++++ finished: end stream for module: stream = 0 label = 
'TriggerResults' id = 1 -++++ starting: end stream for module: stream = 0 label = 'out' id = 5 -++++ finished: end stream for module: stream = 0 label = 'out' id = 5 +++++ starting: end stream for module: stream = 0 label = 'out' id = 4 +++++ finished: end stream for module: stream = 0 label = 'out' id = 4 ++++ starting: end stream for module: stream = 0 label = 'p' id = 2 ++++ finished: end stream for module: stream = 0 label = 'p' id = 2 -++++ starting: end stream for module: stream = 0 label = 'e' id = 3 -++++ finished: end stream for module: stream = 0 label = 'e' id = 3 -++++ starting: end stream for module: stream = 0 label = 'intProducerU' id = 6 -++++ finished: end stream for module: stream = 0 label = 'intProducerU' id = 6 -++++ starting: end stream for module: stream = 0 label = 'intVectorProducer' id = 7 -++++ finished: end stream for module: stream = 0 label = 'intVectorProducer' id = 7 -++++ starting: end job for module with label 'intProducerU' id = 6 -++++ finished: end job for module with label 'intProducerU' id = 6 -++++ starting: end job for module with label 'intVectorProducer' id = 7 -++++ finished: end job for module with label 'intVectorProducer' id = 7 -++++ starting: end job for module with label 'intProducer' id = 4 +++++ starting: end stream for module: stream = 0 label = 'intProducerU' id = 5 +++++ finished: end stream for module: stream = 0 label = 'intProducerU' id = 5 +++++ starting: end stream for module: stream = 0 label = 'intVectorProducer' id = 6 +++++ finished: end stream for module: stream = 0 label = 'intVectorProducer' id = 6 +++++ starting: end job for module with label 'intProducerU' id = 5 +++++ finished: end job for module with label 'intProducerU' id = 5 +++++ starting: end job for module with label 'intVectorProducer' id = 6 +++++ finished: end job for module with label 'intVectorProducer' id = 6 +++++ starting: end job for module with label 'intProducer' id = 3 Module type=IntProducer, Module label=intProducer, 
Parameter Set ID=b4b90439a3015d748c803aa5c60a25d3 -++++ finished: end job for module with label 'intProducer' id = 4 +++++ finished: end job for module with label 'intProducer' id = 3 Module type=IntProducer, Module label=intProducer, Parameter Set ID=b4b90439a3015d748c803aa5c60a25d3 -++++ starting: end job for module with label 'out' id = 5 -++++ finished: end job for module with label 'out' id = 5 +++++ starting: end job for module with label 'out' id = 4 +++++ finished: end job for module with label 'out' id = 4 ++++ starting: end job for module with label 'TriggerResults' id = 1 ++++ finished: end job for module with label 'TriggerResults' id = 1 ++++ starting: end job for module with label 'p' id = 2 ++++ finished: end job for module with label 'p' id = 2 -++++ starting: end job for module with label 'e' id = 3 -++++ finished: end job for module with label 'e' id = 3 ++ finished: end job diff --git a/FWCore/Integration/test/unit_test_outputs/testSubProcess.grep2.txt b/FWCore/Integration/test/unit_test_outputs/testSubProcess.grep2.txt index 6c242b5af9b72..58c0e1f80ca95 100644 --- a/FWCore/Integration/test/unit_test_outputs/testSubProcess.grep2.txt +++ b/FWCore/Integration/test/unit_test_outputs/testSubProcess.grep2.txt @@ -32,48 +32,44 @@ ++++ finished: constructing module with label 'path3' id = 15 ++++ starting: constructing module with label 'path4' id = 16 ++++ finished: constructing module with label 'path4' id = 16 -++++ starting: constructing module with label 'endPath1' id = 17 -++++ finished: constructing module with label 'endPath1' id = 17 -++++ starting: constructing module with label 'thingWithMergeProducer' id = 18 -++++ finished: constructing module with label 'thingWithMergeProducer' id = 18 -++++ starting: constructing module with label 'test' id = 19 -++++ finished: constructing module with label 'test' id = 19 -++++ starting: constructing module with label 'testmerge' id = 20 -++++ finished: constructing module with label 'testmerge' id = 20 
-++++ starting: constructing module with label 'get' id = 21 -++++ finished: constructing module with label 'get' id = 21 -++++ starting: constructing module with label 'getInt' id = 22 -++++ finished: constructing module with label 'getInt' id = 22 -++++ starting: constructing module with label 'dependsOnNoPut' id = 23 -++++ finished: constructing module with label 'dependsOnNoPut' id = 23 -++++ starting: constructing module with label 'out' id = 24 -++++ finished: constructing module with label 'out' id = 24 -++++ starting: constructing module with label 'TriggerResults' id = 25 -++++ finished: constructing module with label 'TriggerResults' id = 25 -++++ starting: constructing module with label 'path1' id = 26 -++++ finished: constructing module with label 'path1' id = 26 -++++ starting: constructing module with label 'path2' id = 27 -++++ finished: constructing module with label 'path2' id = 27 -++++ starting: constructing module with label 'path3' id = 28 -++++ finished: constructing module with label 'path3' id = 28 -++++ starting: constructing module with label 'path4' id = 29 -++++ finished: constructing module with label 'path4' id = 29 -++++ starting: constructing module with label 'endPath1' id = 30 -++++ finished: constructing module with label 'endPath1' id = 30 -++++ starting: constructing module with label 'thingWithMergeProducer' id = 31 -++++ finished: constructing module with label 'thingWithMergeProducer' id = 31 -++++ starting: constructing module with label 'test' id = 32 -++++ finished: constructing module with label 'test' id = 32 -++++ starting: constructing module with label 'testmerge' id = 33 -++++ finished: constructing module with label 'testmerge' id = 33 -++++ starting: constructing module with label 'get' id = 34 -++++ finished: constructing module with label 'get' id = 34 -++++ starting: constructing module with label 'getInt' id = 35 -++++ finished: constructing module with label 'getInt' id = 35 -++++ starting: constructing module 
with label 'dependsOnNoPut' id = 36 -++++ finished: constructing module with label 'dependsOnNoPut' id = 36 -++++ starting: constructing module with label 'out' id = 37 -++++ finished: constructing module with label 'out' id = 37 +++++ starting: constructing module with label 'thingWithMergeProducer' id = 17 +++++ finished: constructing module with label 'thingWithMergeProducer' id = 17 +++++ starting: constructing module with label 'test' id = 18 +++++ finished: constructing module with label 'test' id = 18 +++++ starting: constructing module with label 'testmerge' id = 19 +++++ finished: constructing module with label 'testmerge' id = 19 +++++ starting: constructing module with label 'get' id = 20 +++++ finished: constructing module with label 'get' id = 20 +++++ starting: constructing module with label 'getInt' id = 21 +++++ finished: constructing module with label 'getInt' id = 21 +++++ starting: constructing module with label 'dependsOnNoPut' id = 22 +++++ finished: constructing module with label 'dependsOnNoPut' id = 22 +++++ starting: constructing module with label 'out' id = 23 +++++ finished: constructing module with label 'out' id = 23 +++++ starting: constructing module with label 'TriggerResults' id = 24 +++++ finished: constructing module with label 'TriggerResults' id = 24 +++++ starting: constructing module with label 'path1' id = 25 +++++ finished: constructing module with label 'path1' id = 25 +++++ starting: constructing module with label 'path2' id = 26 +++++ finished: constructing module with label 'path2' id = 26 +++++ starting: constructing module with label 'path3' id = 27 +++++ finished: constructing module with label 'path3' id = 27 +++++ starting: constructing module with label 'path4' id = 28 +++++ finished: constructing module with label 'path4' id = 28 +++++ starting: constructing module with label 'thingWithMergeProducer' id = 29 +++++ finished: constructing module with label 'thingWithMergeProducer' id = 29 +++++ starting: 
constructing module with label 'test' id = 30 +++++ finished: constructing module with label 'test' id = 30 +++++ starting: constructing module with label 'testmerge' id = 31 +++++ finished: constructing module with label 'testmerge' id = 31 +++++ starting: constructing module with label 'get' id = 32 +++++ finished: constructing module with label 'get' id = 32 +++++ starting: constructing module with label 'getInt' id = 33 +++++ finished: constructing module with label 'getInt' id = 33 +++++ starting: constructing module with label 'dependsOnNoPut' id = 34 +++++ finished: constructing module with label 'dependsOnNoPut' id = 34 +++++ starting: constructing module with label 'out' id = 35 +++++ finished: constructing module with label 'out' id = 35 ++ preallocate: 1 concurrent runs, 1 concurrent luminosity sections, 1 streams ++ starting: begin job ++ starting: begin job @@ -102,20 +98,20 @@ ++++ finished: begin job for module with label 'path2' id = 4 ++ starting: begin job ++ starting: begin job -++++ starting: begin job for module with label 'thingWithMergeProducer' id = 18 -++++ finished: begin job for module with label 'thingWithMergeProducer' id = 18 -++++ starting: begin job for module with label 'test' id = 19 -++++ finished: begin job for module with label 'test' id = 19 -++++ starting: begin job for module with label 'testmerge' id = 20 -++++ finished: begin job for module with label 'testmerge' id = 20 -++++ starting: begin job for module with label 'get' id = 21 -++++ finished: begin job for module with label 'get' id = 21 -++++ starting: begin job for module with label 'getInt' id = 22 -++++ finished: begin job for module with label 'getInt' id = 22 -++++ starting: begin job for module with label 'dependsOnNoPut' id = 23 -++++ finished: begin job for module with label 'dependsOnNoPut' id = 23 -++++ starting: begin job for module with label 'out' id = 24 -++++ finished: begin job for module with label 'out' id = 24 +++++ starting: begin job for module 
with label 'thingWithMergeProducer' id = 17 +++++ finished: begin job for module with label 'thingWithMergeProducer' id = 17 +++++ starting: begin job for module with label 'test' id = 18 +++++ finished: begin job for module with label 'test' id = 18 +++++ starting: begin job for module with label 'testmerge' id = 19 +++++ finished: begin job for module with label 'testmerge' id = 19 +++++ starting: begin job for module with label 'get' id = 20 +++++ finished: begin job for module with label 'get' id = 20 +++++ starting: begin job for module with label 'getInt' id = 21 +++++ finished: begin job for module with label 'getInt' id = 21 +++++ starting: begin job for module with label 'dependsOnNoPut' id = 22 +++++ finished: begin job for module with label 'dependsOnNoPut' id = 22 +++++ starting: begin job for module with label 'out' id = 23 +++++ finished: begin job for module with label 'out' id = 23 ++++ starting: begin job for module with label 'TriggerResults' id = 12 ++++ finished: begin job for module with label 'TriggerResults' id = 12 ++++ starting: begin job for module with label 'path1' id = 13 @@ -126,35 +122,31 @@ ++++ finished: begin job for module with label 'path3' id = 15 ++++ starting: begin job for module with label 'path4' id = 16 ++++ finished: begin job for module with label 'path4' id = 16 -++++ starting: begin job for module with label 'endPath1' id = 17 -++++ finished: begin job for module with label 'endPath1' id = 17 ++ starting: begin job -++++ starting: begin job for module with label 'thingWithMergeProducer' id = 31 -++++ finished: begin job for module with label 'thingWithMergeProducer' id = 31 -++++ starting: begin job for module with label 'test' id = 32 -++++ finished: begin job for module with label 'test' id = 32 -++++ starting: begin job for module with label 'testmerge' id = 33 -++++ finished: begin job for module with label 'testmerge' id = 33 -++++ starting: begin job for module with label 'get' id = 34 -++++ finished: begin job 
for module with label 'get' id = 34 -++++ starting: begin job for module with label 'getInt' id = 35 -++++ finished: begin job for module with label 'getInt' id = 35 -++++ starting: begin job for module with label 'dependsOnNoPut' id = 36 -++++ finished: begin job for module with label 'dependsOnNoPut' id = 36 -++++ starting: begin job for module with label 'out' id = 37 -++++ finished: begin job for module with label 'out' id = 37 -++++ starting: begin job for module with label 'TriggerResults' id = 25 -++++ finished: begin job for module with label 'TriggerResults' id = 25 -++++ starting: begin job for module with label 'path1' id = 26 -++++ finished: begin job for module with label 'path1' id = 26 -++++ starting: begin job for module with label 'path2' id = 27 -++++ finished: begin job for module with label 'path2' id = 27 -++++ starting: begin job for module with label 'path3' id = 28 -++++ finished: begin job for module with label 'path3' id = 28 -++++ starting: begin job for module with label 'path4' id = 29 -++++ finished: begin job for module with label 'path4' id = 29 -++++ starting: begin job for module with label 'endPath1' id = 30 -++++ finished: begin job for module with label 'endPath1' id = 30 +++++ starting: begin job for module with label 'thingWithMergeProducer' id = 29 +++++ finished: begin job for module with label 'thingWithMergeProducer' id = 29 +++++ starting: begin job for module with label 'test' id = 30 +++++ finished: begin job for module with label 'test' id = 30 +++++ starting: begin job for module with label 'testmerge' id = 31 +++++ finished: begin job for module with label 'testmerge' id = 31 +++++ starting: begin job for module with label 'get' id = 32 +++++ finished: begin job for module with label 'get' id = 32 +++++ starting: begin job for module with label 'getInt' id = 33 +++++ finished: begin job for module with label 'getInt' id = 33 +++++ starting: begin job for module with label 'dependsOnNoPut' id = 34 +++++ finished: 
begin job for module with label 'dependsOnNoPut' id = 34 +++++ starting: begin job for module with label 'out' id = 35 +++++ finished: begin job for module with label 'out' id = 35 +++++ starting: begin job for module with label 'TriggerResults' id = 24 +++++ finished: begin job for module with label 'TriggerResults' id = 24 +++++ starting: begin job for module with label 'path1' id = 25 +++++ finished: begin job for module with label 'path1' id = 25 +++++ starting: begin job for module with label 'path2' id = 26 +++++ finished: begin job for module with label 'path2' id = 26 +++++ starting: begin job for module with label 'path3' id = 27 +++++ finished: begin job for module with label 'path3' id = 27 +++++ starting: begin job for module with label 'path4' id = 28 +++++ finished: begin job for module with label 'path4' id = 28 ++ finished: begin job ++++ starting: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 5 ++++ finished: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 5 @@ -178,22 +170,22 @@ ++++ finished: begin stream for module: stream = 0 label = 'path1' id = 3 ++++ starting: begin stream for module: stream = 0 label = 'path2' id = 4 ++++ finished: begin stream for module: stream = 0 label = 'path2' id = 4 -++++ starting: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++ finished: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++ starting: begin stream for module: stream = 0 label = 'test' id = 19 -++++ finished: begin stream for module: stream = 0 label = 'test' id = 19 -++++ starting: begin stream for module: stream = 0 label = 'testmerge' id = 20 -++++ finished: begin stream for module: stream = 0 label = 'testmerge' id = 20 -++++ starting: begin stream for module: stream = 0 label = 'get' id = 21 -++++ finished: begin stream for module: stream = 0 label = 'get' id = 21 -++++ starting: begin stream for module: stream = 0 label = 
'getInt' id = 22 -++++ finished: begin stream for module: stream = 0 label = 'getInt' id = 22 -++++ starting: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++ finished: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++ starting: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++ finished: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++ starting: begin stream for module: stream = 0 label = 'test' id = 18 +++++ finished: begin stream for module: stream = 0 label = 'test' id = 18 +++++ starting: begin stream for module: stream = 0 label = 'testmerge' id = 19 +++++ finished: begin stream for module: stream = 0 label = 'testmerge' id = 19 +++++ starting: begin stream for module: stream = 0 label = 'get' id = 20 +++++ finished: begin stream for module: stream = 0 label = 'get' id = 20 +++++ starting: begin stream for module: stream = 0 label = 'getInt' id = 21 +++++ finished: begin stream for module: stream = 0 label = 'getInt' id = 21 +++++ starting: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++ finished: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++ starting: begin stream for module: stream = 0 label = 'TriggerResults' id = 12 ++++ finished: begin stream for module: stream = 0 label = 'TriggerResults' id = 12 -++++ starting: begin stream for module: stream = 0 label = 'out' id = 24 -++++ finished: begin stream for module: stream = 0 label = 'out' id = 24 +++++ starting: begin stream for module: stream = 0 label = 'out' id = 23 +++++ finished: begin stream for module: stream = 0 label = 'out' id = 23 ++++ starting: begin stream for module: stream = 0 label = 'path1' id = 13 ++++ finished: begin stream for module: stream = 0 label = 'path1' id = 13 ++++ starting: begin stream for module: stream = 0 label = 'path2' id = 14 @@ -202,34 +194,30 @@ ++++ finished: begin stream for module: stream = 0 label 
= 'path3' id = 15 ++++ starting: begin stream for module: stream = 0 label = 'path4' id = 16 ++++ finished: begin stream for module: stream = 0 label = 'path4' id = 16 -++++ starting: begin stream for module: stream = 0 label = 'endPath1' id = 17 -++++ finished: begin stream for module: stream = 0 label = 'endPath1' id = 17 -++++ starting: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++ finished: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++ starting: begin stream for module: stream = 0 label = 'test' id = 32 -++++ finished: begin stream for module: stream = 0 label = 'test' id = 32 -++++ starting: begin stream for module: stream = 0 label = 'testmerge' id = 33 -++++ finished: begin stream for module: stream = 0 label = 'testmerge' id = 33 -++++ starting: begin stream for module: stream = 0 label = 'get' id = 34 -++++ finished: begin stream for module: stream = 0 label = 'get' id = 34 -++++ starting: begin stream for module: stream = 0 label = 'getInt' id = 35 -++++ finished: begin stream for module: stream = 0 label = 'getInt' id = 35 -++++ starting: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++ finished: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++ starting: begin stream for module: stream = 0 label = 'TriggerResults' id = 25 -++++ finished: begin stream for module: stream = 0 label = 'TriggerResults' id = 25 -++++ starting: begin stream for module: stream = 0 label = 'out' id = 37 -++++ finished: begin stream for module: stream = 0 label = 'out' id = 37 -++++ starting: begin stream for module: stream = 0 label = 'path1' id = 26 -++++ finished: begin stream for module: stream = 0 label = 'path1' id = 26 -++++ starting: begin stream for module: stream = 0 label = 'path2' id = 27 -++++ finished: begin stream for module: stream = 0 label = 'path2' id = 27 -++++ starting: begin stream for module: stream = 0 label = 'path3' id = 28 -++++ 
finished: begin stream for module: stream = 0 label = 'path3' id = 28 -++++ starting: begin stream for module: stream = 0 label = 'path4' id = 29 -++++ finished: begin stream for module: stream = 0 label = 'path4' id = 29 -++++ starting: begin stream for module: stream = 0 label = 'endPath1' id = 30 -++++ finished: begin stream for module: stream = 0 label = 'endPath1' id = 30 +++++ starting: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++ finished: begin stream for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++ starting: begin stream for module: stream = 0 label = 'test' id = 30 +++++ finished: begin stream for module: stream = 0 label = 'test' id = 30 +++++ starting: begin stream for module: stream = 0 label = 'testmerge' id = 31 +++++ finished: begin stream for module: stream = 0 label = 'testmerge' id = 31 +++++ starting: begin stream for module: stream = 0 label = 'get' id = 32 +++++ finished: begin stream for module: stream = 0 label = 'get' id = 32 +++++ starting: begin stream for module: stream = 0 label = 'getInt' id = 33 +++++ finished: begin stream for module: stream = 0 label = 'getInt' id = 33 +++++ starting: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++ finished: begin stream for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++ starting: begin stream for module: stream = 0 label = 'TriggerResults' id = 24 +++++ finished: begin stream for module: stream = 0 label = 'TriggerResults' id = 24 +++++ starting: begin stream for module: stream = 0 label = 'out' id = 35 +++++ finished: begin stream for module: stream = 0 label = 'out' id = 35 +++++ starting: begin stream for module: stream = 0 label = 'path1' id = 25 +++++ finished: begin stream for module: stream = 0 label = 'path1' id = 25 +++++ starting: begin stream for module: stream = 0 label = 'path2' id = 26 +++++ finished: begin stream for module: stream = 0 label = 'path2' id = 26 +++++ starting: begin stream 
for module: stream = 0 label = 'path3' id = 27 +++++ finished: begin stream for module: stream = 0 label = 'path3' id = 27 +++++ starting: begin stream for module: stream = 0 label = 'path4' id = 28 +++++ finished: begin stream for module: stream = 0 label = 'path4' id = 28 ++++ starting: begin process block ++++ finished: begin process block ++++ starting: begin process block @@ -243,16 +231,16 @@ ++++ starting: begin process block ++++ finished: begin process block ++++ starting: begin process block -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 22 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 21 ++++ starting: begin process block -++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 35 -++++++ starting: begin process block for module: label = 'getInt' id = 35 -++++++ finished: begin process block for module: label = 'getInt' id = 35 +++++++++ starting: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 33 +++++++ starting: begin process block for module: label = 'getInt' id = 33 +++++++ finished: begin process block for module: label = 'getInt' id = 33 ++++ finished: begin process block -++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 22 -++++++ starting: begin process block for module: label = 'getInt' id = 22 -++++++ finished: begin process block for module: label = 'getInt' id = 22 +++++++++ finished: prefetching before processing begin ProcessBlock for module: label = 'getInt' id = 21 +++++++ starting: begin process block for module: label = 'getInt' id = 21 +++++++ finished: begin process block for module: 
label = 'getInt' id = 21 ++++ finished: begin process block ++++ queuing: EventSetup synchronization run: 1 lumi: 0 event: 0 ++++ pre: EventSetup synchronizing run: 1 lumi: 0 event: 0 @@ -288,62 +276,61 @@ ++++ starting: global begin run 1 : time = 1 ++++ finished: global begin run 1 : time = 1 ++++ starting: global begin run 1 : time = 1 -++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 21 +++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 20 ++++++++ starting: prefetching for esmodule: label = '' type = DoodadESSource in record = GadgetRcd -++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin run 1 : time = 1 -++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 35 
-++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 32 -++++++ starting: global begin run for module: label = 'test' id = 32 -++++++ finished: global begin run for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 33 -++++++ starting: global begin run for module: label = 'testmerge' id = 33 -++++++ finished: global begin run for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 35 -++++++ starting: global begin run for module: label = 'getInt' id = 35 -++++++ finished: global begin run for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 36 -++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin run for module: 
label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 19 -++++++ starting: global begin run for module: label = 'test' id = 19 -++++++ finished: global begin run for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 20 -++++++ starting: global begin run for module: label = 'testmerge' id = 20 -++++++ finished: global begin run for module: label = 'testmerge' id = 20 +++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 30 +++++++ starting: global begin run for module: label = 'test' id = 30 +++++++ finished: global begin run for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 31 +++++++ starting: global begin run for module: label = 'testmerge' id = 31 +++++++ finished: global begin run for module: label = 'testmerge' id = 31 +++++++++ 
finished: prefetching before processing global begin Run for module: label = 'getInt' id = 33 +++++++ starting: global begin run for module: label = 'getInt' id = 33 +++++++ finished: global begin run for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 34 +++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 18 +++++++ starting: global begin run for module: label = 'test' id = 18 +++++++ finished: global begin run for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 19 +++++++ starting: global begin run for module: label = 'testmerge' id = 19 +++++++ finished: global begin run for module: label = 'testmerge' id = 19 ++++++++ finished: prefetching for esmodule: label = '' type = DoodadESSource in record = GadgetRcd ++++++++ starting: processing esmodule: label = '' type = DoodadESSource in record = GadgetRcd ++++++++ finished: processing esmodule: label = '' type = DoodadESSource in record = GadgetRcd -++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 21 -++++++ starting: global begin run for module: label = 'get' id = 21 -++++++ finished: global begin run for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 34 -++++++ starting: global begin run for module: label = 'get' id 
= 34 -++++++ finished: global begin run for module: label = 'get' id = 34 +++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 20 +++++++ starting: global begin run for module: label = 'get' id = 20 +++++++ finished: global begin run for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 32 +++++++ starting: global begin run for module: label = 'get' id = 32 +++++++ finished: global begin run for module: label = 'get' id = 32 ++++ finished: global begin run 1 : time = 1 -++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 22 -++++++ starting: global begin run for module: label = 'getInt' id = 22 -++++++ finished: global begin run for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 21 +++++++ starting: global begin run for module: label = 'getInt' id = 21 +++++++ finished: global begin run for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin run 1 : time = 1 -++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 1 time = 1 ++++ finished: begin run: stream = 0 run = 1 time = 1 ++++ starting: begin run: stream = 0 run = 1 time = 1 @@ -362,6 +349,7 @@ ++++ finished: begin run: stream = 0 run = 1 time = 1 ++++ starting: 
begin run: stream = 0 run = 1 time = 1 ++++ finished: begin run: stream = 0 run = 1 time = 1 +++++ queuing: EventSetup synchronization run: 1 lumi: 1 event: 0 ++++ pre: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ post: EventSetup synchronizing run: 1 lumi: 1 event: 0 ++++ starting: source lumi @@ -391,56 +379,56 @@ ++++ starting: global begin lumi: run = 1 lumi = 1 time = 1 ++++ finished: global begin lumi: run = 1 lumi = 1 time = 1 ++++ starting: global begin lumi: run = 1 lumi = 1 time = 1 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 1 lumi = 1 time 
= 1 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 
35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for 
module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 1 lumi = 1 time = 1 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ 
starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: 
global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 1 lumi = 1 time = 1 ++++ starting: begin lumi: stream = 0 run = 1 lumi = 1 time = 1 ++++ finished: begin lumi: stream = 0 run = 1 lumi = 1 time = 1 @@ -515,105 +503,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 1 time = 5000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 1 
time = 5000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event 
for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ 
finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 1 time = 5000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: 
stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: 
stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ 
finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 1 time = 5000001 ++++ starting: source event @@ -671,105 +655,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 2 time = 10000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 
'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 2 time = 10000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for 
module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event 
for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' 
id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for 
module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 2 time = 10000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: 
stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 
-++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 2 time = 10000001 ++++ starting: source event @@ -827,105 +807,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 3 time = 15000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: 
prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 3 time = 15000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 
-++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 
0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 
'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 3 time = 15000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 
-++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 
+++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ 
finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing 
event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 3 time = 15000001 ++++ starting: source event @@ -983,105 +959,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 4 time = 20000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 1 event = 4 time = 20000001 ++++++ 
starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 
+++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 
'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for 
module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 4 time = 20000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 
+++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 
-++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: 
stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 1 event = 4 time = 20000001 ++++ queuing: EventSetup synchronization run: 1 lumi: 2 event: 0 @@ -1130,56 +1102,56 @@ ++++ starting: global end lumi: run = 1 lumi = 1 time = 1 ++++ finished: global end lumi: run = 1 lumi = 1 time = 1 ++++ starting: global end lumi: run = 1 lumi = 1 time = 1 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 
'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 1 lumi = 1 time = 1 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before 
processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: 
label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 1 lumi = 1 time = 1 -++++++++ finished: prefetching before processing global end 
LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for 
module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 1 lumi = 1 time = 1 ++++ starting: global write lumi: run = 1 lumi = 1 time = 1 ++++ finished: global write lumi: run = 1 lumi = 1 time = 1 @@ -1191,11 +1163,11 @@ ++++ finished: global write lumi: run = 1 lumi = 1 time = 1 ++++ starting: global write lumi: run = 1 lumi = 1 time = 1 ++++ starting: global write lumi: run = 1 lumi = 1 time = 1 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 1 lumi = 1 time = 1 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for 
module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 1 lumi = 1 time = 1 ++++ starting: source lumi ++++ finished: source lumi @@ -1224,56 +1196,56 @@ ++++ starting: global begin lumi: run = 1 lumi = 2 time = 25000001 ++++ finished: global begin lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: global begin lumi: run = 1 lumi = 2 time = 25000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 1 lumi = 2 time = 25000001 -++++++++ starting: prefetching before processing global begin 
LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' 
id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi 
for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 1 lumi = 2 time = 25000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 
-++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 
++++ finished: global begin lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: begin lumi: stream = 0 run = 1 lumi = 2 time = 25000001 ++++ finished: begin lumi: stream = 0 run = 1 lumi = 2 time = 25000001 @@ -1348,105 +1320,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 5 time = 25000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 5 time = 25000001 ++++++ starting: 
processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ 
starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 
-++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 
label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 5 time = 25000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: 
processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: 
processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 
'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 5 time = 25000001 ++++ starting: source event @@ -1504,105 +1472,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 6 time = 30000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing 
path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 6 time = 30000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ 
starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for 
module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 6 time = 30000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ 
starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' 
id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 
label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 6 time = 30000001 ++++ starting: source event @@ -1660,105 +1624,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 7 time = 35000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 
'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 7 time = 35000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: 
stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event 
for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 7 time = 35000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 
label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 
label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 
'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ 
finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 7 time = 35000001 ++++ starting: source event @@ -1816,105 +1776,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 8 time = 40000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 2 event = 8 time = 40000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ 
starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream 
= 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 
-++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 8 time = 40000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: 
stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: 
stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: 
processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 2 event = 8 time = 40000001 ++++ queuing: EventSetup synchronization run: 1 lumi: 3 event: 0 @@ -1963,56 +1919,56 @@ ++++ starting: global end lumi: run = 1 lumi = 2 time = 25000001 ++++ finished: global end lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: global end lumi: run = 1 lumi = 2 time = 25000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: 
prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 1 lumi = 2 time = 25000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end 
LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id 
= 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 1 lumi = 2 time = 25000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for 
module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 
'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: global write lumi: run = 1 lumi = 2 time = 25000001 ++++ finished: global write lumi: run = 1 lumi = 2 time = 25000001 @@ -2024,11 +1980,11 @@ ++++ finished: global write lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: global write lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: global write lumi: run = 1 lumi = 2 time = 25000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 1 lumi = 2 time = 25000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 
+++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 1 lumi = 2 time = 25000001 ++++ starting: source lumi ++++ finished: source lumi @@ -2057,56 +2013,56 @@ ++++ starting: global begin lumi: run = 1 lumi = 3 time = 45000001 ++++ finished: global begin lumi: run = 1 lumi = 3 time = 45000001 ++++ starting: global begin lumi: run = 1 lumi = 3 time = 45000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 1 lumi = 3 time = 45000001 -++++++++ starting: 
prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: 
global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 
'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 1 lumi = 3 time = 45000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global 
begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi 
for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 1 lumi = 3 time = 45000001 ++++ starting: begin lumi: stream = 0 run = 1 lumi = 3 time = 45000001 ++++ finished: begin lumi: stream = 0 run = 1 lumi = 3 time = 45000001 @@ -2181,105 +2137,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 3 event = 9 time = 45000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 3 
event = 9 time = 45000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event 
for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ 
finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 3 event = 9 time = 45000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: 
stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: 
stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ 
finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 3 event = 9 time = 45000001 ++++ starting: source event @@ -2337,105 +2289,101 @@ ++++ starting: processing event : stream = 0 run = 1 lumi = 3 event = 10 time = 50000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 
label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 1 lumi = 3 event = 10 time = 50000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: 
processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 
label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before 
processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 3 event = 10 time = 50000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: 
processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 
0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 1 lumi = 3 event = 10 time = 50000001 ++++ queuing: EventSetup synchronization run: 1 lumi: 4294967295 event: 18446744073709551615 @@ -2484,56 +2432,56 @@ ++++ starting: global end lumi: run = 1 lumi = 3 time = 45000001 ++++ finished: global end lumi: run = 1 lumi = 3 time = 45000001 ++++ starting: global end lumi: run = 1 lumi = 3 time = 45000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for 
module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 1 lumi = 3 time = 45000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching 
before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 
'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 1 lumi = 3 time = 45000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching 
before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 
+++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 1 lumi = 3 time = 45000001 ++++ starting: global write lumi: run = 1 lumi = 3 time = 45000001 ++++ finished: global write lumi: run = 1 lumi = 3 time = 45000001 @@ -2545,11 +2493,11 @@ ++++ finished: global write lumi: run = 1 lumi = 3 time = 45000001 ++++ starting: global write lumi: run = 1 lumi = 3 time = 45000001 ++++ starting: global write lumi: run = 1 lumi = 3 time = 45000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 1 lumi = 3 time = 45000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 1 lumi = 3 time = 45000001 ++++ queuing: EventSetup synchronization run: 2 lumi: 0 event: 0 ++++ pre: EventSetup synchronizing run: 2 lumi: 0 event: 0 @@ -2597,56 +2545,56 @@ ++++ starting: global end run 1 : time = 0 ++++ finished: global end run 1 : time = 0 ++++ starting: global end run 1 : time = 0 -++++++++ starting: prefetching before processing global 
end Run for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end run 1 : time = 0 -++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' 
id = 31 -++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 33 -++++++ starting: global end run for module: label = 'testmerge' id = 33 -++++++ finished: global end run for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 32 -++++++ starting: global end run for module: label = 'test' id = 32 -++++++ finished: global end run for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 34 -++++++ starting: global end run for module: label = 'get' id = 34 -++++++ finished: global end run for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 35 -++++++ starting: global end run for module: label = 'getInt' id = 35 -++++++ finished: global end run for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end Run for 
module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 31 +++++++ starting: global end run for module: label = 'testmerge' id = 31 +++++++ finished: global end run for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 30 +++++++ starting: global end run for module: label = 'test' id = 30 +++++++ finished: global end run for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 32 +++++++ starting: global end run for module: label = 'get' id = 32 +++++++ finished: global end run for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 33 +++++++ starting: global end run for module: label = 'getInt' id = 33 +++++++ finished: global end run for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end run 1 : time = 0 -++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id 
= 20 -++++++ starting: global end run for module: label = 'testmerge' id = 20 -++++++ finished: global end run for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 19 -++++++ starting: global end run for module: label = 'test' id = 19 -++++++ finished: global end run for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 21 -++++++ starting: global end run for module: label = 'get' id = 21 -++++++ finished: global end run for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 22 -++++++ starting: global end run for module: label = 'getInt' id = 22 -++++++ finished: global end run for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 19 +++++++ starting: global end run for module: label = 'testmerge' id = 19 +++++++ finished: global end run for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 18 +++++++ starting: global end run for module: label = 'test' id = 18 +++++++ finished: global end run for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end Run for module: label = 'get' id 
= 20 +++++++ starting: global end run for module: label = 'get' id = 20 +++++++ finished: global end run for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 21 +++++++ starting: global end run for module: label = 'getInt' id = 21 +++++++ finished: global end run for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end run 1 : time = 0 ++++ starting: global write run 1 : time = 50000001 ++++ finished: global write run 1 : time = 50000001 @@ -2658,11 +2606,11 @@ ++++ finished: global write run 1 : time = 0 ++++ starting: global write run 1 : time = 0 ++++ starting: global write run 1 : time = 0 -++++++ starting: write run for module: label = 'out' id = 37 -++++++ finished: write run for module: label = 'out' id = 37 +++++++ starting: write run for module: label = 'out' id = 35 +++++++ finished: write run for module: label = 'out' id = 35 ++++ finished: global write run 1 : time = 0 -++++++ starting: write run for module: label = 'out' id = 24 -++++++ finished: write run for module: label = 'out' id = 24 +++++++ starting: write run for module: label = 'out' id = 23 +++++++ finished: write run for module: label = 'out' id = 23 ++++ finished: global write run 1 : time = 0 ++++ starting: source run ++++ finished: source run @@ -2691,60 +2639,57 @@ ++++ starting: global begin run 2 : time = 55000001 ++++ finished: global begin run 2 : time = 55000001 ++++ starting: global begin run 2 : time = 55000001 -++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 22 -++++++++ 
starting: prefetching before processing global begin Run for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin run 2 : time = 55000001 -++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global 
begin run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 32 -++++++ starting: global begin run for module: label = 'test' id = 32 -++++++ finished: global begin run for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 33 -++++++ starting: global begin run for module: label = 'testmerge' id = 33 -++++++ finished: global begin run for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 34 -++++++ starting: global begin run for module: label = 'get' id = 34 -++++++ finished: global begin run for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 35 -++++++ starting: global begin run for module: label = 'getInt' id = 35 -++++++ finished: global begin run for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching 
before processing global begin Run for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 30 +++++++ starting: global begin run for module: label = 'test' id = 30 +++++++ finished: global begin run for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 31 +++++++ starting: global begin run for module: label = 'testmerge' id = 31 +++++++ finished: global begin run for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 32 +++++++ starting: global begin run for module: label = 'get' id = 32 +++++++ finished: global begin run for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 33 +++++++ starting: global begin run for module: label = 'getInt' id = 33 +++++++ finished: global begin run for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin run 2 : time = 55000001 -++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 19 -++++++ starting: global 
begin run for module: label = 'test' id = 19 -++++++ finished: global begin run for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 20 -++++++ starting: global begin run for module: label = 'testmerge' id = 20 -++++++ finished: global begin run for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 21 -++++++ starting: global begin run for module: label = 'get' id = 21 -++++++ finished: global begin run for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 22 -++++++ starting: global begin run for module: label = 'getInt' id = 22 -++++++ finished: global begin run for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 18 +++++++ starting: global begin run for module: label = 'test' id = 18 +++++++ finished: global begin run for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 19 +++++++ starting: global begin run for module: label = 'testmerge' id = 19 +++++++ finished: global begin run for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin Run for 
module: label = 'get' id = 20 +++++++ starting: global begin run for module: label = 'get' id = 20 +++++++ finished: global begin run for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 21 +++++++ starting: global begin run for module: label = 'getInt' id = 21 +++++++ finished: global begin run for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin run 2 : time = 55000001 -++++ queuing: EventSetup synchronization run: 2 lumi: 1 event: 0 -++++ pre: EventSetup synchronizing run: 2 lumi: 1 event: 0 -++++ post: EventSetup synchronizing run: 2 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 2 time = 55000001 ++++ finished: begin run: stream = 0 run = 2 time = 55000001 ++++ starting: begin run: stream = 0 run = 2 time = 55000001 @@ -2763,6 +2708,9 @@ ++++ finished: begin run: stream = 0 run = 2 time = 55000001 ++++ starting: begin run: stream = 0 run = 2 time = 55000001 ++++ finished: begin run: stream = 0 run = 2 time = 55000001 +++++ queuing: EventSetup synchronization run: 2 lumi: 1 event: 0 +++++ pre: EventSetup synchronizing run: 2 lumi: 1 event: 0 +++++ post: EventSetup synchronizing run: 2 lumi: 1 event: 0 ++++ starting: source lumi ++++ finished: source lumi ++++ starting: global begin lumi: run = 2 lumi = 1 time = 55000001 @@ -2790,56 +2738,56 @@ ++++ starting: global begin lumi: run = 2 lumi = 1 time = 55000001 ++++ finished: global begin lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: global begin lumi: run = 2 lumi = 1 time = 55000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: 
prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 2 lumi = 1 time = 55000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing 
global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before 
processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 
+++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 2 lumi = 1 time = 55000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' 
id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: begin lumi: stream = 0 run = 2 lumi = 1 time = 55000001 ++++ finished: begin lumi: stream = 0 run = 2 lumi = 1 time = 55000001 @@ -2914,105 +2862,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 1 time = 55000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: 
prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 1 time = 55000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id 
= 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 
0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching 
before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 
17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 1 time = 55000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 
'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 
label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: 
processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 1 time = 55000001 ++++ starting: source event @@ -3070,105 +3014,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 2 time = 60000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: 
stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 2 time = 60000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 
18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: 
prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ 
finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 2 time = 60000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for 
module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing 
event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: 
stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 
label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 2 time = 60000001 ++++ starting: source event @@ -3226,105 +3166,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 3 time = 65000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 
0 run = 2 lumi = 1 event = 3 time = 65000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: 
stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: 
processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 
12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 3 time = 65000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for 
module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for 
module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 
+++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 3 time = 65000001 ++++ starting: source event @@ -3382,105 +3318,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 4 time = 70000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: 
stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 1 event = 4 time = 70000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before 
processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ 
finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: 
stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching 
before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 4 time = 70000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: 
processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 
0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 1 event = 4 time = 70000001 ++++ queuing: EventSetup synchronization run: 2 lumi: 2 event: 0 @@ -3529,56 +3461,56 @@ ++++ starting: global end lumi: run = 2 lumi = 1 time = 55000001 ++++ finished: global end lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: global end lumi: run = 2 lumi = 1 time = 55000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' 
id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 2 lumi = 1 time = 55000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end 
LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ 
finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 2 lumi = 1 time = 55000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock 
for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: 
label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: global write lumi: run = 2 lumi = 1 time = 55000001 ++++ finished: global write lumi: run = 2 lumi = 1 time = 55000001 @@ -3590,11 +3522,11 @@ ++++ finished: global write lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: global write lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: global write lumi: run = 2 lumi = 1 time = 55000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 2 lumi = 1 time = 55000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 2 lumi = 1 time = 55000001 ++++ starting: source lumi ++++ finished: source lumi @@ -3623,56 +3555,56 @@ ++++ starting: global begin lumi: run = 2 lumi = 2 time = 75000001 ++++ finished: global begin lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: global begin lumi: run = 2 lumi = 2 time = 75000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 
'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 2 lumi = 2 time = 75000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ 
starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 
+++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for 
module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 2 lumi = 2 time = 75000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for 
module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: begin lumi: stream = 0 run = 2 lumi = 2 time = 75000001 ++++ finished: begin lumi: stream = 0 run = 2 lumi = 2 time = 75000001 @@ -3747,105 +3679,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 5 time = 75000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 
'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 5 time = 75000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ 
starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 
'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event 
for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 5 time = 75000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: 
processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ 
finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 
0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 5 time = 75000001 ++++ starting: source event @@ -3903,105 +3831,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 6 time = 80000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: 
prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 6 time = 80000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: 
stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: 
stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 6 time = 80000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for 
module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 6 time = 80000001 ++++ starting: source event @@ -4059,105 +3983,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 7 time = 85000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 
'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 7 time = 85000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' 
id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: 
processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 
++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 7 time = 85000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: 
stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: 
stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 
+++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 7 time = 85000001 ++++ starting: source event @@ -4215,105 +4135,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 8 time = 90000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 
'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 2 event = 8 time = 90000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ 
starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 
'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event 
for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 8 time = 90000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: 
processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ 
finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 
0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 2 event = 8 time = 90000001 ++++ queuing: EventSetup synchronization run: 2 lumi: 3 event: 0 @@ -4362,56 +4278,56 @@ ++++ starting: global end lumi: run = 2 lumi = 2 time = 75000001 ++++ finished: global end lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: global end lumi: run = 2 lumi = 2 time = 75000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching 
before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 2 lumi = 2 time = 75000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 
'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: 
global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 2 lumi = 2 time = 75000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 
20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi 
for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: global write lumi: run = 2 lumi = 2 time = 75000001 ++++ finished: global write lumi: run = 2 lumi = 2 time = 75000001 @@ -4423,11 +4339,11 @@ ++++ finished: global write lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: global write lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: global write lumi: run = 2 lumi = 2 time = 75000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 2 lumi = 2 time = 75000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 2 lumi = 2 time = 75000001 ++++ starting: source lumi ++++ finished: source lumi @@ -4456,56 +4372,56 @@ ++++ starting: global begin lumi: run = 2 lumi = 3 time = 95000001 ++++ finished: global begin lumi: run = 2 lumi = 3 time = 95000001 ++++ starting: global begin lumi: run = 2 lumi = 3 time = 95000001 -++++++++ starting: 
prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 2 lumi = 3 time = 95000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before 
processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: 
prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin 
LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 2 lumi = 3 time = 95000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin 
LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 2 lumi = 3 time = 95000001 ++++ starting: begin lumi: stream = 0 run = 2 lumi = 3 time = 95000001 ++++ finished: begin lumi: stream = 0 run = 2 lumi = 3 time = 95000001 @@ -4580,105 +4496,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 3 event = 9 time = 95000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : 
stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 3 event = 9 time = 95000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event 
for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for 
module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ 
starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 3 event = 9 time = 95000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: 
prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing 
event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 3 event = 9 time = 95000001 ++++ starting: source event @@ -4736,105 +4648,101 @@ ++++ starting: processing event : stream = 0 run = 2 lumi = 3 event = 10 time = 100000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: 
stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 2 lumi = 3 event = 10 time = 100000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before 
processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ 
starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 3 event = 10 
time = 100000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for 
module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 
'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 
label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 2 lumi = 3 event = 10 time = 100000001 ++++ queuing: EventSetup synchronization run: 2 lumi: 4294967295 event: 18446744073709551615 @@ -4883,56 +4791,56 @@ ++++ starting: global end lumi: run = 2 lumi = 3 time = 95000001 ++++ finished: global end lumi: run = 2 lumi = 3 time = 95000001 ++++ starting: global end lumi: run = 2 lumi = 3 time = 95000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for 
module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 2 lumi = 3 time = 95000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 
-++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ 
finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 2 lumi = 3 time = 95000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: 
prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for 
module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 2 lumi = 3 time = 95000001 ++++ starting: global write lumi: run = 2 lumi = 3 time = 95000001 ++++ finished: global write lumi: run = 2 lumi = 3 time = 95000001 @@ -4944,11 +4852,11 @@ ++++ finished: global write lumi: run = 2 lumi = 3 time = 95000001 ++++ starting: global write lumi: run = 2 lumi = 3 time = 95000001 ++++ starting: global write lumi: run = 2 lumi = 3 time = 95000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 2 lumi = 3 time = 95000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 2 lumi = 3 time = 95000001 ++++ queuing: EventSetup synchronization run: 3 lumi: 0 event: 0 ++++ pre: EventSetup synchronizing run: 3 lumi: 0 event: 0 @@ -4996,56 +4904,56 @@ ++++ starting: global end run 2 : time = 0 ++++ finished: global end run 2 : time = 0 ++++ starting: global end run 2 : time = 0 -++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 19 -++++++++ starting: 
prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end run 2 : time = 0 -++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 33 -++++++ starting: global end run for module: label = 'testmerge' id = 33 -++++++ finished: global end run for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing 
global end Run for module: label = 'test' id = 32 -++++++ starting: global end run for module: label = 'test' id = 32 -++++++ finished: global end run for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 34 -++++++ starting: global end run for module: label = 'get' id = 34 -++++++ finished: global end run for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 35 -++++++ starting: global end run for module: label = 'getInt' id = 35 -++++++ finished: global end run for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 31 +++++++ starting: global end run for 
module: label = 'testmerge' id = 31 +++++++ finished: global end run for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 30 +++++++ starting: global end run for module: label = 'test' id = 30 +++++++ finished: global end run for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 32 +++++++ starting: global end run for module: label = 'get' id = 32 +++++++ finished: global end run for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 33 +++++++ starting: global end run for module: label = 'getInt' id = 33 +++++++ finished: global end run for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end run 2 : time = 0 -++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 20 -++++++ starting: global end run for module: label = 'testmerge' id = 20 -++++++ finished: global end run for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 19 -++++++ starting: global end run for module: label = 'test' id = 19 -++++++ finished: global end run for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end Run for module: label = 'get' id 
= 21 -++++++ starting: global end run for module: label = 'get' id = 21 -++++++ finished: global end run for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 22 -++++++ starting: global end run for module: label = 'getInt' id = 22 -++++++ finished: global end run for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 19 +++++++ starting: global end run for module: label = 'testmerge' id = 19 +++++++ finished: global end run for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 18 +++++++ starting: global end run for module: label = 'test' id = 18 +++++++ finished: global end run for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 20 +++++++ starting: global end run for module: label = 'get' id = 20 +++++++ finished: global end run for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 21 +++++++ starting: global end run for module: label = 'getInt' id = 21 +++++++ finished: global end run for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end Run for module: label = 
'dependsOnNoPut' id = 22 +++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end run 2 : time = 0 ++++ starting: global write run 2 : time = 100000001 ++++ finished: global write run 2 : time = 100000001 @@ -5057,11 +4965,11 @@ ++++ finished: global write run 2 : time = 0 ++++ starting: global write run 2 : time = 0 ++++ starting: global write run 2 : time = 0 -++++++ starting: write run for module: label = 'out' id = 37 -++++++ finished: write run for module: label = 'out' id = 37 +++++++ starting: write run for module: label = 'out' id = 35 +++++++ finished: write run for module: label = 'out' id = 35 ++++ finished: global write run 2 : time = 0 -++++++ starting: write run for module: label = 'out' id = 24 -++++++ finished: write run for module: label = 'out' id = 24 +++++++ starting: write run for module: label = 'out' id = 23 +++++++ finished: write run for module: label = 'out' id = 23 ++++ finished: global write run 2 : time = 0 ++++ starting: source run ++++ finished: source run @@ -5090,60 +4998,57 @@ ++++ starting: global begin run 3 : time = 105000001 ++++ finished: global begin run 3 : time = 105000001 ++++ starting: global begin run 3 : time = 105000001 -++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before 
processing global begin Run for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin run 3 : time = 105000001 -++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 32 -++++++ starting: global begin run for module: label = 'test' id = 32 -++++++ finished: global begin run for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 33 -++++++ starting: global begin run for module: 
label = 'testmerge' id = 33 -++++++ finished: global begin run for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 34 -++++++ starting: global begin run for module: label = 'get' id = 34 -++++++ finished: global begin run for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 35 -++++++ starting: global begin run for module: label = 'getInt' id = 35 -++++++ finished: global begin run for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin Run for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin Run for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin Run for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin Run for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 30 +++++++ starting: global begin run for module: label = 'test' id = 30 +++++++ finished: 
global begin run for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 31 +++++++ starting: global begin run for module: label = 'testmerge' id = 31 +++++++ finished: global begin run for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 32 +++++++ starting: global begin run for module: label = 'get' id = 32 +++++++ finished: global begin run for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 33 +++++++ starting: global begin run for module: label = 'getInt' id = 33 +++++++ finished: global begin run for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin run 3 : time = 105000001 -++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 19 -++++++ starting: global begin run for module: label = 'test' id = 19 -++++++ finished: global begin run for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 20 -++++++ starting: global begin run for module: label = 'testmerge' id = 20 -++++++ finished: global begin run for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin Run for module: 
label = 'get' id = 21 -++++++ starting: global begin run for module: label = 'get' id = 21 -++++++ finished: global begin run for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 22 -++++++ starting: global begin run for module: label = 'getInt' id = 22 -++++++ finished: global begin run for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin Run for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin Run for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin run for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin run for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin Run for module: label = 'test' id = 18 +++++++ starting: global begin run for module: label = 'test' id = 18 +++++++ finished: global begin run for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin Run for module: label = 'testmerge' id = 19 +++++++ starting: global begin run for module: label = 'testmerge' id = 19 +++++++ finished: global begin run for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin Run for module: label = 'get' id = 20 +++++++ starting: global begin run for module: label = 'get' id = 20 +++++++ finished: global begin run for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin Run for module: label = 'getInt' id = 21 +++++++ starting: global begin run for module: label = 'getInt' id = 21 +++++++ finished: global begin run for module: label = 'getInt' id = 21 +++++++++ finished: prefetching 
before processing global begin Run for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin run for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin run for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin run 3 : time = 105000001 -++++ queuing: EventSetup synchronization run: 3 lumi: 1 event: 0 -++++ pre: EventSetup synchronizing run: 3 lumi: 1 event: 0 -++++ post: EventSetup synchronizing run: 3 lumi: 1 event: 0 ++++ starting: begin run: stream = 0 run = 3 time = 105000001 ++++ finished: begin run: stream = 0 run = 3 time = 105000001 ++++ starting: begin run: stream = 0 run = 3 time = 105000001 @@ -5162,6 +5067,9 @@ ++++ finished: begin run: stream = 0 run = 3 time = 105000001 ++++ starting: begin run: stream = 0 run = 3 time = 105000001 ++++ finished: begin run: stream = 0 run = 3 time = 105000001 +++++ queuing: EventSetup synchronization run: 3 lumi: 1 event: 0 +++++ pre: EventSetup synchronizing run: 3 lumi: 1 event: 0 +++++ post: EventSetup synchronizing run: 3 lumi: 1 event: 0 ++++ starting: source lumi ++++ finished: source lumi ++++ starting: global begin lumi: run = 3 lumi = 1 time = 105000001 @@ -5189,56 +5097,56 @@ ++++ starting: global begin lumi: run = 3 lumi = 1 time = 105000001 ++++ finished: global begin lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: global begin lumi: run = 3 lumi = 1 time = 105000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 
-++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 3 lumi = 1 time = 105000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing 
global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 
+++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 3 lumi = 1 time = 105000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 
'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin 
LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: begin lumi: stream = 0 run = 3 lumi = 1 time = 105000001 ++++ finished: begin lumi: stream = 0 run = 3 lumi = 1 time = 105000001 @@ -5313,105 +5221,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 1 time = 105000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: 
stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 1 time = 105000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before 
processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id 
= 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 
'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 1 time = 105000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing 
event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: 
processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 
'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 1 time = 105000001 ++++ starting: source event @@ -5469,105 +5373,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 2 time = 110000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : 
stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 2 time = 110000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' 
id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for 
module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ 
starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 2 time = 110000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for 
module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: 
prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label 
= 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 2 time = 110000001 ++++ starting: source event @@ -5625,105 +5525,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 3 time = 
115000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 3 time = 115000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 
++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for 
module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id 
= 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 3 time = 115000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 
-++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 
'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 3 time = 115000001 ++++ starting: source event @@ -5781,105 +5677,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 4 time = 120000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream 
= 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 1 event = 4 time = 120000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: 
prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 
++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 
'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 4 time = 120000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing 
event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: 
processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 
'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 1 event = 4 time = 120000001 ++++ queuing: EventSetup synchronization run: 3 lumi: 2 event: 0 @@ -5928,56 +5820,56 @@ ++++ starting: global end lumi: run = 3 lumi = 1 time = 105000001 ++++ finished: global end lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: global end lumi: run = 3 lumi = 1 time = 105000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 
+++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 3 lumi = 1 time = 105000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 
'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id 
= 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 3 lumi = 1 time = 105000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ 
finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for 
module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: global write lumi: run = 3 lumi = 1 time = 105000001 ++++ finished: global write lumi: run = 3 lumi = 1 time = 105000001 @@ -5989,11 +5881,11 @@ ++++ finished: global write lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: global write lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: global write lumi: run = 3 lumi = 1 time = 105000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 3 lumi = 1 time = 105000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 3 lumi = 1 time = 105000001 ++++ starting: source lumi ++++ finished: source lumi @@ -6022,56 +5914,56 @@ ++++ starting: global begin lumi: run = 3 lumi = 2 time = 125000001 ++++ finished: global begin lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: global begin lumi: run = 3 lumi = 2 time = 125000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: 
label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 3 lumi = 2 time = 125000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 
-++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 
30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 3 lumi = 2 time = 125000001 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global 
begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: 
global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: begin lumi: stream = 0 run = 3 lumi = 2 time = 125000001 ++++ finished: begin lumi: stream = 0 run = 3 lumi = 2 time = 125000001 @@ -6146,105 +6038,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 5 time = 125000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 5 time = 125000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching 
before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' 
id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream 
= 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ 
finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 5 time = 125000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: 
prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 
+++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label 
= 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 5 time = 125000001 ++++ starting: source event @@ -6302,105 +6190,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 6 time = 130000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event 
for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 6 time = 130000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 
-++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing 
event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 
+++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 6 time = 130000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for 
module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 
0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 
'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 6 time = 130000001 ++++ starting: source event @@ 
-6458,105 +6342,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 7 time = 135000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 7 time = 135000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ 
starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: 
processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: 
processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream 
= 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 7 time = 135000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream 
= 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for 
module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 7 time = 135000001 ++++ starting: source event @@ -6614,105 +6494,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 8 time = 140000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: 
stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 2 event = 8 time = 140000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing 
event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ 
finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 
'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: 
processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 8 time = 140000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before 
processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: 
processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' 
id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 2 event = 8 time = 140000001 ++++ queuing: EventSetup synchronization run: 3 lumi: 3 event: 0 @@ -6761,56 +6637,56 @@ ++++ starting: global end lumi: run = 3 lumi = 2 time = 125000001 ++++ finished: global end lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: global end lumi: run = 3 lumi = 2 time = 125000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before 
processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 3 lumi = 2 time = 125000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: 
label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: 
prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 3 lumi = 2 time = 125000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 
'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' 
id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: global write lumi: run = 3 lumi = 2 time = 125000001 ++++ finished: global write lumi: run = 3 lumi = 2 time = 125000001 @@ -6822,11 +6698,11 @@ ++++ finished: global write lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: global write lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: global write lumi: run = 3 lumi = 2 time = 125000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 3 lumi = 2 time = 125000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 3 lumi = 2 time = 125000001 ++++ starting: source lumi ++++ finished: source lumi @@ -6855,56 +6731,56 @@ ++++ starting: global begin lumi: run = 3 lumi = 3 time = 145000001 ++++ finished: global begin lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: global begin lumi: run = 3 lumi = 3 time = 145000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 
-++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global begin lumi: run = 3 lumi = 3 time = 145000001 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 
31 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global begin lumi for module: label = 'test' id = 32 -++++++ finished: global begin lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 33 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global begin lumi for module: label = 'get' id = 34 -++++++ finished: global begin lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global begin lumi for module: label = 'getInt' id = 35 -++++++ finished: global begin lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ 
starting: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global begin lumi for module: label = 'test' id = 30 +++++++ finished: global begin lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 31 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global begin lumi for module: label = 'get' id = 32 +++++++ finished: global begin lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global begin lumi for module: label = 'getInt' id = 33 +++++++ finished: global begin lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global begin lumi: run = 3 lumi = 3 time = 145000001 -++++++++ finished: prefetching before processing global 
begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global begin lumi for module: label = 'test' id = 19 -++++++ finished: global begin lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global begin lumi for module: label = 'testmerge' id = 20 -++++++ finished: global begin lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global begin lumi for module: label = 'get' id = 21 -++++++ finished: global begin lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global begin lumi for module: label = 'getInt' id = 22 -++++++ finished: global begin lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global begin lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'test' id 
= 18 +++++++ starting: global begin lumi for module: label = 'test' id = 18 +++++++ finished: global begin lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global begin lumi for module: label = 'testmerge' id = 19 +++++++ finished: global begin lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global begin lumi for module: label = 'get' id = 20 +++++++ finished: global begin lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global begin lumi for module: label = 'getInt' id = 21 +++++++ finished: global begin lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global begin LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global begin lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global begin lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global begin lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: begin lumi: stream = 0 run = 3 lumi = 3 time = 145000001 ++++ finished: begin lumi: stream = 0 run = 3 lumi = 3 time = 145000001 @@ -6979,105 +6855,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 3 event = 9 time = 145000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for 
module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 3 event = 9 time = 145000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id 
= 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event 
for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 
'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 3 event = 9 time = 145000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 
-++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label 
= 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: 
stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 3 event = 9 time = 145000001 ++++ starting: source event @@ -7135,105 +7007,101 @@ ++++ starting: processing event : stream = 0 run = 3 lumi = 3 event = 10 time = 150000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 34 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 32 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 32 +++++++++ starting: prefetching before processing event 
for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 30 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 ++++ starting: processing event : stream = 0 run = 3 lumi = 3 event = 10 time = 150000001 ++++++ starting: processing path 'endPath1' : stream = 0 ++++++ starting: processing path 'path4' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++ starting: processing path 'path3' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'get' id = 20 ++++++ starting: processing path 'path2' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'test' id = 18 ++++++ starting: processing path 'path1' : stream = 0 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ 
finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 17 ++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 13 ++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 13 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 19 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 19 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 20 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 18 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 18 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 19 +++++++++ 
finished: processing event for module: stream = 0 label = 'testmerge' id = 19 ++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 14 ++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 14 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 21 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 21 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 22 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 22 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 20 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 20 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 21 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 21 ++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 15 ++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 15 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' 
id = 22 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 16 ++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 16 ++++++ finished: processing path 'path4' : stream = 0 ++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 12 ++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 12 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 24 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 17 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 17 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 23 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 23 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 23 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing event : stream = 0 run = 3 lumi = 3 event = 10 time = 150000001 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++++++ starting: 
processing event for module: stream = 0 label = 'path1' id = 26 -++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 26 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ finished: processing event for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++++++ starting: processing event for module: stream = 0 label = 'path1' id = 25 +++++++++ finished: processing event for module: stream = 0 label = 'path1' id = 25 ++++++ finished: processing path 'path1' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 32 -++++++++ starting: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: processing event for module: stream = 0 label = 'test' id = 32 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 33 -++++++++ starting: processing event for module: stream = 0 label = 'path2' id = 27 -++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 27 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'test' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: processing event for module: stream = 0 label = 'test' id = 30 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ finished: processing event for module: stream = 0 label = 'testmerge' id = 31 +++++++++ starting: processing event 
for module: stream = 0 label = 'path2' id = 26 +++++++++ finished: processing event for module: stream = 0 label = 'path2' id = 26 ++++++ finished: processing path 'path2' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 34 -++++++++ starting: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: processing event for module: stream = 0 label = 'get' id = 34 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 35 -++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 28 -++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 28 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'get' id = 32 +++++++++ starting: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: processing event for module: stream = 0 label = 'get' id = 32 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ finished: processing event for module: stream = 0 label = 'getInt' id = 33 +++++++++ starting: processing event for module: stream = 0 label = 'path3' id = 27 +++++++++ finished: processing event for module: stream = 0 label = 'path3' id = 27 ++++++ finished: processing path 'path3' : stream = 0 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++++++ starting: processing event for module: 
stream = 0 label = 'path4' id = 29 -++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 29 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ finished: processing event for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++++++ starting: processing event for module: stream = 0 label = 'path4' id = 28 +++++++++ finished: processing event for module: stream = 0 label = 'path4' id = 28 ++++++ finished: processing path 'path4' : stream = 0 -++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 25 -++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ finished: processing event for module: stream = 0 label = 'out' id = 37 -++++++++ starting: processing event for module: stream = 0 label = 'endPath1' id = 30 -++++++++ finished: processing event for module: stream = 0 label = 'endPath1' id = 30 +++++++++ starting: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ finished: processing event for module: stream = 0 label = 'TriggerResults' id = 24 +++++++++ starting: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: prefetching before processing event for module: stream = 0 label = 'out' id = 35 +++++++++ starting: processing event for module: stream = 0 label = 'out' id = 35 +++++++++ finished: processing event for module: stream = 0 label = 'out' id = 35 ++++++ finished: processing path 'endPath1' : stream = 0 ++++ finished: processing 
event : stream = 0 run = 3 lumi = 3 event = 10 time = 150000001 ++++ queuing: EventSetup synchronization run: 3 lumi: 4294967295 event: 18446744073709551615 @@ -7282,56 +7150,56 @@ ++++ starting: global end lumi: run = 3 lumi = 3 time = 145000001 ++++ finished: global end lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: global end lumi: run = 3 lumi = 3 time = 145000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end lumi: run = 3 lumi = 3 time = 145000001 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: 
prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 33 -++++++ starting: global end lumi for module: label = 'testmerge' id = 33 -++++++ finished: global end lumi for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 32 -++++++ starting: global end lumi for module: label = 'test' id = 32 -++++++ finished: global end lumi for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 34 -++++++ starting: global end lumi for module: label = 'get' id = 34 -++++++ finished: global end lumi for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 35 -++++++ starting: global end lumi for module: label = 'getInt' id = 35 -++++++ finished: global end lumi for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' 
id = 36 -++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 31 +++++++ starting: global end lumi for module: label = 'testmerge' id = 31 +++++++ finished: global end lumi for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 30 +++++++ starting: global end lumi for module: label = 'test' id = 30 +++++++ finished: global end lumi for module: label = 'test' id = 30 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 32 +++++++ starting: global end lumi for module: label = 'get' id = 32 +++++++ finished: global end lumi for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end 
LuminosityBlock for module: label = 'getInt' id = 33 +++++++ starting: global end lumi for module: label = 'getInt' id = 33 +++++++ finished: global end lumi for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end lumi: run = 3 lumi = 3 time = 145000001 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 20 -++++++ starting: global end lumi for module: label = 'testmerge' id = 20 -++++++ finished: global end lumi for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 19 -++++++ starting: global end lumi for module: label = 'test' id = 19 -++++++ finished: global end lumi for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 21 -++++++ starting: global end lumi for module: label = 'get' id = 21 -++++++ finished: global end lumi for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 22 -++++++ starting: global end lumi for module: label = 'getInt' id = 22 -++++++ finished: global end lumi for module: label = 'getInt' id = 22 -++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 23 -++++++ 
starting: global end lumi for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end lumi for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'testmerge' id = 19 +++++++ starting: global end lumi for module: label = 'testmerge' id = 19 +++++++ finished: global end lumi for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'test' id = 18 +++++++ starting: global end lumi for module: label = 'test' id = 18 +++++++ finished: global end lumi for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'get' id = 20 +++++++ starting: global end lumi for module: label = 'get' id = 20 +++++++ finished: global end lumi for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'getInt' id = 21 +++++++ starting: global end lumi for module: label = 'getInt' id = 21 +++++++ finished: global end lumi for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end LuminosityBlock for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end lumi for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end lumi for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: global write lumi: run = 3 lumi = 3 time = 145000001 ++++ finished: global write lumi: run = 3 lumi = 3 time = 145000001 @@ -7343,11 +7211,11 @@ ++++ finished: global 
write lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: global write lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: global write lumi: run = 3 lumi = 3 time = 145000001 -++++++ starting: write lumi for module: label = 'out' id = 37 -++++++ finished: write lumi for module: label = 'out' id = 37 +++++++ starting: write lumi for module: label = 'out' id = 35 +++++++ finished: write lumi for module: label = 'out' id = 35 ++++ finished: global write lumi: run = 3 lumi = 3 time = 145000001 -++++++ starting: write lumi for module: label = 'out' id = 24 -++++++ finished: write lumi for module: label = 'out' id = 24 +++++++ starting: write lumi for module: label = 'out' id = 23 +++++++ finished: write lumi for module: label = 'out' id = 23 ++++ finished: global write lumi: run = 3 lumi = 3 time = 145000001 ++++ starting: end run: stream = 0 run = 3 time = 150000001 ++++ finished: end run: stream = 0 run = 3 time = 150000001 @@ -7392,56 +7260,56 @@ ++++ starting: global end run 3 : time = 0 ++++ finished: global end run 3 : time = 0 ++++ starting: global end run 3 : time = 0 -++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 23 -++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 22 -++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 21 -++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 20 -++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 19 -++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 18 +++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 22 +++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 21 +++++++++ starting: prefetching before processing global 
end Run for module: label = 'get' id = 20 +++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 19 +++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 18 +++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 17 ++++ starting: global end run 3 : time = 0 -++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 36 -++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 35 -++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 34 -++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 33 -++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 32 -++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 31 -++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 31 -++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 31 -++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 33 -++++++ starting: global end run for module: label = 'testmerge' id = 33 -++++++ finished: global end run for module: label = 'testmerge' id = 33 -++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 32 -++++++ starting: global end run for module: label = 'test' id = 32 -++++++ finished: global end run for module: label = 'test' id = 32 -++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 34 -++++++ starting: global end run for module: label = 'get' id = 34 -++++++ 
finished: global end run for module: label = 'get' id = 34 -++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 35 -++++++ starting: global end run for module: label = 'getInt' id = 35 -++++++ finished: global end run for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 36 -++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 36 -++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 36 +++++++++ starting: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 34 +++++++++ starting: prefetching before processing global end Run for module: label = 'getInt' id = 33 +++++++++ starting: prefetching before processing global end Run for module: label = 'get' id = 32 +++++++++ starting: prefetching before processing global end Run for module: label = 'testmerge' id = 31 +++++++++ starting: prefetching before processing global end Run for module: label = 'test' id = 30 +++++++++ starting: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 29 +++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 29 +++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 29 +++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 31 +++++++ starting: global end run for module: label = 'testmerge' id = 31 +++++++ finished: global end run for module: label = 'testmerge' id = 31 +++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 30 +++++++ starting: global end run for module: label = 'test' id = 30 +++++++ finished: global end run for module: label = 'test' id = 30 +++++++++ finished: 
prefetching before processing global end Run for module: label = 'get' id = 32 +++++++ starting: global end run for module: label = 'get' id = 32 +++++++ finished: global end run for module: label = 'get' id = 32 +++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 33 +++++++ starting: global end run for module: label = 'getInt' id = 33 +++++++ finished: global end run for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 34 +++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 34 +++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 34 ++++ finished: global end run 3 : time = 0 -++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 18 -++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 18 -++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 18 -++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 20 -++++++ starting: global end run for module: label = 'testmerge' id = 20 -++++++ finished: global end run for module: label = 'testmerge' id = 20 -++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 19 -++++++ starting: global end run for module: label = 'test' id = 19 -++++++ finished: global end run for module: label = 'test' id = 19 -++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 21 -++++++ starting: global end run for module: label = 'get' id = 21 -++++++ finished: global end run for module: label = 'get' id = 21 -++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 22 -++++++ starting: global end run for module: label = 'getInt' id = 22 -++++++ finished: global end run for module: label = 
'getInt' id = 22 -++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 23 -++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 23 -++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 23 +++++++++ finished: prefetching before processing global end Run for module: label = 'thingWithMergeProducer' id = 17 +++++++ starting: global end run for module: label = 'thingWithMergeProducer' id = 17 +++++++ finished: global end run for module: label = 'thingWithMergeProducer' id = 17 +++++++++ finished: prefetching before processing global end Run for module: label = 'testmerge' id = 19 +++++++ starting: global end run for module: label = 'testmerge' id = 19 +++++++ finished: global end run for module: label = 'testmerge' id = 19 +++++++++ finished: prefetching before processing global end Run for module: label = 'test' id = 18 +++++++ starting: global end run for module: label = 'test' id = 18 +++++++ finished: global end run for module: label = 'test' id = 18 +++++++++ finished: prefetching before processing global end Run for module: label = 'get' id = 20 +++++++ starting: global end run for module: label = 'get' id = 20 +++++++ finished: global end run for module: label = 'get' id = 20 +++++++++ finished: prefetching before processing global end Run for module: label = 'getInt' id = 21 +++++++ starting: global end run for module: label = 'getInt' id = 21 +++++++ finished: global end run for module: label = 'getInt' id = 21 +++++++++ finished: prefetching before processing global end Run for module: label = 'dependsOnNoPut' id = 22 +++++++ starting: global end run for module: label = 'dependsOnNoPut' id = 22 +++++++ finished: global end run for module: label = 'dependsOnNoPut' id = 22 ++++ finished: global end run 3 : time = 0 ++++ starting: global write run 3 : time = 150000001 ++++ finished: global write run 3 : time = 150000001 @@ -7453,11 +7321,11 @@ ++++ finished: global 
write run 3 : time = 0 ++++ starting: global write run 3 : time = 0 ++++ starting: global write run 3 : time = 0 -++++++ starting: write run for module: label = 'out' id = 37 -++++++ finished: write run for module: label = 'out' id = 37 +++++++ starting: write run for module: label = 'out' id = 35 +++++++ finished: write run for module: label = 'out' id = 35 ++++ finished: global write run 3 : time = 0 -++++++ starting: write run for module: label = 'out' id = 24 -++++++ finished: write run for module: label = 'out' id = 24 +++++++ starting: write run for module: label = 'out' id = 23 +++++++ finished: write run for module: label = 'out' id = 23 ++++ finished: global write run 3 : time = 0 ++++ starting: end process block ++++ finished: end process block @@ -7472,16 +7340,16 @@ ++++ starting: end process block ++++ finished: end process block ++++ starting: end process block -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 22 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 21 ++++ starting: end process block -++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 35 -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 35 -++++++ starting: end process block for module: label = 'getInt' id = 35 -++++++ finished: end process block for module: label = 'getInt' id = 35 +++++++++ starting: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 33 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 33 +++++++ starting: end process block for module: label = 'getInt' id = 33 +++++++ finished: end process block for module: label = 'getInt' id = 33 ++++ finished: end process block -++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 22 -++++++ starting: end 
process block for module: label = 'getInt' id = 22 -++++++ finished: end process block for module: label = 'getInt' id = 22 +++++++++ finished: prefetching before processing end ProcessBlock for module: label = 'getInt' id = 21 +++++++ starting: end process block for module: label = 'getInt' id = 21 +++++++ finished: end process block for module: label = 'getInt' id = 21 ++++ finished: end process block ++++ starting: write process block ++++ finished: write process block @@ -7493,11 +7361,11 @@ ++++ finished: write process block ++++ starting: write process block ++++ starting: write process block -++++++ starting: write process block for module: label = 'out' id = 37 -++++++ finished: write process block for module: label = 'out' id = 37 +++++++ starting: write process block for module: label = 'out' id = 35 +++++++ finished: write process block for module: label = 'out' id = 35 ++++ finished: write process block -++++++ starting: write process block for module: label = 'out' id = 24 -++++++ finished: write process block for module: label = 'out' id = 24 +++++++ starting: write process block for module: label = 'out' id = 23 +++++++ finished: write process block for module: label = 'out' id = 23 ++++ finished: write process block ++++ starting: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 5 ++++ finished: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 5 @@ -7521,22 +7389,22 @@ ++++ finished: end stream for module: stream = 0 label = 'path1' id = 3 ++++ starting: end stream for module: stream = 0 label = 'path2' id = 4 ++++ finished: end stream for module: stream = 0 label = 'path2' id = 4 -++++ starting: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++ finished: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 18 -++++ starting: end stream for module: stream = 0 label = 'test' id = 19 -++++ finished: end stream for module: stream = 0 label = 'test' id = 19 
-++++ starting: end stream for module: stream = 0 label = 'testmerge' id = 20 -++++ finished: end stream for module: stream = 0 label = 'testmerge' id = 20 -++++ starting: end stream for module: stream = 0 label = 'get' id = 21 -++++ finished: end stream for module: stream = 0 label = 'get' id = 21 -++++ starting: end stream for module: stream = 0 label = 'getInt' id = 22 -++++ finished: end stream for module: stream = 0 label = 'getInt' id = 22 -++++ starting: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 23 -++++ finished: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 23 +++++ starting: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++ finished: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 17 +++++ starting: end stream for module: stream = 0 label = 'test' id = 18 +++++ finished: end stream for module: stream = 0 label = 'test' id = 18 +++++ starting: end stream for module: stream = 0 label = 'testmerge' id = 19 +++++ finished: end stream for module: stream = 0 label = 'testmerge' id = 19 +++++ starting: end stream for module: stream = 0 label = 'get' id = 20 +++++ finished: end stream for module: stream = 0 label = 'get' id = 20 +++++ starting: end stream for module: stream = 0 label = 'getInt' id = 21 +++++ finished: end stream for module: stream = 0 label = 'getInt' id = 21 +++++ starting: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 22 +++++ finished: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 22 ++++ starting: end stream for module: stream = 0 label = 'TriggerResults' id = 12 ++++ finished: end stream for module: stream = 0 label = 'TriggerResults' id = 12 -++++ starting: end stream for module: stream = 0 label = 'out' id = 24 -++++ finished: end stream for module: stream = 0 label = 'out' id = 24 +++++ starting: end stream for module: stream = 0 label = 'out' id = 23 +++++ finished: end stream for module: stream = 0 label = 
'out' id = 23 ++++ starting: end stream for module: stream = 0 label = 'path1' id = 13 ++++ finished: end stream for module: stream = 0 label = 'path1' id = 13 ++++ starting: end stream for module: stream = 0 label = 'path2' id = 14 @@ -7545,34 +7413,30 @@ ++++ finished: end stream for module: stream = 0 label = 'path3' id = 15 ++++ starting: end stream for module: stream = 0 label = 'path4' id = 16 ++++ finished: end stream for module: stream = 0 label = 'path4' id = 16 -++++ starting: end stream for module: stream = 0 label = 'endPath1' id = 17 -++++ finished: end stream for module: stream = 0 label = 'endPath1' id = 17 -++++ starting: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++ finished: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 31 -++++ starting: end stream for module: stream = 0 label = 'test' id = 32 -++++ finished: end stream for module: stream = 0 label = 'test' id = 32 -++++ starting: end stream for module: stream = 0 label = 'testmerge' id = 33 -++++ finished: end stream for module: stream = 0 label = 'testmerge' id = 33 -++++ starting: end stream for module: stream = 0 label = 'get' id = 34 -++++ finished: end stream for module: stream = 0 label = 'get' id = 34 -++++ starting: end stream for module: stream = 0 label = 'getInt' id = 35 -++++ finished: end stream for module: stream = 0 label = 'getInt' id = 35 -++++ starting: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++ finished: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 36 -++++ starting: end stream for module: stream = 0 label = 'TriggerResults' id = 25 -++++ finished: end stream for module: stream = 0 label = 'TriggerResults' id = 25 -++++ starting: end stream for module: stream = 0 label = 'out' id = 37 -++++ finished: end stream for module: stream = 0 label = 'out' id = 37 -++++ starting: end stream for module: stream = 0 label = 'path1' id = 26 -++++ finished: end stream for module: 
stream = 0 label = 'path1' id = 26 -++++ starting: end stream for module: stream = 0 label = 'path2' id = 27 -++++ finished: end stream for module: stream = 0 label = 'path2' id = 27 -++++ starting: end stream for module: stream = 0 label = 'path3' id = 28 -++++ finished: end stream for module: stream = 0 label = 'path3' id = 28 -++++ starting: end stream for module: stream = 0 label = 'path4' id = 29 -++++ finished: end stream for module: stream = 0 label = 'path4' id = 29 -++++ starting: end stream for module: stream = 0 label = 'endPath1' id = 30 -++++ finished: end stream for module: stream = 0 label = 'endPath1' id = 30 +++++ starting: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++ finished: end stream for module: stream = 0 label = 'thingWithMergeProducer' id = 29 +++++ starting: end stream for module: stream = 0 label = 'test' id = 30 +++++ finished: end stream for module: stream = 0 label = 'test' id = 30 +++++ starting: end stream for module: stream = 0 label = 'testmerge' id = 31 +++++ finished: end stream for module: stream = 0 label = 'testmerge' id = 31 +++++ starting: end stream for module: stream = 0 label = 'get' id = 32 +++++ finished: end stream for module: stream = 0 label = 'get' id = 32 +++++ starting: end stream for module: stream = 0 label = 'getInt' id = 33 +++++ finished: end stream for module: stream = 0 label = 'getInt' id = 33 +++++ starting: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++ finished: end stream for module: stream = 0 label = 'dependsOnNoPut' id = 34 +++++ starting: end stream for module: stream = 0 label = 'TriggerResults' id = 24 +++++ finished: end stream for module: stream = 0 label = 'TriggerResults' id = 24 +++++ starting: end stream for module: stream = 0 label = 'out' id = 35 +++++ finished: end stream for module: stream = 0 label = 'out' id = 35 +++++ starting: end stream for module: stream = 0 label = 'path1' id = 25 +++++ finished: end stream for module: 
stream = 0 label = 'path1' id = 25 +++++ starting: end stream for module: stream = 0 label = 'path2' id = 26 +++++ finished: end stream for module: stream = 0 label = 'path2' id = 26 +++++ starting: end stream for module: stream = 0 label = 'path3' id = 27 +++++ finished: end stream for module: stream = 0 label = 'path3' id = 27 +++++ starting: end stream for module: stream = 0 label = 'path4' id = 28 +++++ finished: end stream for module: stream = 0 label = 'path4' id = 28 ++++ starting: end job for module with label 'thingWithMergeProducer' id = 5 ++++ finished: end job for module with label 'thingWithMergeProducer' id = 5 ++++ starting: end job for module with label 'get' id = 6 @@ -7595,20 +7459,20 @@ ++++ finished: end job for module with label 'path1' id = 3 ++++ starting: end job for module with label 'path2' id = 4 ++++ finished: end job for module with label 'path2' id = 4 -++++ starting: end job for module with label 'thingWithMergeProducer' id = 18 -++++ finished: end job for module with label 'thingWithMergeProducer' id = 18 -++++ starting: end job for module with label 'test' id = 19 -++++ finished: end job for module with label 'test' id = 19 -++++ starting: end job for module with label 'testmerge' id = 20 -++++ finished: end job for module with label 'testmerge' id = 20 -++++ starting: end job for module with label 'get' id = 21 -++++ finished: end job for module with label 'get' id = 21 -++++ starting: end job for module with label 'getInt' id = 22 -++++ finished: end job for module with label 'getInt' id = 22 -++++ starting: end job for module with label 'dependsOnNoPut' id = 23 -++++ finished: end job for module with label 'dependsOnNoPut' id = 23 -++++ starting: end job for module with label 'out' id = 24 -++++ finished: end job for module with label 'out' id = 24 +++++ starting: end job for module with label 'thingWithMergeProducer' id = 17 +++++ finished: end job for module with label 'thingWithMergeProducer' id = 17 +++++ starting: end job 
for module with label 'test' id = 18 +++++ finished: end job for module with label 'test' id = 18 +++++ starting: end job for module with label 'testmerge' id = 19 +++++ finished: end job for module with label 'testmerge' id = 19 +++++ starting: end job for module with label 'get' id = 20 +++++ finished: end job for module with label 'get' id = 20 +++++ starting: end job for module with label 'getInt' id = 21 +++++ finished: end job for module with label 'getInt' id = 21 +++++ starting: end job for module with label 'dependsOnNoPut' id = 22 +++++ finished: end job for module with label 'dependsOnNoPut' id = 22 +++++ starting: end job for module with label 'out' id = 23 +++++ finished: end job for module with label 'out' id = 23 ++++ starting: end job for module with label 'TriggerResults' id = 12 ++++ finished: end job for module with label 'TriggerResults' id = 12 ++++ starting: end job for module with label 'path1' id = 13 @@ -7619,32 +7483,28 @@ ++++ finished: end job for module with label 'path3' id = 15 ++++ starting: end job for module with label 'path4' id = 16 ++++ finished: end job for module with label 'path4' id = 16 -++++ starting: end job for module with label 'endPath1' id = 17 -++++ finished: end job for module with label 'endPath1' id = 17 -++++ starting: end job for module with label 'thingWithMergeProducer' id = 31 -++++ finished: end job for module with label 'thingWithMergeProducer' id = 31 -++++ starting: end job for module with label 'test' id = 32 -++++ finished: end job for module with label 'test' id = 32 -++++ starting: end job for module with label 'testmerge' id = 33 -++++ finished: end job for module with label 'testmerge' id = 33 -++++ starting: end job for module with label 'get' id = 34 -++++ finished: end job for module with label 'get' id = 34 -++++ starting: end job for module with label 'getInt' id = 35 -++++ finished: end job for module with label 'getInt' id = 35 -++++ starting: end job for module with label 'dependsOnNoPut' id 
= 36 -++++ finished: end job for module with label 'dependsOnNoPut' id = 36 -++++ starting: end job for module with label 'out' id = 37 -++++ finished: end job for module with label 'out' id = 37 -++++ starting: end job for module with label 'TriggerResults' id = 25 -++++ finished: end job for module with label 'TriggerResults' id = 25 -++++ starting: end job for module with label 'path1' id = 26 -++++ finished: end job for module with label 'path1' id = 26 -++++ starting: end job for module with label 'path2' id = 27 -++++ finished: end job for module with label 'path2' id = 27 -++++ starting: end job for module with label 'path3' id = 28 -++++ finished: end job for module with label 'path3' id = 28 -++++ starting: end job for module with label 'path4' id = 29 -++++ finished: end job for module with label 'path4' id = 29 -++++ starting: end job for module with label 'endPath1' id = 30 -++++ finished: end job for module with label 'endPath1' id = 30 +++++ starting: end job for module with label 'thingWithMergeProducer' id = 29 +++++ finished: end job for module with label 'thingWithMergeProducer' id = 29 +++++ starting: end job for module with label 'test' id = 30 +++++ finished: end job for module with label 'test' id = 30 +++++ starting: end job for module with label 'testmerge' id = 31 +++++ finished: end job for module with label 'testmerge' id = 31 +++++ starting: end job for module with label 'get' id = 32 +++++ finished: end job for module with label 'get' id = 32 +++++ starting: end job for module with label 'getInt' id = 33 +++++ finished: end job for module with label 'getInt' id = 33 +++++ starting: end job for module with label 'dependsOnNoPut' id = 34 +++++ finished: end job for module with label 'dependsOnNoPut' id = 34 +++++ starting: end job for module with label 'out' id = 35 +++++ finished: end job for module with label 'out' id = 35 +++++ starting: end job for module with label 'TriggerResults' id = 24 +++++ finished: end job for module with 
label 'TriggerResults' id = 24 +++++ starting: end job for module with label 'path1' id = 25 +++++ finished: end job for module with label 'path1' id = 25 +++++ starting: end job for module with label 'path2' id = 26 +++++ finished: end job for module with label 'path2' id = 26 +++++ starting: end job for module with label 'path3' id = 27 +++++ finished: end job for module with label 'path3' id = 27 +++++ starting: end job for module with label 'path4' id = 28 +++++ finished: end job for module with label 'path4' id = 28 ++ finished: end job diff --git a/FWCore/Integration/test/useEmptyRootFile.py b/FWCore/Integration/test/useEmptyRootFile.py new file mode 100644 index 0000000000000..849358efa7926 --- /dev/null +++ b/FWCore/Integration/test/useEmptyRootFile.py @@ -0,0 +1,11 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("READ") + +process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring("file:empty.root")) + +process.Thing = cms.EDProducer("ThingProducer") + +process.OtherThing = cms.EDProducer("OtherThingProducer") + +process.p = cms.Path(process.Thing * process.OtherThing) diff --git a/FWCore/Modules/src/EventContentAnalyzer.cc b/FWCore/Modules/src/EventContentAnalyzer.cc index af5acd0136133..5d4d01454f471 100644 --- a/FWCore/Modules/src/EventContentAnalyzer.cc +++ b/FWCore/Modules/src/EventContentAnalyzer.cc @@ -278,21 +278,23 @@ namespace edm { std::map cumulates_; bool listContent_; bool listProvenance_; + bool listPathStatus_; }; // // constructors and destructor // EventContentAnalyzer::EventContentAnalyzer(ParameterSet const& iConfig) - : indentation_(iConfig.getUntrackedParameter("indentation", std::string("++"))), - verboseIndentation_(iConfig.getUntrackedParameter("verboseIndentation", std::string(" "))), - moduleLabels_(iConfig.getUntrackedParameter("verboseForModuleLabels", std::vector())), - verbose_(iConfig.getUntrackedParameter("verbose", false) || !moduleLabels_.empty()), - 
getModuleLabels_(iConfig.getUntrackedParameter("getDataForModuleLabels", std::vector())), - getData_(iConfig.getUntrackedParameter("getData", false) || !getModuleLabels_.empty()), + : indentation_(iConfig.getUntrackedParameter("indentation")), + verboseIndentation_(iConfig.getUntrackedParameter("verboseIndentation")), + moduleLabels_(iConfig.getUntrackedParameter>("verboseForModuleLabels")), + verbose_(iConfig.getUntrackedParameter("verbose") || !moduleLabels_.empty()), + getModuleLabels_(iConfig.getUntrackedParameter>("getDataForModuleLabels")), + getData_(iConfig.getUntrackedParameter("getData") || !getModuleLabels_.empty()), evno_(1), - listContent_(iConfig.getUntrackedParameter("listContent", true)), - listProvenance_(iConfig.getUntrackedParameter("listProvenance", false)) { + listContent_(iConfig.getUntrackedParameter("listContent")), + listProvenance_(iConfig.getUntrackedParameter("listProvenance")), + listPathStatus_(iConfig.getUntrackedParameter("listPathStatus")) { //now do what ever initialization is needed sort_all(moduleLabels_); sort_all(getModuleLabels_); @@ -347,7 +349,7 @@ namespace edm { std::string const& className = provenance->className(); const std::string kPathStatus("edm::PathStatus"); const std::string kEndPathStatus("edm::EndPathStatus"); - if (className == kPathStatus || className == kEndPathStatus) { + if (not listPathStatus_ and (className == kPathStatus || className == kEndPathStatus)) { continue; } std::string const& friendlyName = provenance->friendlyClassName(); @@ -448,36 +450,39 @@ namespace edm { ParameterDescriptionNode* np; std::string defaultString("++"); - np = desc.addOptionalUntracked("indentation", defaultString); + np = desc.addUntracked("indentation", defaultString); np->setComment("This string is printed at the beginning of every line printed during event processing."); - np = desc.addOptionalUntracked("verbose", false); + np = desc.addUntracked("verbose", false); np->setComment("If true, the contents of products are 
printed."); defaultString = " "; - np = desc.addOptionalUntracked("verboseIndentation", defaultString); + np = desc.addUntracked("verboseIndentation", defaultString); np->setComment( "This string is used to further indent lines when printing the contents of products in verbose mode."); std::vector defaultVString; - np = desc.addOptionalUntracked >("verboseForModuleLabels", defaultVString); + np = desc.addUntracked>("verboseForModuleLabels", defaultVString); np->setComment("If this vector is not empty, then only products with module labels on this list are printed."); - np = desc.addOptionalUntracked("getData", false); + np = desc.addUntracked("getData", false); np->setComment("If true the products will be retrieved using getByLabel."); - np = desc.addOptionalUntracked >("getDataForModuleLabels", defaultVString); + np = desc.addUntracked>("getDataForModuleLabels", defaultVString); np->setComment( "If this vector is not empty, then only products with module labels on this list are retrieved by getByLabel."); - np = desc.addOptionalUntracked("listContent", true); + np = desc.addUntracked("listContent", true); np->setComment("If true then print a list of all the event content."); - np = desc.addOptionalUntracked("listProvenance", false); + np = desc.addUntracked("listProvenance", false); np->setComment("If true, and if listContent or verbose is true, print provenance information for each product"); + desc.addUntracked("listPathStatus", false) + ->setComment("If true, also show PathStatus/EndPathStatus data products."); descriptions.add("printContent", desc); + descriptions.addDefault(desc); } } // namespace edm diff --git a/FWCore/Modules/src/GenericConsumer.cc b/FWCore/Modules/src/GenericConsumer.cc index ded80c7638e0b..4cceb6f5113ca 100644 --- a/FWCore/Modules/src/GenericConsumer.cc +++ b/FWCore/Modules/src/GenericConsumer.cc @@ -1,23 +1,120 @@ /* - * This plugin depends on all the event, lumi and run products produced by the modules listed in its configuration: - * 
- eventProducts: depend on the event products from these modules - * - lumiProducts: depend on the lumi products from these modules - * - runProducts: depend on the run products from these modules + * This EDAnalyzer will depend on all the event, lumi, run or process products declared by its configuration, both + * transient and persistent. * - * Use "*" to depend on all the products in a given branch. + * The dependencies can be specified either as module labels (e.g. "") or as branch names (e.g. + * "___"). + * If a module label is used, no underscore ("_") must be present; this module will depend all the products produced + * by that module, including those produced by the Transformer functionality (such as the implicitly copied-to-host + * products in case of Alpaka-based modules). + * If a branch name is used, all four fields must be present, separated by underscores; this module will depend only + * on the matching product(s). + * + * Glob expressions ("?" and "*") are supported in module labels and within the individual fields of branch names, + * similar to an OutputModule's "keep" statements. + * Use "*" to depend on all products of a given category. + * + * For example, in the case of Alpaka-based modules running on a device, using + * + * eventProducts = cms.untracked.vstring( "module" ) + * + * will cause "module" to run, along with automatic copy of its device products to the host. + * To avoid the copy, the DeviceProduct branch can be specified explicitly with + * + * eventProducts = cms.untracked.vstring( "*DeviceProduct_module_*_*" ) + * + * . 
*/ #include #include +#include #include +#include + +#include "DataFormats/Provenance/interface/BranchDescription.h" #include "FWCore/Framework/interface/global/EDAnalyzer.h" -#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/ParameterSet/interface/ParameterDescriptionNode.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +namespace { + struct ProductBranch { + public: + ProductBranch(std::string const& label) { + static const char kSeparator = '_'; + static const char kWildcard = '*'; + static const std::regex kAny{".*"}; + + // wildcard + if (label == kWildcard) { + type_ = kAny; + moduleLabel_ = kAny; + productInstanceName_ = kAny; + processName_ = kAny; + return; + } + + int fields = std::count(label.begin(), label.end(), kSeparator) + 1; + if (fields == 1) { + // convert the module label into a regular expression + type_ = kAny; + moduleLabel_ = glob_to_regex(label); + productInstanceName_ = kAny; + processName_ = kAny; + } else if (fields == 4) { + // split the branch name into ___ + // and convert the glob expressions into regular expressions + size_t first = 0, last = 0; + last = label.find(kSeparator, first); + type_ = glob_to_regex(label.substr(first, last - first)); + first = last + 1; + last = label.find(kSeparator, first); + moduleLabel_ = glob_to_regex(label.substr(first, last - first)); + first = last + 1; + last = label.find(kSeparator, first); + productInstanceName_ = glob_to_regex(label.substr(first, last - first)); + first = last + 1; + last = label.find(kSeparator, first); + processName_ = glob_to_regex(label.substr(first, last - first)); + } else { + // invalid input + throw edm::Exception(edm::errors::Configuration) << "Invalid module label or branch name: \"" << label << "\""; + } + } + + bool match(edm::BranchDescription 
const& branch) const { + return (std::regex_match(branch.friendlyClassName(), type_) and + std::regex_match(branch.moduleLabel(), moduleLabel_) and + std::regex_match(branch.productInstanceName(), productInstanceName_) and + std::regex_match(branch.processName(), processName_)); + } + + private: + static std::regex glob_to_regex(std::string pattern) { + boost::replace_all(pattern, "*", ".*"); + boost::replace_all(pattern, "?", "."); + return std::regex(pattern); + } + + std::regex type_; + std::regex moduleLabel_; + std::regex productInstanceName_; + std::regex processName_; + }; + + std::vector make_patterns(std::vector const& labels) { + std::vector patterns; + patterns.reserve(labels.size()); + for (auto const& label : labels) + patterns.emplace_back(label); + return patterns; + } +} // namespace + namespace edm { class GenericConsumer : public edm::global::EDAnalyzer<> { public: @@ -29,59 +126,89 @@ namespace edm { static void fillDescriptions(ConfigurationDescriptions& descriptions); private: - std::vector eventLabels_; - std::vector lumiLabels_; - std::vector runLabels_; - std::vector processLabels_; + std::vector eventProducts_; + std::vector lumiProducts_; + std::vector runProducts_; + std::vector processProducts_; + std::string label_; + bool verbose_; }; GenericConsumer::GenericConsumer(ParameterSet const& config) - : eventLabels_(config.getUntrackedParameter>("eventProducts")), - lumiLabels_(config.getUntrackedParameter>("lumiProducts")), - runLabels_(config.getUntrackedParameter>("runProducts")), - processLabels_(config.getUntrackedParameter>("processProducts")) { - std::sort(eventLabels_.begin(), eventLabels_.end()); - std::sort(lumiLabels_.begin(), lumiLabels_.end()); - std::sort(runLabels_.begin(), runLabels_.end()); - std::sort(processLabels_.begin(), processLabels_.end()); - + : eventProducts_(make_patterns(config.getUntrackedParameter>("eventProducts"))), + lumiProducts_(make_patterns(config.getUntrackedParameter>("lumiProducts"))), + 
runProducts_(make_patterns(config.getUntrackedParameter>("runProducts"))), + processProducts_(make_patterns(config.getUntrackedParameter>("processProducts"))), + label_(config.getParameter("@module_label")), + verbose_(config.getUntrackedParameter("verbose")) { callWhenNewProductsRegistered([this](edm::BranchDescription const& branch) { - static const std::string kWildcard("*"); static const std::string kPathStatus("edm::PathStatus"); static const std::string kEndPathStatus("edm::EndPathStatus"); switch (branch.branchType()) { case InEvent: - if (std::binary_search(eventLabels_.begin(), eventLabels_.end(), branch.moduleLabel()) or - (std::binary_search(eventLabels_.begin(), eventLabels_.end(), kWildcard) and - branch.className() != kPathStatus and branch.className() != kEndPathStatus)) - this->consumes(edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, - edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + if (branch.className() == kPathStatus or branch.className() == kEndPathStatus) + return; + for (auto const& label : eventProducts_) + if (label.match(branch)) { + this->consumes(edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, + edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + if (verbose_) { + edm::LogVerbatim("GenericConsumer") + << label_ << " consumes Event product " << branch.friendlyClassName() << '_' << branch.moduleLabel() + << '_' << branch.productInstanceName() << '_' << branch.processName() << '\n'; + } + break; + } break; case InLumi: - if (std::binary_search(lumiLabels_.begin(), lumiLabels_.end(), branch.moduleLabel()) or - std::binary_search(lumiLabels_.begin(), lumiLabels_.end(), kWildcard)) - this->consumes( - edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, - edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + for (auto const& label : lumiProducts_) + if (label.match(branch)) { + this->consumes( + 
edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, + edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + if (verbose_) { + edm::LogVerbatim("GenericConsumer") + << label_ << " consumes LuminosityBlock product " << branch.friendlyClassName() << '_' + << branch.moduleLabel() << '_' << branch.productInstanceName() << '_' << branch.processName() + << '\n'; + } + break; + } break; case InRun: - if (std::binary_search(runLabels_.begin(), runLabels_.end(), branch.moduleLabel()) or - std::binary_search(runLabels_.begin(), runLabels_.end(), kWildcard)) - this->consumes( - edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, - edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + for (auto const& label : runProducts_) + if (label.match(branch)) { + this->consumes( + edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, + edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + if (verbose_) { + edm::LogVerbatim("GenericConsumer") + << label_ << " consumes Run product " << branch.friendlyClassName() << '_' << branch.moduleLabel() + << '_' << branch.productInstanceName() << '_' << branch.processName() << '\n'; + } + break; + } break; case InProcess: - if (std::binary_search(processLabels_.begin(), processLabels_.end(), branch.moduleLabel()) or - std::binary_search(processLabels_.begin(), processLabels_.end(), kWildcard)) - this->consumes( - edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, - edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + for (auto const& label : processProducts_) + if (label.match(branch)) { + this->consumes( + edm::TypeToGet{branch.unwrappedTypeID(), PRODUCT_TYPE}, + edm::InputTag{branch.moduleLabel(), branch.productInstanceName(), branch.processName()}); + if (verbose_) { + edm::LogVerbatim("GenericConsumer") + << label_ << " consumes Process product " << branch.friendlyClassName() << 
'_' + << branch.moduleLabel() << '_' << branch.productInstanceName() << '_' << branch.processName() + << '\n'; + } + break; + } break; + default: throw Exception(errors::LogicError) << "Unexpected branch type " << branch.branchType() << "\nPlease contact a Framework developer\n"; @@ -91,30 +218,42 @@ namespace edm { void GenericConsumer::fillDescriptions(ConfigurationDescriptions& descriptions) { descriptions.setComment( - "This plugin depends on all the event, lumi and run products " - "produced by the modules listed in its configuration."); + R"(This EDAnalyzer will depend on all the event, lumi, run or process products declared by its configuration, both transient and persistent. + +The dependencies can be specified either as module labels (e.g. "") or as branch names (e.g. "___"). +If a module label is used, no underscore ("_") must be present; this module will depend all the products produced by that module, including those produced by the Transformer functionality (such as the implicitly copied-to-host products in case of Alpaka-based modules). +If a branch name is used, all four fields must be present, separated by underscores; this module will depend only on the matching product(s). + +Glob expressions ("?" and "*") are supported in module labels and within the individual fields of branch names, similar to an OutputModule's "keep" statements. +Use "*" to depend on all products of a given category. + +For example, in the case of Alpaka-based modules running on a device, using + + eventProducts = cms.untracked.vstring( "module" ) + +will cause "module" to run, along with automatic copy of its device products to the host. +To avoid the copy, the DeviceProduct branch can be specified explicitly with + + eventProducts = cms.untracked.vstring( "*DeviceProduct_module_*_*" ) + +.)"); ParameterSetDescription desc; desc.addUntracked>("eventProducts", {}) - ->setComment( - "List of modules whose event products this module will depend on. 
" - "Use \"*\" to depend on all event products."); + ->setComment("List of modules or branches whose event products this module will depend on."); desc.addUntracked>("lumiProducts", {}) - ->setComment( - "List of modules whose lumi products this module will depend on. " - "Use \"*\" to depend on all lumi products."); + ->setComment("List of modules or branches whose lumi products this module will depend on."); desc.addUntracked>("runProducts", {}) - ->setComment( - "List of modules whose run products this module will depend on. " - "Use \"*\" to depend on all run products."); + ->setComment("List of modules or branches whose run products this module will depend on."); desc.addUntracked>("processProducts", {}) - ->setComment( - "List of modules whose process products this module will depend on. " - "Use \"*\" to depend on all process products."); + ->setComment("List of modules or branches whose process products this module will depend on."); + desc.addUntracked("verbose", false) + ->setComment("Print the actual branch names for which the dependency are declared."); descriptions.addWithDefaultLabel(desc); } } // namespace edm +#include "FWCore/Framework/interface/MakerMacros.h" using edm::GenericConsumer; DEFINE_FWK_MODULE(GenericConsumer); diff --git a/FWCore/Modules/src/LogErrorFilter.cc b/FWCore/Modules/src/LogErrorFilter.cc index ddeafbc807fb3..04c9e76e84669 100644 --- a/FWCore/Modules/src/LogErrorFilter.cc +++ b/FWCore/Modules/src/LogErrorFilter.cc @@ -32,7 +32,7 @@ // class declaration // -class LogErrorFilter : public edm::stream::EDFilter<> { +class LogErrorFilter : public edm::stream::EDFilter { public: explicit LogErrorFilter(edm::ParameterSet const&); ~LogErrorFilter() override; diff --git a/FWCore/Modules/src/TestSource.cc b/FWCore/Modules/src/TestSource.cc index fa9378a4ff507..f0fe9d1ce9e40 100644 --- a/FWCore/Modules/src/TestSource.cc +++ b/FWCore/Modules/src/TestSource.cc @@ -15,7 +15,7 @@ namespace edm { static void 
fillDescriptions(ConfigurationDescriptions& descriptions); private: - ItemType getNextItemType() final; + ItemTypeInfo getNextItemType() final; std::shared_ptr readRunAuxiliary_() final; std::shared_ptr readLuminosityBlockAuxiliary_() final; void readEvent_(EventPrincipal& eventPrincipal) final; @@ -36,32 +36,32 @@ namespace edm { TestSource::ItemType TestSource::stringToType(const std::string& iTrans) { if (iTrans == "IsStop") { - return IsStop; + return ItemType::IsStop; } if (iTrans == "IsFile") { - return IsFile; + return ItemType::IsFile; } if (iTrans == "IsRun") { - return IsRun; + return ItemType::IsRun; } if (iTrans == "IsLumi") { - return IsLumi; + return ItemType::IsLumi; } if (iTrans == "IsEvent") { - return IsEvent; + return ItemType::IsEvent; } if (iTrans == "IsSynchronize") { - return IsSynchronize; + return ItemType::IsSynchronize; } throw edm::Exception(errors::Configuration) << "Unknown transition type \'" << iTrans << "\'"; - return IsInvalid; + return ItemType::IsInvalid; } - TestSource::ItemType TestSource::getNextItemType() { + TestSource::ItemTypeInfo TestSource::getNextItemType() { if (m_nextTransition == m_transitions.end()) { - return IsStop; + return ItemType::IsStop; } auto trans = m_nextTransition->first; ++m_nextTransition; diff --git a/FWCore/Modules/test/BuildFile.xml b/FWCore/Modules/test/BuildFile.xml index e18f700307177..d1054436b83b1 100644 --- a/FWCore/Modules/test/BuildFile.xml +++ b/FWCore/Modules/test/BuildFile.xml @@ -4,6 +4,8 @@ + + diff --git a/FWCore/Modules/test/testGenericConsumer.py b/FWCore/Modules/test/testGenericConsumer.py new file mode 100644 index 0000000000000..d8d4da12f804b --- /dev/null +++ b/FWCore/Modules/test/testGenericConsumer.py @@ -0,0 +1,49 @@ +import FWCore.ParameterSet.Config as cms + +process = cms.Process("TEST") + +process.load("FWCore.Framework.test.cmsExceptionsFatal_cff") +process.load("FWCore.MessageService.MessageLogger_cfi") +process.MessageLogger.cerr.INFO.limit = 10000000 + 
+process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(5) +) + +process.source = cms.Source("EmptySource") + +# This should run because it's consumed directly by process.consumer +process.thing = cms.EDProducer("ThingProducer") + +# This should not run, because it's mot consumed by any other module +process.notRunningThing = cms.EDProducer("ThingProducer") + +# This should run because it's consumed indirectly by process.consumer, via process.otherThing +process.anotherThing = cms.EDProducer("ThingProducer") + +# This should run because it's consumed directly by process.consumer +process.otherThing = cms.EDProducer("OtherThingProducer", + thingTag = cms.InputTag('anotherThing'), + transient = cms.untracked.bool(True) +) + +# Make the various modules available for unscheduled execution +process.task = cms.Task( + process.thing, + process.anotherThing, + process.otherThing, + process.notRunningThing +) + +# Consumes the products of process.thing and process.otherThing, causing them to run +process.consumer = cms.EDAnalyzer("GenericConsumer", + eventProducts = cms.untracked.vstring("*_thing_*_*", "otherThing"), + verbose = cms.untracked.bool(True) +) + +# Explicilty schedule process.consumer, causing it to run along with its dependencies, provided by process.task +process.path = cms.Path(process.consumer, process.task) + +# Print the summary of all modules that were run +# The content of the summary is tested by testGenericConsumer.sh +process.options.wantSummary = True diff --git a/FWCore/Modules/test/testGenericConsumer.sh b/FWCore/Modules/test/testGenericConsumer.sh new file mode 100755 index 0000000000000..7066de5e32417 --- /dev/null +++ b/FWCore/Modules/test/testGenericConsumer.sh @@ -0,0 +1,6 @@ +#! /bin/bash + +[ "${LOCALTOP}" ] || LOCALTOP=$CMSSW_BASE + +cmsRun ${LOCALTOP}/src/FWCore/Modules/test/testGenericConsumer.py 2>&1 | grep '^TrigReport' | \ + awk 'BEGIN { KEEP = 0; } /Module Summary/ { KEEP = 1; } { if (! 
KEEP) next; print; } /\|\|\/ { if ($3 == 0) exit 1; } /\/ { if ($3 != 0) exit 1; }' diff --git a/FWCore/ParameterSet/python/Config.py b/FWCore/ParameterSet/python/Config.py index 698f37b2155c2..7f24bb514ddd5 100644 --- a/FWCore/ParameterSet/python/Config.py +++ b/FWCore/ParameterSet/python/Config.py @@ -1285,10 +1285,9 @@ def _insertPaths(self, processPSet, nodeVisitor): iFinalPath.resolve(self.__dict__) finalpathValidator.setLabel(finalpathname) iFinalPath.visit(finalpathValidator) - if finalpathValidator.filtersOnFinalpaths or finalpathValidator.producersOnFinalpaths: - names = [p.label_ for p in finalpathValidator.filtersOnFinalpaths] - names.extend( [p.label_ for p in finalpathValidator.producersOnFinalpaths]) - raise RuntimeError("FinalPath %s has non OutputModules %s" % (finalpathname, ",".join(names))) + invalidModules = finalpathValidator.invalidModulesOnFinalpaths + if invalidModules: + raise RuntimeError("FinalPath %s has non OutputModules %s" % (finalpathname, ",".join(invalidModules))) modulesOnFinalPath.extend(iFinalPath.moduleNames()) for m in modulesOnFinalPath: mod = getattr(self, m) @@ -3326,7 +3325,7 @@ def testFinalPath(self): path = FinalPath(p.a*(p.b+p.c)) self.assertEqual(str(path),'a+b+c') p.es = ESProducer("AnESProducer") - self.assertRaises(TypeError,FinalPath,p.es) + self.assertRaises(TypeError,FinalPath, p.es) t = FinalPath() self.assertEqual(t.dumpPython(PrintOptions()), 'cms.FinalPath()\n') @@ -3348,7 +3347,27 @@ def testFinalPath(self): p.t = FinalPath(p.a) p.a = OutputModule("ReplacedOutputModule") self.assertEqual(p.t.dumpPython(PrintOptions()), 'cms.FinalPath(process.a)\n') - + + p.anal = EDAnalyzer("MyAnalyzer") + p.t = FinalPath(p.anal) + pset = TestMakePSet() + self.assertRaises(RuntimeError, p.fillProcessDesc, pset) + + p.prod = EDProducer("MyProducer") + p.t = FinalPath(p.prod) + pset = TestMakePSet() + self.assertRaises(RuntimeError, p.fillProcessDesc, pset) + + p.filt = EDFilter("MyFilter") + p.t = FinalPath(p.filt) + pset = 
TestMakePSet() + self.assertRaises(RuntimeError, p.fillProcessDesc, pset) + + p.outp = OutputModule("MyOutputModule") + p.t = FinalPath(p.outp) + pset = TestMakePSet() + p.fillProcessDesc(pset) + def testCloneSequence(self): p = Process("test") a = EDAnalyzer("MyAnalyzer") diff --git a/FWCore/ParameterSet/python/SequenceVisitors.py b/FWCore/ParameterSet/python/SequenceVisitors.py index 679d44614bcf7..3717f193cc16d 100644 --- a/FWCore/ParameterSet/python/SequenceVisitors.py +++ b/FWCore/ParameterSet/python/SequenceVisitors.py @@ -72,8 +72,7 @@ class FinalPathValidator(object): def __init__(self): self.__label = '' self._levelInTasks = 0 - self.filtersOnFinalpaths = [] - self.producersOnFinalpaths = [] + self.invalidModulesOnFinalpaths = [] def setLabel(self,label): self.__label = "'"+label+"' " def enter(self,visitee): @@ -88,10 +87,8 @@ def enter(self,visitee): self._levelInTasks += 1 if self._levelInTasks > 0: return - if isinstance(visitee,EDFilter): - self.filtersOnFinalpaths.append(visitee.type_()) - if isinstance(visitee,EDProducer): - self.producersOnFinalpaths.append(visitee.type_()) + if isinstance(visitee,(EDAnalyzer,EDProducer,EDFilter)): + self.invalidModulesOnFinalpaths.append(visitee.type_()) def leave(self,visitee): if self._levelInTasks > 0: if isinstance(visitee, Task): diff --git a/FWCore/ServiceRegistry/interface/ActivityRegistry.h b/FWCore/ServiceRegistry/interface/ActivityRegistry.h index 716fbf534459d..b8d2a78e1f015 100644 --- a/FWCore/ServiceRegistry/interface/ActivityRegistry.h +++ b/FWCore/ServiceRegistry/interface/ActivityRegistry.h @@ -763,6 +763,54 @@ namespace edm { } AR_WATCH_USING_METHOD_2(watchPostModuleEventAcquire) + /// signal is emitted before the module starts a transform during the Event and before prefetching for the transform has started + typedef signalslot::Signal PreModuleTransformPrefetching; + PreModuleTransformPrefetching preModuleTransformPrefetchingSignal_; + void 
watchPreModuleTransformPrefetching(PreModuleTransformPrefetching::slot_type const& iSlot) { + preModuleTransformPrefetchingSignal_.connect(iSlot); + } + AR_WATCH_USING_METHOD_2(watchPreModuleTransformPrefetching) + + /// signal is emitted before the module starts a transform during the Event and after prefetching for the transform has finished + typedef signalslot::Signal PostModuleTransformPrefetching; + PostModuleTransformPrefetching postModuleTransformPrefetchingSignal_; + void watchPostModuleTransformPrefetching(PostModuleTransformPrefetching::slot_type const& iSlot) { + postModuleTransformPrefetchingSignal_.connect_front(iSlot); + } + AR_WATCH_USING_METHOD_2(watchPostModuleTransformPrefetching) + + /// signal is emitted before the module starts a transform during the Event + typedef signalslot::Signal PreModuleTransform; + PreModuleTransform preModuleTransformSignal_; + void watchPreModuleTransform(PreModuleTransform::slot_type const& iSlot) { + preModuleTransformSignal_.connect(iSlot); + } + AR_WATCH_USING_METHOD_2(watchPreModuleTransform) + + /// signal is emitted after the module finished a transform during the Event + typedef signalslot::Signal PostModuleTransform; + PostModuleTransform postModuleTransformSignal_; + void watchPostModuleTransform(PostModuleTransform::slot_type const& iSlot) { + postModuleTransformSignal_.connect_front(iSlot); + } + AR_WATCH_USING_METHOD_2(watchPostModuleTransform) + + /// signal is emitted before the module starts the acquire method for a transform during the Event + typedef signalslot::Signal PreModuleTransformAcquiring; + PreModuleTransformAcquiring preModuleTransformAcquiringSignal_; + void watchPreModuleTransformAcquiring(PreModuleTransformAcquiring::slot_type const& iSlot) { + preModuleTransformAcquiringSignal_.connect(iSlot); + } + AR_WATCH_USING_METHOD_2(watchPreModuleTransformAcquiring) + + /// signal is emitted after the module finishes the acquire method for a transform during the Event + typedef 
signalslot::Signal PostModuleTransformAcquiring; + PostModuleTransformAcquiring postModuleTransformAcquiringSignal_; + void watchPostModuleTransformAcquiring(PostModuleTransformAcquiring::slot_type const& iSlot) { + postModuleTransformAcquiringSignal_.connect_front(iSlot); + } + AR_WATCH_USING_METHOD_2(watchPostModuleTransformAcquiring) + /// signal is emitted after the module starts processing the Event and before a delayed get has started typedef signalslot::Signal PreModuleEventDelayedGet; PreModuleEventDelayedGet preModuleEventDelayedGetSignal_; diff --git a/FWCore/ServiceRegistry/interface/ESModuleCallingContext.h b/FWCore/ServiceRegistry/interface/ESModuleCallingContext.h index e392d00eb7326..46062d8ad058a 100644 --- a/FWCore/ServiceRegistry/interface/ESModuleCallingContext.h +++ b/FWCore/ServiceRegistry/interface/ESModuleCallingContext.h @@ -17,6 +17,7 @@ Services as an argument to their callback functions. #include "FWCore/ServiceRegistry/interface/ESParentContext.h" #include +#include namespace edm { @@ -34,9 +35,10 @@ namespace edm { kInvalid }; - ESModuleCallingContext(edm::eventsetup::ComponentDescription const* moduleDescription); + ESModuleCallingContext(edm::eventsetup::ComponentDescription const* moduleDescription, std::uintptr_t id); ESModuleCallingContext(edm::eventsetup::ComponentDescription const* moduleDescription, + std::uintptr_t id, State state, ESParentContext const& parent); @@ -47,6 +49,10 @@ namespace edm { edm::eventsetup::ComponentDescription const* componentDescription() const { return componentDescription_; } State state() const { return state_; } Type type() const { return parent_.type(); } + /** Returns a unique id for this module to differentiate possibly concurrent calls to the module. + The value returned may be large so not appropriate for an index lookup. 
+ */ + std::uintptr_t callID() const { return id_; } ESParentContext const& parent() const { return parent_; } ModuleCallingContext const* moduleCallingContext() const { return parent_.moduleCallingContext(); } ESModuleCallingContext const* esmoduleCallingContext() const { return parent_.esmoduleCallingContext(); } @@ -62,6 +68,7 @@ namespace edm { private: edm::eventsetup::ComponentDescription const* componentDescription_; ESParentContext parent_; + std::uintptr_t id_; State state_; }; } // namespace edm diff --git a/FWCore/ServiceRegistry/interface/ModuleCallingContext.h b/FWCore/ServiceRegistry/interface/ModuleCallingContext.h index e025e55464bbb..dde97a0483dae 100644 --- a/FWCore/ServiceRegistry/interface/ModuleCallingContext.h +++ b/FWCore/ServiceRegistry/interface/ModuleCallingContext.h @@ -17,6 +17,7 @@ Services as an argument to their callback functions. #include "FWCore/ServiceRegistry/interface/ParentContext.h" #include +#include namespace cms { class Exception; @@ -42,6 +43,7 @@ namespace edm { ModuleCallingContext(ModuleDescription const* moduleDescription); ModuleCallingContext(ModuleDescription const* moduleDescription, + std::uintptr_t id, State state, ParentContext const& parent, ModuleCallingContext const* previousOnThread); @@ -53,6 +55,11 @@ namespace edm { ModuleDescription const* moduleDescription() const { return moduleDescription_; } State state() const { return state_; } Type type() const { return parent_.type(); } + /** Returns a unique id for this module to differentiate possibly concurrent calls to the module. + The value returned may be large so not appropriate for an index lookup. + A value of 0 denotes a call to produce, analyze or filter. Other values denote a transform. 
+ */ + std::uintptr_t callID() const { return id_; } ParentContext const& parent() const { return parent_; } ModuleCallingContext const* moduleCallingContext() const { return parent_.moduleCallingContext(); } PlaceInPathContext const* placeInPathContext() const { return parent_.placeInPathContext(); } @@ -81,6 +88,7 @@ namespace edm { ModuleCallingContext const* previousModuleOnThread_; ModuleDescription const* moduleDescription_; ParentContext parent_; + std::uintptr_t id_; State state_; }; diff --git a/FWCore/ServiceRegistry/src/ActivityRegistry.cc b/FWCore/ServiceRegistry/src/ActivityRegistry.cc index 5cf9e7650f555..0da045a80e699 100644 --- a/FWCore/ServiceRegistry/src/ActivityRegistry.cc +++ b/FWCore/ServiceRegistry/src/ActivityRegistry.cc @@ -249,6 +249,15 @@ namespace edm { preModuleEventAcquireSignal_.connect(std::cref(iOther.preModuleEventAcquireSignal_)); postModuleEventAcquireSignal_.connect(std::cref(iOther.postModuleEventAcquireSignal_)); + preModuleTransformPrefetchingSignal_.connect(std::cref(iOther.preModuleTransformPrefetchingSignal_)); + postModuleTransformPrefetchingSignal_.connect(std::cref(iOther.postModuleTransformPrefetchingSignal_)); + + preModuleTransformSignal_.connect(std::cref(iOther.preModuleTransformSignal_)); + postModuleTransformSignal_.connect(std::cref(iOther.postModuleTransformSignal_)); + + preModuleTransformAcquiringSignal_.connect(std::cref(iOther.preModuleTransformAcquiringSignal_)); + postModuleTransformAcquiringSignal_.connect(std::cref(iOther.postModuleTransformAcquiringSignal_)); + preModuleEventDelayedGetSignal_.connect(std::cref(iOther.preModuleEventDelayedGetSignal_)); postModuleEventDelayedGetSignal_.connect(std::cref(iOther.postModuleEventDelayedGetSignal_)); @@ -486,6 +495,15 @@ namespace edm { copySlotsToFrom(preModuleEventAcquireSignal_, iOther.preModuleEventAcquireSignal_); copySlotsToFromReverse(postModuleEventAcquireSignal_, iOther.postModuleEventAcquireSignal_); + 
copySlotsToFrom(preModuleTransformPrefetchingSignal_, iOther.preModuleTransformPrefetchingSignal_); + copySlotsToFromReverse(postModuleTransformPrefetchingSignal_, iOther.postModuleTransformPrefetchingSignal_); + + copySlotsToFrom(preModuleTransformSignal_, iOther.preModuleTransformSignal_); + copySlotsToFromReverse(postModuleTransformSignal_, iOther.postModuleTransformSignal_); + + copySlotsToFrom(preModuleTransformAcquiringSignal_, iOther.preModuleTransformAcquiringSignal_); + copySlotsToFromReverse(postModuleTransformAcquiringSignal_, iOther.postModuleTransformAcquiringSignal_); + copySlotsToFrom(preModuleEventDelayedGetSignal_, iOther.preModuleEventDelayedGetSignal_); copySlotsToFromReverse(postModuleEventDelayedGetSignal_, iOther.postModuleEventDelayedGetSignal_); diff --git a/FWCore/ServiceRegistry/src/ESModuleCallingContext.cc b/FWCore/ServiceRegistry/src/ESModuleCallingContext.cc index 4e4ee8e0932c4..f7df6ed816948 100644 --- a/FWCore/ServiceRegistry/src/ESModuleCallingContext.cc +++ b/FWCore/ServiceRegistry/src/ESModuleCallingContext.cc @@ -7,13 +7,15 @@ namespace edm { - ESModuleCallingContext::ESModuleCallingContext(edm::eventsetup::ComponentDescription const* componentDescription) - : componentDescription_(componentDescription), parent_(), state_(State::kInvalid) {} + ESModuleCallingContext::ESModuleCallingContext(edm::eventsetup::ComponentDescription const* componentDescription, + std::uintptr_t id) + : componentDescription_(componentDescription), parent_(), id_(id), state_(State::kInvalid) {} ESModuleCallingContext::ESModuleCallingContext(edm::eventsetup::ComponentDescription const* componentDescription, + std::uintptr_t id, State state, ESParentContext const& parent) - : componentDescription_(componentDescription), parent_(parent), state_(state) {} + : componentDescription_(componentDescription), parent_(parent), id_(id), state_(state) {} void ESModuleCallingContext::setContext(State state, ESParentContext const& parent) { state_ = state; diff --git 
a/FWCore/ServiceRegistry/src/ModuleCallingContext.cc b/FWCore/ServiceRegistry/src/ModuleCallingContext.cc index 9b3817f8526e8..bb9ae9bf71283 100644 --- a/FWCore/ServiceRegistry/src/ModuleCallingContext.cc +++ b/FWCore/ServiceRegistry/src/ModuleCallingContext.cc @@ -12,15 +12,21 @@ namespace edm { ModuleCallingContext::ModuleCallingContext(ModuleDescription const* moduleDescription) - : previousModuleOnThread_(nullptr), moduleDescription_(moduleDescription), parent_(), state_(State::kInvalid) {} + : previousModuleOnThread_(nullptr), + moduleDescription_(moduleDescription), + parent_(), + id_(0), + state_(State::kInvalid) {} ModuleCallingContext::ModuleCallingContext(ModuleDescription const* moduleDescription, + std::uintptr_t id, State state, ParentContext const& parent, ModuleCallingContext const* previousOnThread) : previousModuleOnThread_(previousOnThread), moduleDescription_(moduleDescription), parent_(parent), + id_(id), state_(state) {} void ModuleCallingContext::setContext(State state, diff --git a/FWCore/Services/plugins/DependencyGraph.cc b/FWCore/Services/plugins/DependencyGraph.cc index 8ec47c5ea751e..e5457651f574e 100644 --- a/FWCore/Services/plugins/DependencyGraph.cc +++ b/FWCore/Services/plugins/DependencyGraph.cc @@ -96,7 +96,7 @@ class DependencyGraph { using GraphvizAttributes = std::map; // directed graph, with `node` properties attached to each vertex - boost::subgraph>>>>> - m_graph; + boost::property>>>>>; + GraphType m_graph; std::string m_filename; std::unordered_set m_highlightModules; @@ -266,7 +266,7 @@ void DependencyGraph::preBeginJob(PathsAndConsumesOfModulesBase const &pathsAndC edm::LogInfo("DependencyGraph") << "module " << consumer->moduleLabel() << " depends on module " << module->moduleLabel(); auto edge_status = boost::add_edge(consumer->id(), module->id(), m_graph); - // highlight the arrow between highlighted nodes + // highlight the edge between highlighted nodes if (highlighted(module->moduleLabel()) and 
highlighted(consumer->moduleLabel())) { auto const &edge = edge_status.first; auto &attributes = boost::get(boost::get(boost::edge_attribute, m_graph), edge); @@ -275,7 +275,49 @@ void DependencyGraph::preBeginJob(PathsAndConsumesOfModulesBase const &pathsAndC } } - // marke the modules in the paths as scheduled, and add a soft dependency to reflect the order of modules along each path + // save each Path and EndPath as a Graphviz subgraph + for (unsigned int i = 0; i < paths.size(); ++i) { + // create a subgraph to match the Path + auto &graph = m_graph.create_subgraph(); + + // set the subgraph name property to the Path name + boost::get_property(graph, boost::graph_name) = paths[i]; + boost::get_property(graph, boost::graph_graph_attribute)["label"] = "Path " + paths[i]; + boost::get_property(graph, boost::graph_graph_attribute)["labelloc"] = "bottom"; + + // add to the subgraph the node corresponding to the scheduled modules on the Path + for (edm::ModuleDescription const *module : pathsAndConsumes.modulesOnPath(i)) { + boost::add_vertex(module->id(), graph); + } + } + for (unsigned int i = 0; i < endps.size(); ++i) { + // create a subgraph to match the EndPath + auto &graph = m_graph.create_subgraph(); + + // set the subgraph name property to the EndPath name + boost::get_property(graph, boost::graph_name) = paths[i]; + boost::get_property(graph, boost::graph_graph_attribute)["label"] = "EndPath " + paths[i]; + boost::get_property(graph, boost::graph_graph_attribute)["labelloc"] = "bottom"; + + // add to the subgraph the node corresponding to the scheduled modules on the EndPath + for (edm::ModuleDescription const *module : pathsAndConsumes.modulesOnEndPath(i)) { + boost::add_vertex(module->id(), graph); + } + } + + // optionally, add a dependency of the TriggerResults module on the PathStatusInserter modules + const int size = boost::num_vertices(m_graph); + int triggerResults = -1; + bool highlightTriggerResults = false; + for (int i = 0; i < size; ++i) { + 
if (m_graph.m_graph[i].label == "TriggerResults") { + triggerResults = i; + highlightTriggerResults = highlighted("TriggerResults"); + break; + } + } + + // mark the modules in the paths as scheduled, and add a soft dependency to reflect the order of modules along each path edm::ModuleDescription const *previous; for (unsigned int i = 0; i < paths.size(); ++i) { previous = nullptr; @@ -285,7 +327,7 @@ void DependencyGraph::preBeginJob(PathsAndConsumesOfModulesBase const &pathsAndC attributes["fillcolor"] = highlighted(module->moduleLabel()) ? "lightgreen" : "white"; if (previous and m_showPathDependencies) { edm::LogInfo("DependencyGraph") << "module " << module->moduleLabel() << " follows module " - << previous->moduleLabel() << " in Path " << i; + << previous->moduleLabel() << " in Path " << paths[i]; auto edge_status = boost::lookup_edge(module->id(), previous->id(), m_graph); bool found = edge_status.second; if (not found) { @@ -293,14 +335,46 @@ void DependencyGraph::preBeginJob(PathsAndConsumesOfModulesBase const &pathsAndC auto const &edge = edge_status.first; auto &edgeAttributes = boost::get(boost::get(boost::edge_attribute, m_graph), edge); edgeAttributes["style"] = "dashed"; - // highlight the arrow between highlighted nodes + // highlight the edge between highlighted nodes if (highlighted(module->moduleLabel()) and highlighted(previous->moduleLabel())) edgeAttributes["color"] = "darkgreen"; } } previous = module; } + // previous points to the last scheduled module on the path + if (previous and m_showPathDependencies) { + // look for the PathStatusInserter module corresponding to this path + for (int j = 0; j < size; ++j) { + if (m_graph.m_graph[j].label == paths[i]) { + edm::LogInfo("DependencyGraph") << "module " << paths[i] << " implicitly follows module " + << previous->moduleLabel() << " in Path " << paths[i]; + // add an edge from the PathStatusInserter module to the last module scheduled on the path + auto edge_status = boost::add_edge(j, 
previous->id(), m_graph); + auto const &edge = edge_status.first; + auto &edgeAttributes = boost::get(boost::get(boost::edge_attribute, m_graph), edge); + edgeAttributes["style"] = "dashed"; + // highlight the edge between highlighted nodes + bool highlightedPath = highlighted(paths[i]); + if (highlightedPath and highlighted(previous->moduleLabel())) + edgeAttributes["color"] = "darkgreen"; + if (triggerResults > 0) { + // add an edge from the TriggerResults module to the PathStatusInserter module + auto edge_status = boost::add_edge(triggerResults, j, m_graph); + auto const &edge = edge_status.first; + auto &edgeAttributes = boost::get(boost::get(boost::edge_attribute, m_graph), edge); + edgeAttributes["style"] = "dashed"; + // highlight the edge between highlighted nodes + if (highlightedPath and highlightTriggerResults) + edgeAttributes["color"] = "darkgreen"; + } + break; + } + } + } } + + // mark the modules in the endpaths as scheduled, and add a soft dependency to reflect the order of modules along each endpath for (unsigned int i = 0; i < endps.size(); ++i) { previous = nullptr; for (edm::ModuleDescription const *module : pathsAndConsumes.modulesOnEndPath(i)) { @@ -317,7 +391,7 @@ void DependencyGraph::preBeginJob(PathsAndConsumesOfModulesBase const &pathsAndC auto const &edge = edge_status.first; auto &edgeAttributes = boost::get(boost::get(boost::edge_attribute, m_graph), edge); edgeAttributes["style"] = "dashed"; - // highlight the arrow between highlighted nodes + // highlight the edge between highlighted nodes if (highlighted(module->moduleLabel()) and highlighted(previous->moduleLabel())) edgeAttributes["color"] = "darkgreen"; } @@ -331,6 +405,12 @@ void DependencyGraph::postBeginJob() { if (not m_initialized) return; + // remove the nodes corresponding to the modules that have been removed from the process + for (int i = boost::num_vertices(m_graph) - 1; i > 1; --i) { + if (m_graph.m_graph[i].label.empty()) + boost::remove_vertex(i, m_graph.m_graph); 
+ } + // draw the dependency graph std::ofstream out(m_filename); boost::write_graphviz(out, m_graph); diff --git a/FWCore/Services/plugins/InitRootHandlers.cc b/FWCore/Services/plugins/InitRootHandlers.cc index a186ae6cb3e92..dee198b3fe7af 100644 --- a/FWCore/Services/plugins/InitRootHandlers.cc +++ b/FWCore/Services/plugins/InitRootHandlers.cc @@ -186,9 +186,11 @@ namespace { "Inverter::Dinv", "RTaskArenaWrapper"}}; - constexpr std::array in_message_print_error{{"number of iterations was insufficient", - "bad integrand behavior", - "integral is divergent, or slowly convergent"}}; + constexpr std::array in_message_print_error{ + {"number of iterations was insufficient", + "bad integrand behavior", + "integral is divergent, or slowly convergent", + "VariableMetricBuilder Initial matrix not pos.def."}}; void RootErrorHandlerImpl(int level, char const* location, char const* message) { bool die = false; diff --git a/FWCore/Services/plugins/Tracer.cc b/FWCore/Services/plugins/Tracer.cc index 555d11fe66ee7..9b97ca8cfa1b2 100644 --- a/FWCore/Services/plugins/Tracer.cc +++ b/FWCore/Services/plugins/Tracer.cc @@ -161,6 +161,12 @@ namespace edm { void postModuleEventDelayedGet(StreamContext const&, ModuleCallingContext const&); void preEventReadFromSource(StreamContext const&, ModuleCallingContext const&); void postEventReadFromSource(StreamContext const&, ModuleCallingContext const&); + void preModuleTransformPrefetching(StreamContext const&, ModuleCallingContext const&); + void postModuleTransformPrefetching(StreamContext const&, ModuleCallingContext const&); + void preModuleTransform(StreamContext const&, ModuleCallingContext const&); + void postModuleTransform(StreamContext const&, ModuleCallingContext const&); + void preModuleTransformAcquiring(StreamContext const&, ModuleCallingContext const&); + void postModuleTransformAcquiring(StreamContext const&, ModuleCallingContext const&); void preModuleStreamPrefetching(StreamContext const&, ModuleCallingContext const&); void 
postModuleStreamPrefetching(StreamContext const&, ModuleCallingContext const&); @@ -359,6 +365,12 @@ Tracer::Tracer(ParameterSet const& iPS, ActivityRegistry& iRegistry) iRegistry.watchPostModuleEventDelayedGet(this, &Tracer::postModuleEventDelayedGet); iRegistry.watchPreEventReadFromSource(this, &Tracer::preEventReadFromSource); iRegistry.watchPostEventReadFromSource(this, &Tracer::postEventReadFromSource); + iRegistry.watchPreModuleTransformPrefetching(this, &Tracer::preModuleTransformPrefetching); + iRegistry.watchPostModuleTransformPrefetching(this, &Tracer::postModuleTransformPrefetching); + iRegistry.watchPreModuleTransform(this, &Tracer::preModuleTransform); + iRegistry.watchPostModuleTransform(this, &Tracer::postModuleTransform); + iRegistry.watchPreModuleTransformAcquiring(this, &Tracer::preModuleTransformAcquiring); + iRegistry.watchPostModuleTransformAcquiring(this, &Tracer::postModuleTransformAcquiring); iRegistry.watchPreModuleStreamPrefetching(this, &Tracer::preModuleStreamPrefetching); iRegistry.watchPostModuleStreamPrefetching(this, &Tracer::postModuleStreamPrefetching); @@ -1228,6 +1240,93 @@ void Tracer::postEventReadFromSource(StreamContext const& sc, ModuleCallingConte << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id(); } +void Tracer::preModuleTransformPrefetching(StreamContext const& sc, ModuleCallingContext const& mcc) { + LogAbsolute out("Tracer"); + out << TimeStamper(printTimestamps_); + unsigned int nIndents = mcc.depth() + 4; + for (unsigned int i = 0; i < nIndents; ++i) { + out << indention_; + } + out << " starting: prefetching before transform in event for module: stream = " << sc.streamID() << " label = '" + << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id() + << " callID = " << mcc.callID(); + if (dumpContextForLabels_.find(mcc.moduleDescription()->moduleLabel()) != dumpContextForLabels_.end()) { + out << "\n" << sc; + out << mcc; + } +} + +void 
Tracer::postModuleTransformPrefetching(StreamContext const& sc, ModuleCallingContext const& mcc) { + LogAbsolute out("Tracer"); + out << TimeStamper(printTimestamps_); + unsigned int nIndents = mcc.depth() + 4; + for (unsigned int i = 0; i < nIndents; ++i) { + out << indention_; + } + out << " finished: prefetching before transform in event for module: stream = " << sc.streamID() << " label = '" + << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id() + << " callID = " << mcc.callID(); + if (dumpContextForLabels_.find(mcc.moduleDescription()->moduleLabel()) != dumpContextForLabels_.end()) { + out << "\n" << sc; + out << mcc; + } +} + +void Tracer::preModuleTransform(StreamContext const& sc, ModuleCallingContext const& mcc) { + LogAbsolute out("Tracer"); + out << TimeStamper(printTimestamps_); + unsigned int nIndents = mcc.depth() + 4; + for (unsigned int i = 0; i < nIndents; ++i) { + out << indention_; + } + out << " starting: transform in event for module: stream = " << sc.streamID() << " label = '" + << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id() + << " callID = " << mcc.callID(); + if (dumpContextForLabels_.find(mcc.moduleDescription()->moduleLabel()) != dumpContextForLabels_.end()) { + out << "\n" << sc; + out << mcc; + } +} + +void Tracer::postModuleTransform(StreamContext const& sc, ModuleCallingContext const& mcc) { + LogAbsolute out("Tracer"); + out << TimeStamper(printTimestamps_); + unsigned int nIndents = mcc.depth() + 4; + for (unsigned int i = 0; i < nIndents; ++i) { + out << indention_; + } + out << " finished: transform in event for module: stream = " << sc.streamID() << " label = '" + << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id() + << " callID = " << mcc.callID(); + if (dumpContextForLabels_.find(mcc.moduleDescription()->moduleLabel()) != dumpContextForLabels_.end()) { + out << "\n" << sc; + out << mcc; + } +} + +void 
Tracer::preModuleTransformAcquiring(StreamContext const& sc, ModuleCallingContext const& mcc) { + LogAbsolute out("Tracer"); + out << TimeStamper(printTimestamps_); + unsigned int nIndents = mcc.depth() + 4; + for (unsigned int i = 0; i < nIndents; ++i) { + out << indention_; + } + out << " starting: acquiring before transform in event for module: stream = " << sc.streamID() << " label = '" + << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id() + << " callID = " << mcc.callID(); +} + +void Tracer::postModuleTransformAcquiring(StreamContext const& sc, ModuleCallingContext const& mcc) { + LogAbsolute out("Tracer"); + out << TimeStamper(printTimestamps_); + unsigned int nIndents = mcc.depth() + 4; + for (unsigned int i = 0; i < nIndents; ++i) { + out << indention_; + } + out << " finished: acquiring before transform in event acquire for module: stream = " << sc.streamID() << " label = '" + << mcc.moduleDescription()->moduleLabel() << "' id = " << mcc.moduleDescription()->id() + << " callID = " << mcc.callID(); +} void Tracer::preModuleStreamPrefetching(StreamContext const& sc, ModuleCallingContext const& mcc) { LogAbsolute out("Tracer"); out << TimeStamper(printTimestamps_); diff --git a/FWCore/Services/plugins/monitor_file_utilities.h b/FWCore/Services/plugins/monitor_file_utilities.h index 05e08d135e949..61504e5a49d48 100644 --- a/FWCore/Services/plugins/monitor_file_utilities.h +++ b/FWCore/Services/plugins/monitor_file_utilities.h @@ -19,6 +19,10 @@ namespace edm::service::monitor_file_utilities { inline auto module_id(edm::ESModuleCallingContext const& mcc) { return mcc.componentDescription()->id_; } + inline auto module_callid(edm::ModuleCallingContext const& mcc) { return mcc.callID(); } + + inline auto module_callid(edm::ESModuleCallingContext const& mcc) { return mcc.callID(); } + template std::enable_if_t::value> concatenate(std::ostream& os, T const t) { os << ' ' << t; diff --git 
a/FWCore/Services/plugins/tracer_setupFile.cc b/FWCore/Services/plugins/tracer_setupFile.cc index 45c2cfd772c6d..2221489942b2a 100644 --- a/FWCore/Services/plugins/tracer_setupFile.cc +++ b/FWCore/Services/plugins/tracer_setupFile.cc @@ -213,18 +213,22 @@ namespace { } auto recordIndex = findRecordIndices(iKey); long long requestingModuleID; + decltype(iContext.moduleCallingContext()->callID()) requestingCallID; if (iContext.type() == ESParentContext::Type::kModule) { requestingModuleID = iContext.moduleCallingContext()->moduleDescription()->id(); + requestingCallID = iContext.moduleCallingContext()->callID(); } else { requestingModuleID = -1 * static_cast(iContext.esmoduleCallingContext()->componentDescription()->id_ + 1); + requestingCallID = iContext.esmoduleCallingContext()->callID(); } auto msg = assembleMessage(phase, phaseID, iContext.componentDescription()->id_ + 1, recordIndex, - reinterpret_cast(&iContext), + iContext.callID(), requestingModuleID, + requestingCallID, t); logFile_->write(std::move(msg)); } @@ -250,10 +254,18 @@ namespace { using namespace edm; auto const t = std::chrono::duration_cast(now() - beginTime_).count(); long requestingModuleID = 0; + decltype(mcc.parent().moduleCallingContext()->callID()) requestingCallID = 0; if (mcc.type() == ParentContext::Type::kModule) { requestingModuleID = module_id(*mcc.parent().moduleCallingContext()); + requestingCallID = mcc.parent().moduleCallingContext()->callID(); } - auto msg = assembleMessage(toTransition(gc), toTransitionIndex(gc), module_id(mcc), requestingModuleID, t); + auto msg = assembleMessage(toTransition(gc), + toTransitionIndex(gc), + module_id(mcc), + module_callid(mcc), + requestingModuleID, + requestingCallID, + t); logFile_->write(std::move(msg)); } @@ -271,10 +283,13 @@ namespace { using namespace edm; auto const t = std::chrono::duration_cast(now() - beginTime_).count(); long requestingModuleID = 0; + decltype(mcc.parent().moduleCallingContext()->callID()) requestingCallID = 0; if 
(mcc.type() == ParentContext::Type::kModule) { requestingModuleID = module_id(*mcc.parent().moduleCallingContext()); + requestingCallID = mcc.parent().moduleCallingContext()->callID(); } - auto msg = assembleMessage(toTransition(sc), stream_id(sc), module_id(mcc), requestingModuleID, t); + auto msg = assembleMessage( + toTransition(sc), stream_id(sc), module_id(mcc), module_callid(mcc), requestingModuleID, requestingCallID, t); logFile_->write(std::move(msg)); } @@ -383,89 +398,93 @@ namespace edm::service::tracer { logFile->write(oss.str()); }); - iRegistry.watchPreBeginJob( - [logFile, moduleLabelsPtr, esModuleLabelsPtr, moduleCtrDtrPtr, sourceCtrPtr, beginTime, beginTracer]( - auto&, auto&) mutable { - { - std::ostringstream oss; - moduleIdToLabel(oss, *moduleLabelsPtr, 'M', "EDModule ID", "Module label"); - logFile->write(oss.str()); - moduleLabelsPtr.reset(); - } - { - std::ostringstream oss; - moduleIdToLabel(oss, *esModuleLabelsPtr, 'N', "ESModule ID", "ESModule label"); - logFile->write(oss.str()); - esModuleLabelsPtr.reset(); - } - { - auto const tracerStart = duration_cast(beginTracer - beginTime).count(); - auto msg = assembleMessage( - static_cast>(Phase::startTracing), 0, 0, 0, 0, tracerStart); - logFile->write(std::move(msg)); - } - //NOTE: the source construction can run concurently with module construction so we need to properly - // interleave its timing in with the modules - auto srcBeginConstruction = sourceCtrPtr->beginConstruction; - auto srcEndConstruction = sourceCtrPtr->endConstruction; - sourceCtrPtr.reset(); - auto handleSource = [&srcBeginConstruction, &srcEndConstruction, &logFile](long long iTime) mutable { - if (srcBeginConstruction != 0 and srcBeginConstruction < iTime) { - auto bmsg = assembleMessage( - static_cast>(Phase::construction), 0, srcBeginConstruction); - logFile->write(std::move(bmsg)); - srcBeginConstruction = 0; - } - if (srcEndConstruction != 0 and srcEndConstruction < iTime) { - auto bmsg = assembleMessage( - 
static_cast>(Phase::construction), 0, srcEndConstruction); - logFile->write(std::move(bmsg)); - srcEndConstruction = 0; - } - }; - { - std::sort(moduleCtrDtrPtr->begin(), moduleCtrDtrPtr->end(), [](auto const& l, auto const& r) { - return l.beginConstruction < r.beginConstruction; - }); - int id = 0; - for (auto const& ctr : *moduleCtrDtrPtr) { - if (ctr.beginConstruction != 0) { - handleSource(ctr.beginConstruction); - auto bmsg = assembleMessage( - static_cast>(Phase::construction), 0, id, 0, ctr.beginConstruction); - logFile->write(std::move(bmsg)); - handleSource(ctr.endConstruction); - auto emsg = assembleMessage( - static_cast>(Phase::construction), 0, id, 0, ctr.endConstruction); - logFile->write(std::move(emsg)); - } - ++id; - } - id = 0; - std::sort(moduleCtrDtrPtr->begin(), moduleCtrDtrPtr->end(), [](auto const& l, auto const& r) { - return l.beginDestruction < r.beginDestruction; - }); - for (auto const& dtr : *moduleCtrDtrPtr) { - if (dtr.beginDestruction != 0) { - handleSource(dtr.beginDestruction); - auto bmsg = assembleMessage( - static_cast>(Phase::destruction), 0, id, 0, dtr.beginDestruction); - logFile->write(std::move(bmsg)); - handleSource(dtr.endDestruction); - auto emsg = assembleMessage( - static_cast>(Phase::destruction), 0, id, 0, dtr.endDestruction); - logFile->write(std::move(emsg)); - } - ++id; - } - moduleCtrDtrPtr.reset(); + iRegistry.watchPreBeginJob([logFile, + moduleLabelsPtr, + esModuleLabelsPtr, + moduleCtrDtrPtr, + sourceCtrPtr, + beginTime, + beginTracer](auto&, auto&) mutable { + { + std::ostringstream oss; + moduleIdToLabel(oss, *moduleLabelsPtr, 'M', "EDModule ID", "Module label"); + logFile->write(oss.str()); + moduleLabelsPtr.reset(); + } + { + std::ostringstream oss; + moduleIdToLabel(oss, *esModuleLabelsPtr, 'N', "ESModule ID", "ESModule label"); + logFile->write(oss.str()); + esModuleLabelsPtr.reset(); + } + { + auto const tracerStart = duration_cast(beginTracer - beginTime).count(); + auto msg = assembleMessage( + 
static_cast>(Phase::startTracing), 0, 0, 0, 0, tracerStart); + logFile->write(std::move(msg)); + } + //NOTE: the source construction can run concurently with module construction so we need to properly + // interleave its timing in with the modules + auto srcBeginConstruction = sourceCtrPtr->beginConstruction; + auto srcEndConstruction = sourceCtrPtr->endConstruction; + sourceCtrPtr.reset(); + auto handleSource = [&srcBeginConstruction, &srcEndConstruction, &logFile](long long iTime) mutable { + if (srcBeginConstruction != 0 and srcBeginConstruction < iTime) { + auto bmsg = assembleMessage( + static_cast>(Phase::construction), 0, srcBeginConstruction); + logFile->write(std::move(bmsg)); + srcBeginConstruction = 0; + } + if (srcEndConstruction != 0 and srcEndConstruction < iTime) { + auto bmsg = assembleMessage( + static_cast>(Phase::construction), 0, srcEndConstruction); + logFile->write(std::move(bmsg)); + srcEndConstruction = 0; + } + }; + { + std::sort(moduleCtrDtrPtr->begin(), moduleCtrDtrPtr->end(), [](auto const& l, auto const& r) { + return l.beginConstruction < r.beginConstruction; + }); + int id = 0; + for (auto const& ctr : *moduleCtrDtrPtr) { + if (ctr.beginConstruction != 0) { + handleSource(ctr.beginConstruction); + auto bmsg = assembleMessage( + static_cast>(Phase::construction), 0, id, 0, 0, 0, ctr.beginConstruction); + logFile->write(std::move(bmsg)); + handleSource(ctr.endConstruction); + auto emsg = assembleMessage( + static_cast>(Phase::construction), 0, id, 0, 0, 0, ctr.endConstruction); + logFile->write(std::move(emsg)); } - auto const t = duration_cast(now() - beginTime).count(); - handleSource(t); - auto msg = assembleMessage( - static_cast>(Phase::beginJob), 0, 0, 0, 0, t); - logFile->write(std::move(msg)); + ++id; + } + id = 0; + std::sort(moduleCtrDtrPtr->begin(), moduleCtrDtrPtr->end(), [](auto const& l, auto const& r) { + return l.beginDestruction < r.beginDestruction; }); + for (auto const& dtr : *moduleCtrDtrPtr) { + if 
(dtr.beginDestruction != 0) { + handleSource(dtr.beginDestruction); + auto bmsg = assembleMessage( + static_cast>(Phase::destruction), 0, id, 0, 0, 0, dtr.beginDestruction); + logFile->write(std::move(bmsg)); + handleSource(dtr.endDestruction); + auto emsg = assembleMessage( + static_cast>(Phase::destruction), 0, id, 0, 0, 0, dtr.endDestruction); + logFile->write(std::move(emsg)); + } + ++id; + } + moduleCtrDtrPtr.reset(); + } + auto const t = duration_cast(now() - beginTime).count(); + handleSource(t); + auto msg = assembleMessage( + static_cast>(Phase::beginJob), 0, 0, 0, 0, t); + logFile->write(std::move(msg)); + }); iRegistry.watchPostBeginJob([logFile, beginTime]() { auto const t = duration_cast(now() - beginTime).count(); auto msg = assembleMessage( @@ -703,13 +722,13 @@ namespace edm::service::tracer { iRegistry.watchPreModuleBeginJob([logFile, beginTime](auto const& md) { auto const t = duration_cast(now() - beginTime).count(); auto msg = assembleMessage( - static_cast>(Phase::beginJob), 0, md.id(), 0, t); + static_cast>(Phase::beginJob), 0, md.id(), 0, 0, 0, t); logFile->write(std::move(msg)); }); iRegistry.watchPostModuleBeginJob([logFile, beginTime](auto const& md) { auto const t = duration_cast(now() - beginTime).count(); auto msg = assembleMessage( - static_cast>(Phase::beginJob), 0, md.id(), 0, t); + static_cast>(Phase::beginJob), 0, md.id(), 0, 0, 0, t); logFile->write(std::move(msg)); }); @@ -722,13 +741,13 @@ namespace edm::service::tracer { iRegistry.watchPreModuleEndJob([logFile, beginTime](auto const& md) { auto const t = duration_cast(now() - beginTime).count(); auto msg = assembleMessage( - static_cast>(Phase::endJob), 0, md.id(), 0, t); + static_cast>(Phase::endJob), 0, md.id(), 0, 0, 0, t); logFile->write(std::move(msg)); }); iRegistry.watchPostModuleEndJob([logFile, beginTime](auto const& md) { auto const t = duration_cast(now() - beginTime).count(); auto msg = assembleMessage( - static_cast>(Phase::endJob), 0, md.id(), 0, t); + 
static_cast>(Phase::endJob), 0, md.id(), 0, 0, 0, t); logFile->write(std::move(msg)); }); @@ -745,6 +764,15 @@ namespace edm::service::tracer { iRegistry.watchPreEventReadFromSource(StreamEDModuleState(logFile, beginTime)); iRegistry.watchPostEventReadFromSource(StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPreModuleTransform(StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPostModuleTransform(StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPreModuleTransformPrefetching(StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPostModuleTransformPrefetching( + StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPreModuleTransformAcquiring(StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPostModuleTransformAcquiring( + StreamEDModuleState(logFile, beginTime)); + iRegistry.watchPreModuleStreamPrefetching(StreamEDModuleState(logFile, beginTime)); iRegistry.watchPostModuleStreamPrefetching(StreamEDModuleState(logFile, beginTime)); @@ -837,50 +865,62 @@ namespace edm::service::tracer { << "# postSourceTransition " << Step::postSourceTransition << " + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testAtomicPairCounter.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testAtomicPairCounter.dev.cc new file mode 100644 index 0000000000000..1687feb8c1bab --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testAtomicPairCounter.dev.cc @@ -0,0 +1,111 @@ +#include +#include + +#define CATCH_CONFIG_MAIN +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/AtomicPairCounter.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + 
+static constexpr auto s_tag = "[" ALPAKA_TYPE_ALIAS_NAME(alpakaTestAtomicPair) "]"; + +struct update { + template + ALPAKA_FN_ACC void operator()( + const TAcc &acc, AtomicPairCounter *dc, uint32_t *ind, uint32_t *cont, uint32_t n) const { + for (auto i : elements_with_stride(acc, n)) { + auto m = i % 11; + m = m % 6 + 1; // max 6, no 0 + auto c = dc->inc_add(acc, m); + assert(c.first < n); + ind[c.first] = c.second; + for (uint32_t j = c.second; j < c.second + m; ++j) + cont[j] = i; + } + } +}; + +struct finalize { + template + ALPAKA_FN_ACC void operator()( + const TAcc &acc, AtomicPairCounter const *dc, uint32_t *ind, uint32_t *cont, uint32_t n) const { + assert(dc->get().first == n); + ind[n] = dc->get().second; + } +}; + +TEST_CASE("Standard checks of " ALPAKA_TYPE_ALIAS_NAME(alpakaTestAtomicPair), s_tag) { + SECTION("AtomicPairCounter") { + auto const &devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + REQUIRE(not devices.empty()); + } + + // run the test on each device + for (auto const &device : devices) { + std::cout << "Test AtomicPairCounter on " << alpaka::getName(device) << '\n'; + auto queue = Queue(device); + + auto c_d = make_device_buffer(queue); + alpaka::memset(queue, c_d, 0); + + std::cout << "- size " << sizeof(AtomicPairCounter) << std::endl; + + constexpr uint32_t N = 20000; + constexpr uint32_t M = N * 6; + auto n_d = make_device_buffer(queue, N); + auto m_d = make_device_buffer(queue, M); + + constexpr uint32_t NUM_VALUES = 10000; + + // Update + const auto blocksPerGrid = 2000u; + const auto threadsPerBlockOrElementsPerThread = 512u; + const auto workDiv = make_workdiv(blocksPerGrid, threadsPerBlockOrElementsPerThread); + alpaka::enqueue( + queue, alpaka::createTaskKernel(workDiv, update(), c_d.data(), n_d.data(), m_d.data(), NUM_VALUES)); + + // Finalize + const auto 
blocksPerGridFinalize = 1u; + const auto threadsPerBlockOrElementsPerThreadFinalize = 1u; + const auto workDivFinalize = + make_workdiv(blocksPerGridFinalize, threadsPerBlockOrElementsPerThreadFinalize); + alpaka::enqueue( + queue, + alpaka::createTaskKernel(workDivFinalize, finalize(), c_d.data(), n_d.data(), m_d.data(), NUM_VALUES)); + + auto c_h = make_host_buffer(queue); + auto n_h = make_host_buffer(queue, N); + auto m_h = make_host_buffer(queue, M); + + // copy the results from the device to the host + alpaka::memcpy(queue, c_h, c_d); + alpaka::memcpy(queue, n_h, n_d); + alpaka::memcpy(queue, m_h, m_d); + + // wait for all the operations to complete + alpaka::wait(queue); + + REQUIRE(c_h.data()->get().first == NUM_VALUES); + REQUIRE(n_h[NUM_VALUES] == c_h.data()->get().second); + REQUIRE(n_h[0] == 0); + + for (size_t i = 0; i < NUM_VALUES; ++i) { + auto ib = n_h.data()[i]; + auto ie = n_h.data()[i + 1]; + auto k = m_h.data()[ib++]; + REQUIRE(k < NUM_VALUES); + + for (; ib < ie; ++ib) + REQUIRE(m_h.data()[ib] == k); + } + } + } +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testHistoContainer.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testHistoContainer.dev.cc new file mode 100644 index 0000000000000..57b80cc9cf275 --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testHistoContainer.dev.cc @@ -0,0 +1,255 @@ +#include +#include +#include +#include +#include + +#define CATCH_CONFIG_MAIN +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +static constexpr auto s_tag = "[" ALPAKA_TYPE_ALIAS_NAME(alpakaTestHistoContainer) "]"; + +template +void checkContents(Hist* h, + unsigned int N, + VERIFY& verify, + 
INCR& incr, + T window, + uint32_t nParts, + const T* v, + const uint32_t* offsets) { + for (uint32_t j = 0; j < nParts; ++j) { + auto off = Hist::histOff(j); + for (uint32_t i = 0; i < Hist::nbins(); ++i) { + auto ii = i + off; + if (0 == h->size(ii)) + continue; + auto k = *h->begin(ii); + if (j % 2) + k = *(h->begin(ii) + (h->end(ii) - h->begin(ii)) / 2); +#ifndef NDEBUG + [[maybe_unused]] auto bk = h->bin(v[k]); +#endif + ALPAKA_ASSERT_OFFLOAD(bk == i); + ALPAKA_ASSERT_OFFLOAD(k < offsets[j + 1]); + auto kl = h->bin(v[k] - window); + auto kh = h->bin(v[k] + window); + ALPAKA_ASSERT_OFFLOAD(kl != i); + ALPAKA_ASSERT_OFFLOAD(kh != i); + // std::cout << kl << ' ' << kh << std::endl; + + auto me = v[k]; + auto tot = 0; + auto nm = 0; + bool l = true; + auto khh = kh; + incr(khh); + for (auto kk = kl; kk != khh; incr(kk)) { + if (kk != kl && kk != kh) + nm += h->size(kk + off); + for (auto p = h->begin(kk + off); p < h->end(kk + off); ++p) { + if (std::min(std::abs(T(v[*p] - me)), std::abs(T(me - v[*p]))) > window) { + } else { + ++tot; + } + } + if (kk == i) { + l = false; + continue; + } + if (l) + for (auto p = h->begin(kk + off); p < h->end(kk + off); ++p) + verify(i, k, k, (*p)); + else + for (auto p = h->begin(kk + off); p < h->end(kk + off); ++p) + verify(i, k, (*p), k); + } + if (!(tot >= nm)) { + std::cout << "too bad " << j << ' ' << i << ' ' << int(me) << '/' << (int)T(me - window) << '/' + << (int)T(me + window) << ": " << kl << '/' << kh << ' ' << khh << ' ' << tot << '/' << nm + << std::endl; + } + if (l) + std::cout << "what? 
" << j << ' ' << i << ' ' << int(me) << '/' << (int)T(me - window) << '/' + << (int)T(me + window) << ": " << kl << '/' << kh << ' ' << khh << ' ' << tot << '/' << nm + << std::endl; + ALPAKA_ASSERT_OFFLOAD(!l); + } + } + int status; + auto* demangled = abi::__cxa_demangle(typeid(Hist).name(), NULL, NULL, &status); + status || printf("Check contents OK with %s\n", demangled); + std::free(demangled); +} + +template +int go(const DevHost& host, const Device& device, Queue& queue) { + std::mt19937 eng(2708); + std::uniform_int_distribution rgen(std::numeric_limits::min(), std::numeric_limits::max()); + + constexpr unsigned int N = 12000; + auto v = make_host_buffer(queue, N); + auto v_d = make_device_buffer(queue, N); + alpaka::memcpy(queue, v_d, v); + + constexpr uint32_t nParts = 10; + constexpr uint32_t partSize = N / nParts; + + using Hist = HistoContainer; + using HistR = HistoContainer; + std::cout << "HistoContainer " << (int)(offsetof(Hist, off)) << ' ' << Hist::nbins() << ' ' << Hist::totbins() << ' ' + << Hist{}.capacity() << ' ' << offsetof(Hist, content) - offsetof(Hist, off) << ' ' + << (std::numeric_limits::max() - std::numeric_limits::min()) / Hist::nbins() << std::endl; + std::cout << "HistoContainer Runtime sized " << (int)(offsetof(HistR, off)) << ' ' << HistR::nbins() << ' ' + << HistR::totbins() << ' ' << HistR{}.capacity() << ' ' << offsetof(HistR, content) - offsetof(HistR, off) + << ' ' << (std::numeric_limits::max() - std::numeric_limits::min()) / HistR::nbins() << std::endl; + + // Offsets for each histogram. 
+ auto offsets = make_host_buffer(queue, nParts + 1); + auto offsets_d = make_device_buffer(queue, nParts + 1); + + // Compile sized histogram (self contained) + auto h = make_host_buffer(queue); + auto h_d = make_device_buffer(queue); + + // Run time sized histogram + auto hr = make_host_buffer(queue); + // Data storage for histogram content (host) + auto hd = make_host_buffer(queue, N); + auto hr_d = make_device_buffer(queue); + // Data storage for histogram content (device) + auto hd_d = make_device_buffer(queue, N); + + // We iterate the test 5 times. + for (int it = 0; it < 5; ++it) { + offsets[0] = 0; + for (uint32_t j = 1; j < nParts + 1; ++j) { + offsets[j] = offsets[j - 1] + partSize - 3 * j; + ALPAKA_ASSERT_OFFLOAD(offsets[j] <= N); + } + + if (it == 1) { // special cases... + offsets[0] = 0; + offsets[1] = 0; + offsets[2] = 19; + offsets[3] = 32 + offsets[2]; + offsets[4] = 123 + offsets[3]; + offsets[5] = 256 + offsets[4]; + offsets[6] = 311 + offsets[5]; + offsets[7] = 2111 + offsets[6]; + offsets[8] = 256 * 11 + offsets[7]; + offsets[9] = 44 + offsets[8]; + offsets[10] = 3297 + offsets[9]; + } + + alpaka::memcpy(queue, offsets_d, offsets); + + for (long long j = 0; j < N; j++) + v[j] = rgen(eng); + + if (it == 2) { // big bin + for (long long j = 1000; j < 2000; j++) + v[j] = sizeof(T) == 1 ? 
22 : 3456; + } + + // for(unsigned int i=0;i" << v[i] << std::endl; + // } + alpaka::memcpy(queue, v_d, v); + + alpaka::memset(queue, h_d, 0); + alpaka::memset(queue, hr_d, 0); + alpaka::memset(queue, hd_d, 0); + + alpaka::wait(queue); + + std::cout << "Calling fillManyFromVector - " << h->size() << std::endl; + fillManyFromVector(h_d.data(), nParts, v_d.data(), offsets_d.data(), offsets[10], 256, queue); + + std::cout << "Calling fillManyFromVector(runtime sized) - " << h->size() << std::endl; + typename HistR::View hrv_d; + hrv_d.assoc = hr_d.data(); + hrv_d.offSize = -1; + hrv_d.offStorage = nullptr; + hrv_d.contentSize = N; + hrv_d.contentStorage = hd_d.data(); + fillManyFromVector(hr_d.data(), hrv_d, nParts, v_d.data(), offsets_d.data(), offsets[10], 256, queue); + + alpaka::memcpy(queue, h, h_d); + // For the runtime sized version: + // We need the histogram for non external data (here, the offsets) + // .. and external data storage (here, the contents) + // .. and plug the data storage address into the histo container + alpaka::memcpy(queue, hr, hr_d); + alpaka::memcpy(queue, hd, hd_d); + + // std::cout << "Calling fillManyFromVector - " << h->size() << std::endl; + alpaka::wait(queue); + + // We cannot update the contents address of the histo container before the copy from device happened + typename HistR::View hrv; + hrv.assoc = hr.data(); + hrv.offSize = -1; + hrv.offStorage = nullptr; + hrv.contentSize = N; + hrv.contentStorage = hd.data(); + hr->initStorage(hrv); + + std::cout << "Copied results" << std::endl; + // for(int i =0;i<=10;i++) + // { + // std::cout << offsets[i] <<" - "<< h->size() << std::endl; + // } + + ALPAKA_ASSERT_OFFLOAD(0 == h->off[0]); + ALPAKA_ASSERT_OFFLOAD(offsets[10] == h->size()); + ALPAKA_ASSERT_OFFLOAD(0 == hr->off[0]); + ALPAKA_ASSERT_OFFLOAD(offsets[10] == hr->size()); + + auto verify = [&](uint32_t i, uint32_t k, uint32_t t1, uint32_t t2) { + ALPAKA_ASSERT_OFFLOAD(t1 < N); + ALPAKA_ASSERT_OFFLOAD(t2 < N); + if (T(v[t1] - 
v[t2]) <= 0) + std::cout << "for " << i << ':' << v[k] << " failed " << v[t1] << ' ' << v[t2] << std::endl; + }; + + auto incr = [](auto& k) { return k = (k + 1) % Hist::nbins(); }; + + // make sure it spans 3 bins... + auto window = T(1300); + checkContents(h.data(), N, verify, incr, window, nParts, v.data(), offsets.data()); + checkContents(hr.data(), N, verify, incr, window, nParts, v.data(), offsets.data()); + } + + return 0; +} + +TEST_CASE("Standard checks of " ALPAKA_TYPE_ALIAS_NAME(alpakaTestHistoContainer), s_tag) { + SECTION("HistoContainerKernel") { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + auto const& host = cms::alpakatools::host(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return; + } + // run the test on each device + for (auto const& device : devices) { + std::cout << "Test Histo Container on " << alpaka::getName(device) << '\n'; + auto queue = Queue(device); + + REQUIRE(go(host, device, queue) == 0); + REQUIRE(go(host, device, queue) == 0); + } + } +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testIndependentKernel.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testIndependentKernel.dev.cc new file mode 100644 index 0000000000000..bd98efcfa32d6 --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testIndependentKernel.dev.cc @@ -0,0 +1,144 @@ +#include +#include + +#include + +#define CATCH_CONFIG_MAIN +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +// each test binary is built for a single Alpaka backend +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +/* Add the group id to te value of each element in the group. 
+ * Each group is composed by the elements first[group]..first[group+1]-1 . + */ +struct IndependentWorkKernel { + template + ALPAKA_FN_ACC void operator()(TAcc const& acc, + T const* __restrict__ in, + T* __restrict__ out, + size_t const* __restrict__ indices, + size_t groups) const { + for (auto group : cms::alpakatools::independent_groups(acc, groups)) { + size_t first = indices[group]; + size_t last = indices[group + 1]; + size_t size = last - first; + for (auto index : cms::alpakatools::independent_group_elements(acc, size)) { + out[first + index] = in[first + index] + group; + } + } + } +}; + +/* Test the IndependentWorkKernel kernel on all devices + */ +template +void testIndependentWorkKernel(size_t groups, size_t grid_size, size_t block_size, TKernel kernel) { + // random number generator with a gaussian distribution + std::random_device rd{}; + std::default_random_engine engine{rd()}; + + // uniform distribution + std::uniform_int_distribution random_size{100, 201}; + + // gaussian distribution + std::normal_distribution dist{0., 1.}; + + // build the groups + std::vector sizes(groups); + auto indices_h = cms::alpakatools::make_host_buffer(groups + 1); + indices_h[0] = 0; + for (size_t i = 0; i < groups; ++i) { + auto size = random_size(engine); + sizes[i] = size; + indices_h[i + 1] = indices_h[i] + size; + } + + // tolerance + constexpr float epsilon = 0.000001; + + // buffer size + const size_t size = indices_h[groups]; + + // allocate the input and output host buffer in pinned memory accessible by the Platform devices + auto in_h = cms::alpakatools::make_host_buffer(size); + auto out_h = cms::alpakatools::make_host_buffer(size); + + // fill the input buffers with random data, and the output buffer with zeros + for (size_t i = 0; i < size; ++i) { + in_h[i] = dist(engine); + out_h[i] = 0; + } + + // run the test on each device + for (auto const& device : cms::alpakatools::devices()) { + std::cout << "Test IndependentWorkKernel on " << 
alpaka::getName(device) << " over " << size << " elements in " + << groups << " independent groups with " << grid_size << " blocks of " << block_size << " elements\n"; + auto queue = Queue(device); + + // allocate input and output buffers on the device + auto indices_d = cms::alpakatools::make_device_buffer(queue, groups + 1); + auto in_d = cms::alpakatools::make_device_buffer(queue, size); + auto out_d = cms::alpakatools::make_device_buffer(queue, size); + + // copy the input data to the device; the size is known from the buffer objects + alpaka::memcpy(queue, indices_d, indices_h); + alpaka::memcpy(queue, in_d, in_h); + + // fill the output buffer with zeros; the size is known from the buffer objects + alpaka::memset(queue, out_d, 0.); + + // launch the 1-dimensional kernel with independent work groups + auto div = cms::alpakatools::make_workdiv(grid_size, block_size); + alpaka::exec(queue, div, kernel, in_d.data(), out_d.data(), indices_d.data(), groups); + + // copy the results from the device to the host + alpaka::memcpy(queue, out_h, out_d); + + // wait for all the operations to complete + alpaka::wait(queue); + + // check the results + for (size_t g = 0; g < groups; ++g) { + size_t first = indices_h[g]; + size_t last = indices_h[g + 1]; + for (size_t i = first; i < last; ++i) { + float sum = in_h[i] + g; + float delta = std::max(std::fabs(sum) * epsilon, epsilon); + REQUIRE(out_h[i] < sum + delta); + REQUIRE(out_h[i] > sum - delta); + } + } + } +} + +TEST_CASE("Test alpaka kernels for the " EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) " backend", + "[" EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) "]") { + SECTION("Independent work groups") { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + INFO("No devices available on the platform " EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE)); + REQUIRE(not devices.empty()); + } + + // launch the independent work kernel with a small block 
size and a small number of blocks; + // this relies on the kernel to loop over the "problem space" and do more work per block + std::cout << "Test independent work kernel with small block size, using scalar dimensions\n"; + testIndependentWorkKernel(100, 32, 32, IndependentWorkKernel{}); + + // launch the independent work kernel with a large block size and a single block; + // this relies on the kernel to check the size of the "problem space" and avoid accessing out-of-bounds data + std::cout << "Test independent work kernel with large block size, using scalar dimensions\n"; + testIndependentWorkKernel(100, 1, 1024, IndependentWorkKernel{}); + + // launch the independent work kernel with a large block size and a large number of blocks; + // this relies on the kernel to check the size of the "problem space" and avoid accessing out-of-bounds data + std::cout << "Test independent work kernel with large block size, using scalar dimensions\n"; + testIndependentWorkKernel(100, 1024, 1024, IndependentWorkKernel{}); + } +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testKernel.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testKernel.dev.cc index 300f139b0c6e3..a730e4b515a76 100644 --- a/HeterogeneousCore/AlpakaInterface/test/alpaka/testKernel.dev.cc +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testKernel.dev.cc @@ -23,6 +23,20 @@ struct VectorAddKernel { } }; +struct VectorAddKernelSkip { + template + ALPAKA_FN_ACC void operator()(TAcc const& acc, + T const* __restrict__ in1, + T const* __restrict__ in2, + T* __restrict__ out, + size_t first, + size_t size) const { + for (auto index : cms::alpakatools::elements_with_stride(acc, first, size)) { + out[index] = in1[index] + in2[index]; + } + } +}; + struct VectorAddKernel1D { template ALPAKA_FN_ACC void operator()( @@ -224,6 +238,76 @@ void testVectorAddKernel(std::size_t problem_size, std::size_t grid_size, std::s } } +// test the 1-dimensional kernel on all devices, potentially skipping some 
elements +template +void testVectorAddKernelSkip(std::size_t skip_elements, + std::size_t problem_size, + std::size_t grid_size, + std::size_t block_size, + TKernel kernel) { + // random number generator with a gaussian distribution + std::random_device rd{}; + std::default_random_engine rand{rd()}; + std::normal_distribution dist{0., 1.}; + + // tolerance + constexpr float epsilon = 0.000001; + + // buffer size + const size_t size = problem_size; + + // allocate input and output host buffers in pinned memory accessible by the Platform devices + auto in1_h = cms::alpakatools::make_host_buffer(size); + auto in2_h = cms::alpakatools::make_host_buffer(size); + auto out_h = cms::alpakatools::make_host_buffer(size); + + // fill the input buffers with random data, and the output buffer with zeros + for (size_t i = 0; i < size; ++i) { + in1_h[i] = dist(rand); + in2_h[i] = dist(rand); + out_h[i] = 0.; + } + + // run the test on each device + for (auto const& device : cms::alpakatools::devices()) { + std::cout << "Test 1D vector addition on " << alpaka::getName(device) << " skipping " << skip_elements << " over " + << problem_size << " values with " << grid_size << " blocks of " << block_size << " elements\n"; + auto queue = Queue(device); + + // allocate input and output buffers on the device + auto in1_d = cms::alpakatools::make_device_buffer(queue, size); + auto in2_d = cms::alpakatools::make_device_buffer(queue, size); + auto out_d = cms::alpakatools::make_device_buffer(queue, size); + + // copy the input data to the device; the size is known from the buffer objects + alpaka::memcpy(queue, in1_d, in1_h); + alpaka::memcpy(queue, in2_d, in2_h); + + // fill the output buffer with zeros; the size is known from the buffer objects + alpaka::memset(queue, out_d, 0.); + + // launch the 1-dimensional kernel with scalar size + auto div = cms::alpakatools::make_workdiv(grid_size, block_size); + alpaka::exec(queue, div, kernel, in1_d.data(), in2_d.data(), out_d.data(), 
skip_elements, size); + + // copy the results from the device to the host + alpaka::memcpy(queue, out_h, out_d); + + // wait for all the operations to complete + alpaka::wait(queue); + + // check the results + for (size_t i = 0; i < skip_elements; ++i) { + REQUIRE(out_h[i] == 0); + } + for (size_t i = skip_elements; i < size; ++i) { + float sum = in1_h[i] + in2_h[i]; + REQUIRE(out_h[i] < sum + epsilon); + REQUIRE(out_h[i] > sum - epsilon); + } + } +} + // test the N-dimensional kernels on all devices template void testVectorAddKernelND(Vec problem_size, Vec grid_size, Vec block_size, TKernel kernel) { @@ -367,5 +451,15 @@ TEST_CASE("Test alpaka kernels for the " EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESP // this relies on the kernel to check the size of the "problem space" and avoid accessing out-of-bounds data std::cout << "Test 1D vector block-level serial addition with large block size, using scalar dimensions\n"; testVectorAddKernel(100, 1, 1024, VectorAddKernelBlockSerial{}); + + // launch the 1-dimensional kernel with a small block size and a small number of blocks; + // this relies on the kernel to loop over the "problem space" and do more work per block + std::cout << "Test 1D vector addition with small block size, using scalar dimensions\n"; + testVectorAddKernelSkip(20, 10000, 32, 32, VectorAddKernelSkip{}); + + // launch the 1-dimensional kernel with a large block size and a single block; + // this relies on the kernel to check the size of the "problem space" and avoid accessing out-of-bounds data + std::cout << "Test 1D vector addition with large block size, using scalar dimensions\n"; + testVectorAddKernelSkip(20, 100, 1, 1024, VectorAddKernelSkip{}); } } diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneHistoContainer.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneHistoContainer.dev.cc new file mode 100644 index 0000000000000..4ce11cc7facdd --- /dev/null +++ 
b/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneHistoContainer.dev.cc @@ -0,0 +1,187 @@ +#include +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +template +struct mykernel { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, T const* __restrict__ v, uint32_t N) const { + ALPAKA_ASSERT_OFFLOAD(v); + ALPAKA_ASSERT_OFFLOAD(N == 12000); + + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + if (threadIdxLocal == 0) { + printf("start kernel for %d data\n", N); + } + + using Hist = HistoContainer; + + auto& hist = alpaka::declareSharedVar(acc); + auto& ws = alpaka::declareSharedVar(acc); + + // set off zero + for (auto j : elements_with_stride(acc, Hist::totbins())) { + hist.off[j] = 0; + } + alpaka::syncBlockThreads(acc); + + // set bins zero + for (auto j : elements_with_stride(acc, Hist::totbins())) { + hist.content[j] = 0; + } + alpaka::syncBlockThreads(acc); + + // count + for (auto j : elements_with_stride(acc, N)) { + hist.count(acc, v[j]); + } + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(0 == hist.size()); + alpaka::syncBlockThreads(acc); + + // finalize + hist.finalize(acc, ws); + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(N == hist.size()); + + // verify + for ([[maybe_unused]] auto j : elements_with_stride(acc, Hist::nbins())) { + ALPAKA_ASSERT_OFFLOAD(hist.off[j] <= hist.off[j + 1]); + } + alpaka::syncBlockThreads(acc); + + for (auto j : elements_with_stride(acc, 32)) { + ws[j] = 0; // used by prefix scan... 
+ } + alpaka::syncBlockThreads(acc); + + // fill + for (auto j : elements_with_stride(acc, N)) { + hist.fill(acc, v[j], j); + } + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(0 == hist.off[0]); + ALPAKA_ASSERT_OFFLOAD(N == hist.size()); + + // bin +#ifndef NDEBUG + for (auto j : elements_with_stride(acc, hist.size() - 1)) { + auto p = hist.begin() + j; + ALPAKA_ASSERT_OFFLOAD((*p) < N); + [[maybe_unused]] auto k1 = Hist::bin(v[*p]); + [[maybe_unused]] auto k2 = Hist::bin(v[*(p + 1)]); + ALPAKA_ASSERT_OFFLOAD(k2 >= k1); + } +#endif + + // forEachInWindow + for (auto i : elements_with_stride(acc, hist.size())) { + auto p = hist.begin() + i; + auto j = *p; +#ifndef NDEBUG + auto b0 = Hist::bin(v[j]); +#endif + [[maybe_unused]] int tot = 0; + auto ftest = [&](unsigned int k) { + ALPAKA_ASSERT_OFFLOAD(k < N); + ++tot; + }; + forEachInWindow(hist, v[j], v[j], ftest); +#ifndef NDEBUG + [[maybe_unused]] int rtot = hist.size(b0); + ALPAKA_ASSERT_OFFLOAD(tot == rtot); +#endif + tot = 0; + auto vm = int(v[j]) - DELTA; + auto vp = int(v[j]) + DELTA; + constexpr int vmax = NBINS != 128 ? 
NBINS * 2 - 1 : std::numeric_limits::max(); + vm = std::max(vm, 0); + vm = std::min(vm, vmax); + vp = std::min(vp, vmax); + vp = std::max(vp, 0); + ALPAKA_ASSERT_OFFLOAD(vp >= vm); + forEachInWindow(hist, vm, vp, ftest); +#ifndef NDEBUG + int bp = Hist::bin(vp); + int bm = Hist::bin(vm); + rtot = hist.end(bp) - hist.begin(bm); + ALPAKA_ASSERT_OFFLOAD(tot == rtot); +#endif + } + } +}; + +template +void go(const DevHost& host, const Device& device, Queue& queue) { + std::mt19937 eng; + + int rmin = std::numeric_limits::min(); + int rmax = std::numeric_limits::max(); + if (NBINS != 128) { + rmin = 0; + rmax = NBINS * 2 - 1; + } + + std::uniform_int_distribution rgen(rmin, rmax); + constexpr unsigned int N = 12000; + + using Hist = HistoContainer; + std::cout << "HistoContainer " << Hist::nbits() << ' ' << Hist::nbins() << ' ' << Hist{}.capacity() << ' ' + << (rmax - rmin) / Hist::nbins() << std::endl; + std::cout << "bins " << int(Hist::bin(0)) << ' ' << int(Hist::bin(rmin)) << ' ' << int(Hist::bin(rmax)) << std::endl; + + auto v = make_host_buffer(queue, N); + auto v_d = make_device_buffer(queue, N); + + for (int it = 0; it < 5; ++it) { + for (long long j = 0; j < N; j++) + v[j] = rgen(eng); + if (it == 2) + for (long long j = N / 2; j < N / 2 + N / 4; j++) + v[j] = 4; + + alpaka::memcpy(queue, v_d, v); + + const auto threadsPerBlockOrElementsPerThread = 256u; + const auto blocksPerGrid = 1u; + const auto workDiv = make_workdiv(blocksPerGrid, threadsPerBlockOrElementsPerThread); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv, mykernel(), v_d.data(), N)); + } + alpaka::wait(queue); +} + +int main() { + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + auto const& host = cms::alpakatools::host(); + + // run the test on each device + for (auto const& device : devices) { + 
std::cout << "Test One Histo Container on " << alpaka::getName(device) << '\n'; + + auto queue = Queue(device); + + go(host, device, queue); + go(host, device, queue); + go(host, device, queue); + } + + return 0; +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneRadixSort.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneRadixSort.dev.cc new file mode 100644 index 0000000000000..b1cb735b55194 --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneRadixSort.dev.cc @@ -0,0 +1,246 @@ +// #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" + +#include +#include +#include + +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/radixSort.h" +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +using FLOAT = double; + +// A templated unsigned integer type with N bytes +template +struct uintN; + +template <> +struct uintN<8> { + using type = uint8_t; +}; + +template <> +struct uintN<16> { + using type = uint16_t; +}; + +template <> +struct uintN<32> { + using type = uint32_t; +}; + +template <> +struct uintN<64> { + using type = uint64_t; +}; + +template +using uintN_t = typename uintN::type; + +// A templated unsigned integer type with the same size as T +template +using uintT_t = uintN_t; + +// Keep only the `N` most significant bytes of `t`, and set the others to zero +template > +ALPAKA_FN_HOST_ACC T truncate(T const& t) { + const int shift = 8 * (sizeof(T) - N); + union { + T t; + uintT_t u; + } c; + c.t = t; + c.u = c.u >> shift << shift; + return c.t; +} + +namespace { + struct testKernel { + template + ALPAKA_FN_ACC void operator()( + const TAcc& acc, FLOAT* gpu_input, int* gpu_product, int 
elements, bool doPrint) const { + //size_t firstElement = threadIdx.x + blockIdx.x * blockDim.x; // This is going to be the track index + //size_t gridSize = blockDim.x * gridDim.x; + bool threadZero = !alpaka::getIdx(acc)[0u]; + + // radix sort works in a single block (and the assert macro does not like the comma of the template parameters). + const auto blocksPerGrid = alpaka::getWorkDiv(acc)[0u]; + const auto blocksIdx = alpaka::getIdx(acc)[0u]; + assert(1 == blocksPerGrid); + assert(0 == blocksIdx); + assert(elements <= 2048); + + auto& order = alpaka::declareSharedVar(acc); + auto& sws = alpaka::declareSharedVar(acc); + auto& z = alpaka::declareSharedVar(acc); + auto& iz = alpaka::declareSharedVar(acc); + // __shared__ uint16_t order[2048]; + // __shared__ uint16_t sws[2048]; + // __shared__ float z[2048]; + // __shared__ int iz[2048]; + for (auto itrack : elements_with_stride(acc, elements)) { + z[itrack] = gpu_input[itrack]; + iz[itrack] = 10000 * gpu_input[itrack]; + // order[itrack] = itrack; + } + alpaka::syncBlockThreads(acc); + radixSort(acc, z, order, sws, elements); + alpaka::syncBlockThreads(acc); + + //verify + for (auto itrack : elements_with_stride(acc, elements - 1)) { + auto ntrack = order[itrack]; + auto mtrack = order[itrack + 1]; + assert(truncate<2>(z[ntrack]) <= truncate<2>(z[mtrack])); + } + + alpaka::syncBlockThreads(acc); + + if (doPrint) + if (threadZero) { + for (int itrackO = 0; itrackO < elements; itrackO++) { + int itrack = order[itrackO]; + printf( + "Radix sort with %i elements: At position %i, track position at input %i with z at input %f, z fed to " + "radixSort %f\n", + elements, + itrackO, + itrack, + gpu_input[itrack], + z[itrack]); + gpu_product[itrackO] = itrack; + } + } + + alpaka::syncBlockThreads(acc); + radixSort(acc, iz, order, sws, elements); + alpaka::syncBlockThreads(acc); + + for (auto itrack : elements_with_stride(acc, elements - 1)) { + auto ntrack = order[itrack]; + auto mtrack = order[itrack + 1]; + 
assert(iz[ntrack] <= iz[mtrack]); + } + + if (doPrint) + if (threadZero) { + for (int itrackO = 0; itrackO < elements; itrackO++) { + int itrack = order[itrackO]; + printf( + "Radix sort with %i elements: At position %i, track position at input %i with z at input %f, z fed to " + "radixSort %d\n", + elements, + itrackO, + itrack, + gpu_input[itrack], + iz[itrack]); + gpu_product[itrackO] = itrack; + } + } + } + }; + + void testWrapper(Queue& queue, FLOAT* gpu_input, int* gpu_product, int elements, bool doPrint) { + auto blockSize = 512; // somewhat arbitrary + auto gridSize = 1; // round up to cover the sample size + const auto workdiv = make_workdiv(gridSize, blockSize); + alpaka::enqueue(queue, + alpaka::createTaskKernel(workdiv, testKernel(), gpu_input, gpu_product, elements, doPrint)); + alpaka::wait(queue); + } +} // namespace + +#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" + +int main() { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + std::random_device rd; + std::mt19937 g(rd()); + + // run the test on each device + for (auto const& device : devices) { + Queue queue(device); + // FLOAT* gpu_input; + // int* gpu_product; + + int nmax = 4 * 260; + auto gpu_input_h = cms::alpakatools::make_host_buffer(queue, nmax); + auto i = gpu_input_h.data(); + for (auto v : {30.0, 30.0, -4.4, -7.1860761642, -6.6870317459, 1.8010582924, 2.2535820007, + 2.2666890621, 2.2677690983, 2.2794606686, 2.2802586555, 2.2821085453, 2.2852313519, 2.2877883911, + 2.2946476936, 2.2960267067, 2.3006286621, 2.3245604038, 2.6755006313, 2.7229132652, 2.783257246, + 2.8440306187, 2.9017834663, 2.9252648354, 2.9254128933, 2.927520752, 2.9422419071, 2.9453969002, + 2.9457902908, 2.9465973377, 2.9492356777, 2.9573802948, 
2.9575133324, 2.9575304985, 2.9586606026, + 2.9605507851, 2.9622797966, 2.9625515938, 2.9641008377, 2.9646151066, 2.9676523209, 2.9708273411, + 2.974111557, 2.9742531776, 2.9772830009, 2.9877333641, 2.9960610867, 3.013969183, 3.0187871456, + 3.0379793644, 3.0407221317, 3.0415751934, 3.0470511913, 3.0560519695, 3.0592908859, 3.0599737167, + 3.0607066154, 3.0629007816, 3.0632448196, 3.0633215904, 3.0643932819, 3.0645000935, 3.0666446686, + 3.068046093, 3.0697011948, 3.0717656612, 3.0718104839, 3.0718348026, 3.0733406544, 3.0738227367, + 3.0738801956, 3.0738828182, 3.0744686127, 3.0753741264, 3.0758397579, 3.0767207146, 3.0773906708, + 3.0778541565, 3.0780284405, 3.0780889988, 3.0782799721, 3.0789675713, 3.0792205334, 3.0793278217, + 3.0795567036, 3.0797944069, 3.0806643963, 3.0809247494, 3.0815284252, 3.0817306042, 3.0819730759, + 3.0820026398, 3.0838682652, 3.084009409, 3.0848178864, 3.0853257179, 3.0855510235, 3.0856611729, + 3.0873703957, 3.0884618759, 3.0891149044, 3.0893011093, 3.0895674229, 3.0901503563, 3.0903317928, + 3.0912668705, 3.0920717716, 3.0954346657, 3.096424818, 3.0995628834, 3.1001036167, 3.1173279285, + 3.1185023785, 3.1195163727, 3.1568386555, 3.1675374508, 3.1676850319, 3.1886672974, 3.3769197464, + 3.3821125031, 3.4780933857, 3.4822063446, 3.4989323616, 3.5076274872, 3.5225863457, 3.5271244049, + 3.5298995972, 3.5417425632, 3.5444457531, 3.5465917587, 3.5473103523, 3.5480232239, 3.5526945591, + 3.5531234741, 3.5538012981, 3.5544877052, 3.5547749996, 3.5549693108, 3.5550665855, 3.5558729172, + 3.5560717583, 3.5560848713, 3.5584278107, 3.558681488, 3.5587313175, 3.5592217445, 3.559384346, + 3.5604712963, 3.5634038448, 3.563803196, 3.564593792, 3.5660364628, 3.5683133602, 3.5696356297, + 3.569729805, 3.5740811825, 3.5757565498, 3.5760207176, 3.5760478973, 3.5836098194, 3.5839796066, + 3.5852358341, 3.5901627541, 3.6141786575, 3.6601481438, 3.7187042236, 3.9741659164, 4.4111995697, + 4.5337572098, 4.6292567253, 4.6748633385, 4.6806583405, 
4.6868157387, 4.6868577003, 4.6879930496, + 4.6888813972, 4.6910686493, 4.6925001144, 4.6957530975, 4.698094368, 4.6997032166, 4.7017259598, + 4.7020640373, 4.7024269104, 4.7036352158, 4.7038679123, 4.7042069435, 4.7044086456, 4.7044372559, + 4.7050771713, 4.7055773735, 4.7060651779, 4.7062759399, 4.7065420151, 4.70657444, 4.7066287994, + 4.7066788673, 4.7067341805, 4.7072944641, 4.7074551582, 4.7075614929, 4.7075891495, 4.7076044083, + 4.7077374458, 4.7080879211, 4.70819664, 4.7086658478, 4.708937645, 4.7092385292, 4.709479332, + 4.7095656395, 4.7100076675, 4.7102108002, 4.7104525566, 4.7105507851, 4.71118927, 4.7113513947, + 4.7115578651, 4.7116270065, 4.7116751671, 4.7117190361, 4.7117333412, 4.7117910385, 4.7119007111, + 4.7120013237, 4.712003231, 4.712044239, 4.7122926712, 4.7135767937, 4.7143669128, 4.7145690918, + 4.7148418427, 4.7149815559, 4.7159647942, 4.7161884308, 4.7177276611, 4.717815876, 4.718059063, + 4.7188801765, 4.7190728188, 4.7199850082, 4.7213058472, 4.7239775658, 4.7243933678, 4.7243990898, + 4.7273659706, 4.7294125557, 4.7296204567, 4.7325615883, 4.7356877327, 4.740146637, 4.742254734, + 4.7433848381, 4.7454957962, 4.7462964058, 4.7692604065, 4.7723139628, 4.774812736, 4.8577151299, + 4.890037536}) { + *(i++) = v; + } + auto input = gpu_input_h.data(); + for (int i = 0; i < 260; i++) { + input[i + 260] = -input[i]; + input[i + 2 * 260] = input[i] + 10; + input[i + 3 * 260] = -input[i] - 10; + } + auto gpu_input_d = cms::alpakatools::make_device_buffer(queue, nmax); + //cudaCheck(cudaMalloc(&gpu_input, sizeof(FLOAT) * nmax)); + // cudaCheck(cudaMalloc(&gpu_product, sizeof(int) * nmax)); + auto gpu_product_d = cms::alpakatools::make_device_buffer(queue, nmax); + // copy the input data to the GPU + alpaka::memcpy(queue, gpu_input_d, gpu_input_h); + //cudaCheck(cudaMemcpy(gpu_input, input, sizeof(FLOAT) * nmax, cudaMemcpyHostToDevice)); + + for (int k = 2; k <= nmax; k++) { + std::shuffle(input, input + k, g); + printf("Test with %d items\n", 
k); + // sort on the GPU + testWrapper(queue, gpu_input_d.data(), gpu_product_d.data(), k, false); + alpaka::wait(queue); + } + } + return 0; +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneToManyAssoc.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneToManyAssoc.dev.cc new file mode 100644 index 0000000000000..d1de1f1c17cca --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testOneToManyAssoc.dev.cc @@ -0,0 +1,327 @@ +#include +#include +#include +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/OneToManyAssoc.h" + +constexpr uint32_t MaxElem = 64000; +constexpr uint32_t MaxTk = 8000; +constexpr uint32_t MaxAssocs = 4 * MaxTk; + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +using AssocRandomAccess = OneToManyAssocRandomAccess; +using AssocSequential = OneToManyAssocSequential; +using SmallAssoc = OneToManyAssocSequential; +using Multiplicity = OneToManyAssocRandomAccess; +using TK = std::array; + +namespace { + template + ALPAKA_FN_HOST_ACC typename std::make_signed::type toSigned(T v) { + return static_cast::type>(v); + } +} // namespace + +struct countMultiLocal { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + TK const* __restrict__ tk, + Multiplicity* __restrict__ assoc, + uint32_t n) const { + for (auto i : elements_with_stride(acc, n)) { + auto& local = alpaka::declareSharedVar(acc); + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + const bool oncePerSharedMemoryAccess = (threadIdxLocal == 0); + if (oncePerSharedMemoryAccess) { + local.zero(); + } + alpaka::syncBlockThreads(acc); + local.count(acc, 2 + i % 4); + alpaka::syncBlockThreads(acc); + if (oncePerSharedMemoryAccess) { + assoc->add(acc, local); + } + } + } 
+}; + +struct countMulti { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + TK const* __restrict__ tk, + Multiplicity* __restrict__ assoc, + uint32_t n) const { + for (auto i : elements_with_stride(acc, n)) { + assoc->count(acc, 2 + i % 4); + } + } +}; + +struct verifyMulti { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, Multiplicity* __restrict__ m1, Multiplicity* __restrict__ m2) const { + for ([[maybe_unused]] auto i : elements_with_stride(acc, Multiplicity{}.totOnes())) { + ALPAKA_ASSERT_OFFLOAD(m1->off[i] == m2->off[i]); + } + } +}; + +struct count { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + TK const* __restrict__ tk, + AssocRandomAccess* __restrict__ assoc, + uint32_t n) const { + for (auto i : elements_with_stride(acc, 4 * n)) { + auto k = i / 4; + auto j = i - 4 * k; + ALPAKA_ASSERT_OFFLOAD(j < 4); + if (k >= n) { + return; + } + if (tk[k][j] < MaxElem) { + assoc->count(acc, tk[k][j]); + } + } + } +}; + +struct fill { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + TK const* __restrict__ tk, + AssocRandomAccess* __restrict__ assoc, + uint32_t n) const { + for (auto i : elements_with_stride(acc, 4 * n)) { + auto k = i / 4; + auto j = i - 4 * k; + ALPAKA_ASSERT_OFFLOAD(j < 4); + if (k >= n) { + return; + } + if (tk[k][j] < MaxElem) { + assoc->fill(acc, tk[k][j], k); + } + } + } +}; + +struct verify { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, Assoc* __restrict__ assoc) const { + ALPAKA_ASSERT_OFFLOAD(assoc->size() < Assoc{}.capacity()); + } +}; + +struct fillBulk { + template + ALPAKA_FN_ACC void operator()( + const TAcc& acc, AtomicPairCounter* apc, TK const* __restrict__ tk, Assoc* __restrict__ assoc, uint32_t n) const { + for (auto k : elements_with_stride(acc, n)) { + auto m = tk[k][3] < MaxElem ? 
4 : 3; + assoc->bulkFill(acc, *apc, &tk[k][0], m); + } + } +}; + +struct verifyBulk { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, Assoc const* __restrict__ assoc, AtomicPairCounter const* apc) const { + if (::toSigned(apc->get().first) >= Assoc::ctNOnes()) { + printf("Overflow %d %d\n", apc->get().first, Assoc::ctNOnes()); + } + ALPAKA_ASSERT_OFFLOAD(toSigned(assoc->size()) < Assoc::ctCapacity()); + } +}; + +int main() { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + // run the test on each device + for (auto const& device : devices) { + Queue queue(device); + + std::cout << "OneToManyAssocRandomAccess " << sizeof(AssocRandomAccess) << " Ones=" << AssocRandomAccess{}.totOnes() + << " Capacity=" << AssocRandomAccess{}.capacity() << std::endl; + std::cout << "OneToManyAssocSequential " << sizeof(AssocSequential) << " Ones=" << AssocSequential{}.totOnes() + << " Capacity=" << AssocSequential{}.capacity() << std::endl; + std::cout << "OneToManyAssoc (small) " << sizeof(SmallAssoc) << " Ones=" << SmallAssoc{}.totOnes() + << " Capacity=" << SmallAssoc{}.capacity() << std::endl; + + std::mt19937 eng; + std::geometric_distribution rdm(0.8); + + constexpr uint32_t N = 4000; + + auto tr = make_host_buffer[]>(queue, N); + // fill with "index" to element + long long ave = 0; + int imax = 0; + auto n = 0U; + auto z = 0U; + auto nz = 0U; + for (auto i = 0U; i < 4U; ++i) { + auto j = 0U; + while (j < N && n < MaxElem) { + if (z == 11) { + ++n; + z = 0; + ++nz; + continue; + } // a bit of not assoc + auto x = rdm(eng); + auto k = std::min(j + x + 1, N); + if (i == 3 && z == 3) { // some triplets time to time + for (; j < k; ++j) + tr[j][i] = MaxElem + 1; + } else { + ave += x + 1; + imax = std::max(imax, x); + for (; j < k; 
++j) + tr[j][i] = n; + ++n; + } + ++z; + } + ALPAKA_ASSERT_OFFLOAD(n <= MaxElem); + ALPAKA_ASSERT_OFFLOAD(j <= N); + } + std::cout << "filled with " << n << " elements " << double(ave) / n << ' ' << imax << ' ' << nz << std::endl; + + auto v_d = make_device_buffer[]>(queue, N); + alpaka::memcpy(queue, v_d, tr); + + auto ara_d = make_device_buffer(queue); + alpaka::memset(queue, ara_d, 0); + + const auto threadsPerBlockOrElementsPerThread = 256u; + const auto blocksPerGrid4N = divide_up_by(4 * N, threadsPerBlockOrElementsPerThread); + const auto workDiv4N = make_workdiv(blocksPerGrid4N, threadsPerBlockOrElementsPerThread); + + AssocRandomAccess::template launchZero(ara_d.data(), queue); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv4N, count(), v_d.data(), ara_d.data(), N)); + + AssocRandomAccess::template launchFinalize(ara_d.data(), queue); + + alpaka::enqueue(queue, alpaka::createTaskKernel(WorkDiv1D{1u, 1u, 1u}, verify(), ara_d.data())); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv4N, fill(), v_d.data(), ara_d.data(), N)); + + auto ara_h = make_host_buffer(queue); + alpaka::memcpy(queue, ara_h, ara_d); + alpaka::wait(queue); + + std::cout << ara_h->size() << std::endl; + imax = 0; + ave = 0; + z = 0; + for (auto i = 0U; i < n; ++i) { + auto x = ara_h->size(i); + if (x == 0) { + z++; + continue; + } + ave += x; + imax = std::max(imax, int(x)); + } + ALPAKA_ASSERT_OFFLOAD(0 == ara_h->size(n)); + std::cout << "found with " << n << " elements " << double(ave) / n << ' ' << imax << ' ' << z << std::endl; + + // now the inverse map (actually this is the direct....) 
+ auto dc_d = make_device_buffer(queue); + alpaka::memset(queue, dc_d, 0); + + const auto blocksPerGrid = divide_up_by(N, threadsPerBlockOrElementsPerThread); + const auto workDiv = make_workdiv(blocksPerGrid, threadsPerBlockOrElementsPerThread); + + auto as_d = make_device_buffer(queue); + alpaka::enqueue(queue, + alpaka::createTaskKernel(workDiv, fillBulk(), dc_d.data(), v_d.data(), as_d.data(), N)); + + alpaka::enqueue( + queue, alpaka::createTaskKernel(workDiv, AssocSequential::finalizeBulk(), dc_d.data(), as_d.data())); + + alpaka::enqueue(queue, + alpaka::createTaskKernel(WorkDiv1D{1u, 1u, 1u}, verifyBulk(), as_d.data(), dc_d.data())); + + auto as_h = make_host_buffer(queue); + alpaka::memcpy(queue, as_h, as_d); + + auto dc_h = make_host_buffer(queue); + alpaka::memcpy(queue, dc_h, dc_d); + alpaka::wait(queue); + + alpaka::memset(queue, dc_d, 0); + auto sa_d = make_device_buffer(queue); + alpaka::memset(queue, sa_d, 0); + + alpaka::enqueue(queue, + alpaka::createTaskKernel(workDiv, fillBulk(), dc_d.data(), v_d.data(), sa_d.data(), N)); + + alpaka::enqueue(queue, + alpaka::createTaskKernel(workDiv, SmallAssoc::finalizeBulk(), dc_d.data(), sa_d.data())); + + alpaka::enqueue(queue, + alpaka::createTaskKernel(WorkDiv1D{1u, 1u, 1u}, verifyBulk(), sa_d.data(), dc_d.data())); + + std::cout << "final counter value " << dc_h->get().second << ' ' << dc_h->get().first << std::endl; + + std::cout << as_h->size() << std::endl; + imax = 0; + ave = 0; + for (auto i = 0U; i < N; ++i) { + auto x = as_h->size(i); + if (!(x == 4 || x == 3)) { + std::cout << "i=" << i << " x=" << x << std::endl; + } + ALPAKA_ASSERT_OFFLOAD(x == 4 || x == 3); + ave += x; + imax = std::max(imax, int(x)); + } + ALPAKA_ASSERT_OFFLOAD(0 == as_h->size(N)); + std::cout << "found with ave occupancy " << double(ave) / N << ' ' << imax << std::endl; + + // here verify use of block local counters + auto m1_d = make_device_buffer(queue); + alpaka::memset(queue, m1_d, 0); + auto m2_d = 
make_device_buffer(queue); + alpaka::memset(queue, m2_d, 0); + + Multiplicity::template launchZero(m1_d.data(), queue); + Multiplicity::template launchZero(m2_d.data(), queue); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv4N, countMulti(), v_d.data(), m1_d.data(), N)); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv4N, countMultiLocal(), v_d.data(), m2_d.data(), N)); + + const auto blocksPerGridTotBins = 1u; + const auto threadsPerBlockOrElementsPerThreadTotBins = Multiplicity::ctNOnes(); + const auto workDivTotBins = make_workdiv(blocksPerGridTotBins, threadsPerBlockOrElementsPerThreadTotBins); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivTotBins, verifyMulti(), m1_d.data(), m2_d.data())); + + Multiplicity::launchFinalize(m1_d.data(), queue); + Multiplicity::launchFinalize(m2_d.data(), queue); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivTotBins, verifyMulti(), m1_d.data(), m2_d.data())); + + alpaka::wait(queue); + + return 0; + } +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testPrefixScan.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testPrefixScan.dev.cc new file mode 100644 index 0000000000000..bffee8f1f533d --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testPrefixScan.dev.cc @@ -0,0 +1,219 @@ +#include +#include +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/prefixScan.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +// static constexpr auto s_tag = "[" ALPAKA_TYPE_ALIAS_NAME(alpakaTestPrefixScan) "]"; + +template +struct format_traits { +public: + static const constexpr char* failed_msg = "failed(int) size=%d, i=%d, blockDimension=%d: c[i]=%d c[i-1]=%d\n"; +}; + +template <> +struct 
format_traits { +public: + static const constexpr char* failed_msg = "failed(float size=%d, i=%d, blockDimension=%d: c[i]=%f c[i-1]=%f\n"; +}; + +template +struct testPrefixScan { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, unsigned int size) const { + auto& ws = alpaka::declareSharedVar(acc); + auto& c = alpaka::declareSharedVar(acc); + auto& co = alpaka::declareSharedVar(acc); + + for (auto i : elements_with_stride(acc, size)) { + c[i] = 1; + }; + + alpaka::syncBlockThreads(acc); + + blockPrefixScan(acc, c, co, size, ws); + blockPrefixScan(acc, c, size, ws); + + ALPAKA_ASSERT_OFFLOAD(1 == c[0]); + ALPAKA_ASSERT_OFFLOAD(1 == co[0]); + + // TODO: not needed? Not in multi kernel version, not in CUDA version + alpaka::syncBlockThreads(acc); + + for (auto i : elements_with_stride(acc, size)) { + if (0 == i) + continue; + if constexpr (!std::is_floating_point_v) { + if (!((c[i] == c[i - 1] + 1) && (c[i] == i + 1) && (c[i] == co[i]))) + printf("c[%d]=%d, co[%d]=%d\n", i, c[i], i, co[i]); + } else { + if (!((c[i] == c[i - 1] + 1) && (c[i] == i + 1) && (c[i] == co[i]))) + printf("c[%d]=%f, co[%d]=%f\n", i, c[i], i, co[i]); + } + ALPAKA_ASSERT_OFFLOAD(c[i] == c[i - 1] + 1); + ALPAKA_ASSERT_OFFLOAD(c[i] == i + 1); + ALPAKA_ASSERT_OFFLOAD(c[i] == co[i]); + } + } +}; + +/* + * NB: GPU-only, so do not care about elements here. 
+ */ +template +struct testWarpPrefixScan { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, uint32_t size) const { + if constexpr (!requires_single_thread_per_block_v) { + ALPAKA_ASSERT_OFFLOAD(size <= 32); + auto& c = alpaka::declareSharedVar(acc); + auto& co = alpaka::declareSharedVar(acc); + + uint32_t const blockDimension = alpaka::getWorkDiv(acc)[0u]; + uint32_t const blockThreadIdx = alpaka::getIdx(acc)[0u]; + auto i = blockThreadIdx; + c[i] = 1; + alpaka::syncBlockThreads(acc); + auto laneId = blockThreadIdx & 0x1f; + + warpPrefixScan(acc, laneId, c, co, i); + warpPrefixScan(acc, laneId, c, i); + + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(1 == c[0]); + ALPAKA_ASSERT_OFFLOAD(1 == co[0]); + if (i != 0) { + if (c[i] != c[i - 1] + 1) + printf(format_traits::failed_msg, size, i, blockDimension, c[i], c[i - 1]); + ALPAKA_ASSERT_OFFLOAD(c[i] == c[i - 1] + 1); + ALPAKA_ASSERT_OFFLOAD(c[i] == static_cast(i + 1)); + ALPAKA_ASSERT_OFFLOAD(c[i] == co[i]); + } + } else { + // We should never be called outsie of the GPU. 
+ ALPAKA_ASSERT_OFFLOAD(false); + } + } +}; + +struct init { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, uint32_t* v, uint32_t val, uint32_t n) const { + for (auto index : elements_with_stride(acc, n)) { + v[index] = val; + + if (index == 0) + printf("init\n"); + } + } +}; + +struct verify { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, uint32_t const* v, uint32_t n) const { + for (auto index : elements_with_stride(acc, n)) { + ALPAKA_ASSERT_OFFLOAD(v[index] == index + 1); + + if (index == 0) + printf("verify\n"); + } + } +}; + +int main() { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + // auto const& host = cms::alpakatools::host(); + + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + for (auto const& device : devices) { + std::cout << "Test prefix scan on " << alpaka::getName(device) << '\n'; + auto queue = Queue(device); + const auto warpSize = alpaka::getWarpSizes(device)[0]; + // WARP PREFIXSCAN (OBVIOUSLY GPU-ONLY) + if constexpr (!requires_single_thread_per_block_v) { + std::cout << "warp level" << std::endl; + + const auto threadsPerBlockOrElementsPerThread = 32; + const auto blocksPerGrid = 1; + const auto workDivWarp = make_workdiv(blocksPerGrid, threadsPerBlockOrElementsPerThread); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivWarp, testWarpPrefixScan(), 32)); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivWarp, testWarpPrefixScan(), 16)); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivWarp, testWarpPrefixScan(), 5)); + } + + // PORTABLE BLOCK PREFIXSCAN + std::cout << "block level" << std::endl; + + // Running kernel with 1 block, and bs threads per block or elements per thread. + // NB: obviously for tests only, for perf would need to use bs = 1024 in GPU version. 
+ for (int bs = 32; bs <= 1024; bs += 32) { + const auto blocksPerGrid2 = 1; + const auto workDivSingleBlock = make_workdiv(blocksPerGrid2, bs); + + std::cout << "blocks per grid: " << blocksPerGrid2 << ", threads per block or elements per thread: " << bs + << std::endl; + + // Problem size + for (int j = 1; j <= 1024; ++j) { + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivSingleBlock, testPrefixScan(), j)); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivSingleBlock, testPrefixScan(), j)); + } + } + + // PORTABLE MULTI-BLOCK PREFIXSCAN + uint32_t num_items = 200; + for (int ksize = 1; ksize < 4; ++ksize) { + std::cout << "multiblock" << std::endl; + num_items *= 10; + + auto input_d = make_device_buffer(queue, num_items); + auto output1_d = make_device_buffer(queue, num_items); + auto blockCounter_d = make_device_buffer(queue); + + const auto nThreadsInit = 256; // NB: 1024 would be better + const auto nBlocksInit = divide_up_by(num_items, nThreadsInit); + const auto workDivMultiBlockInit = make_workdiv(nBlocksInit, nThreadsInit); + + alpaka::enqueue(queue, + alpaka::createTaskKernel(workDivMultiBlockInit, init(), input_d.data(), 1, num_items)); + alpaka::memset(queue, blockCounter_d, 0); + + const auto nThreads = 1024; + const auto nBlocks = divide_up_by(num_items, nThreads); + const auto workDivMultiBlock = make_workdiv(nBlocks, nThreads); + + std::cout << "launch multiBlockPrefixScan " << num_items << ' ' << nBlocks << std::endl; + alpaka::enqueue(queue, + alpaka::createTaskKernel(workDivMultiBlock, + multiBlockPrefixScan(), + input_d.data(), + output1_d.data(), + num_items, + nBlocks, + blockCounter_d.data(), + warpSize)); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDivMultiBlock, verify(), output1_d.data(), num_items)); + + alpaka::wait(queue); // input_d and output1_d end of scope + } // ksize + } + + return 0; +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testRadixSort.dev.cc 
b/HeterogeneousCore/AlpakaInterface/test/alpaka/testRadixSort.dev.cc new file mode 100644 index 0000000000000..64c18750a81c7 --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testRadixSort.dev.cc @@ -0,0 +1,289 @@ +#include +#include +#include +using namespace std::chrono_literals; +#include +#include +#include +#include +#include +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/radixSort.h" +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +template +struct RS { + using type = std::uniform_int_distribution; + static auto ud() { return type(std::numeric_limits::min(), std::numeric_limits::max()); } + static constexpr T imax = std::numeric_limits::max(); +}; + +template <> +struct RS { + using T = float; + using type = std::uniform_real_distribution; + static auto ud() { return type(-std::numeric_limits::max() / 2, std::numeric_limits::max() / 2); } + // static auto ud() { return type(0,std::numeric_limits::max()/2);} + static constexpr int imax = std::numeric_limits::max(); +}; + +// A templated unsigned integer type with N bytes +template +struct uintN; + +template <> +struct uintN<8> { + using type = uint8_t; +}; + +template <> +struct uintN<16> { + using type = uint16_t; +}; + +template <> +struct uintN<32> { + using type = uint32_t; +}; + +template <> +struct uintN<64> { + using type = uint64_t; +}; + +template +using uintN_t = typename uintN::type; + +// A templated unsigned integer type with the same size as T +template +using uintT_t = uintN_t; + +// Keep only the `N` most significant bytes of `t`, and set the others to zero +template > +void truncate(T& t) { + const int shift = 8 * (sizeof(T) - N); + union 
{ + T t; + uintT_t u; + } c; + c.t = t; + c.u = c.u >> shift << shift; + t = c.t; +} + +template +void go(Queue& queue, bool useShared) { + std::mt19937 eng; + //std::mt19937 eng2; + auto rgen = RS::ud(); + + std::chrono::high_resolution_clock::duration delta = 0ns; + constexpr int blocks = 10; + constexpr int blockSize = 256 * 32; + constexpr int N = blockSize * blocks; + auto v_h = cms::alpakatools::make_host_buffer(queue, N); + //uint16_t ind_h[N]; + + constexpr bool sgn = T(-1) < T(0); + std::cout << "Will sort " << N << (sgn ? " signed" : " unsigned") + << (std::numeric_limits::is_integer ? " 'ints'" : " 'float'") << " of size " << sizeof(T) << " using " + << NS << " significant bytes" << std::endl; + + for (int i = 0; i < 50; ++i) { + if (i == 49) { + for (long long j = 0; j < N; j++) + v_h[j] = 0; + } else if (i > 30) { + for (long long j = 0; j < N; j++) + v_h[j] = rgen(eng); + } else { + uint64_t imax = (i < 15) ? uint64_t(RS::imax) + 1LL : 255; + for (uint64_t j = 0; j < N; j++) { + v_h[j] = (j % imax); + if (j % 2 && i % 2) + v_h[j] = -v_h[j]; + } + } + + auto offsets_h = cms::alpakatools::make_host_buffer(queue, blocks + 1); + offsets_h[0] = 0; + for (int j = 1; j < blocks + 1; ++j) { + offsets_h[j] = offsets_h[j - 1] + blockSize - 3 * j; + assert(offsets_h[j] <= N); + } + + if (i == 1) { // special cases... 
+ offsets_h[0] = 0; + offsets_h[1] = 0; + offsets_h[2] = 19; + offsets_h[3] = 32 + offsets_h[2]; + offsets_h[4] = 123 + offsets_h[3]; + offsets_h[5] = 256 + offsets_h[4]; + offsets_h[6] = 311 + offsets_h[5]; + offsets_h[7] = 2111 + offsets_h[6]; + offsets_h[8] = 256 * 11 + offsets_h[7]; + offsets_h[9] = 44 + offsets_h[8]; + offsets_h[10] = 3297 + offsets_h[9]; + } + + std::shuffle(v_h.data(), v_h.data() + N, eng); + + auto v_d = cms::alpakatools::make_device_buffer(queue, N); + auto ind_d = cms::alpakatools::make_device_buffer(queue, N); + auto ind_h = cms::alpakatools::make_host_buffer(queue, N); + auto ws_d = cms::alpakatools::make_device_buffer(queue, N); + auto off_d = cms::alpakatools::make_device_buffer(queue, blocks + 1); + + alpaka::memcpy(queue, v_d, v_h); + alpaka::memcpy(queue, off_d, offsets_h); + + if (i < 2) + std::cout << "launch for " << offsets_h[blocks] << std::endl; + + auto ntXBl = 1 == i % 4 ? 256 : 256; + + auto start = std::chrono::high_resolution_clock::now(); + // The MaxSize is the max size we allow between offsets (i.e. biggest set to sort when using shared memory). 
+ constexpr int MaxSize = 256 * 32; + auto workdiv = make_workdiv(blocks, ntXBl); + if (useShared) + // The original CUDA version used to call a kernel with __launch_bounds__(256, 4) specifier + // + alpaka::enqueue(queue, + alpaka::createTaskKernel(workdiv, + radixSortMultiWrapper{}, + v_d.data(), + ind_d.data(), + off_d.data(), + nullptr, + MaxSize * sizeof(uint16_t))); + else + alpaka::enqueue( + queue, + alpaka::createTaskKernel( + workdiv, radixSortMultiWrapper2{}, v_d.data(), ind_d.data(), off_d.data(), ws_d.data())); + + if (i < 2) + std::cout << "launch done for " << offsets_h[blocks] << std::endl; + + alpaka::memcpy(queue, ind_h, ind_d); + alpaka::wait(queue); + + delta += std::chrono::high_resolution_clock::now() - start; + + if (i < 2) + std::cout << "kernel and read back done for " << offsets_h[blocks] << std::endl; + + if (32 == i) { + std::cout << LL(v_h[ind_h[0]]) << ' ' << LL(v_h[ind_h[1]]) << ' ' << LL(v_h[ind_h[2]]) << std::endl; + std::cout << LL(v_h[ind_h[3]]) << ' ' << LL(v_h[ind_h[10]]) << ' ' << LL(v_h[ind_h[blockSize - 1000]]) + << std::endl; + std::cout << LL(v_h[ind_h[blockSize / 2 - 1]]) << ' ' << LL(v_h[ind_h[blockSize / 2]]) << ' ' + << LL(v_h[ind_h[blockSize / 2 + 1]]) << std::endl; + } + for (int ib = 0; ib < blocks; ++ib) { + std::set inds; + if (offsets_h[ib + 1] > offsets_h[ib]) + inds.insert(ind_h[offsets_h[ib]]); + for (auto j = offsets_h[ib] + 1; j < offsets_h[ib + 1]; j++) { + if (inds.count(ind_h[j]) != 0) { + printf("i=%d ib=%d ind_h[j=%d]=%d: duplicate indice!\n", i, ib, j, ind_h[j]); + std::vector counts; + counts.resize(offsets_h[ib + 1] - offsets_h[ib], 0); + for (size_t j2 = offsets_h[ib]; j2 < offsets_h[ib + 1]; j2++) { + counts[ind_h[j2]]++; + } + for (size_t j2 = 0; j2 < counts.size(); j2++) { + if (counts[j2] != 1) + printf("counts[%ld]=%d ", j2, counts[j2]); + } + printf("\n"); + printf("inds.count(ind_h[j] = %lu\n", inds.count(ind_h[j])); + } + assert(0 == inds.count(ind_h[j])); + inds.insert(ind_h[j]); + auto a = 
v_h.data() + offsets_h[ib]; + auto k1 = a[ind_h[j]]; + auto k2 = a[ind_h[j - 1]]; + truncate(k1); + truncate(k2); + if (k1 < k2) { + std::cout << "i=" << i << " not ordered at ib=" << ib << " in [" << offsets_h[ib] << ", " + << offsets_h[ib + 1] - 1 << "] j=" << j << " ind[j]=" << ind_h[j] + << " (k1 < k2) : a1=" << (int64_t)a[ind_h[j]] << " k1=" << (int64_t)k1 + << " a2= " << (int64_t)a[ind_h[j - 1]] << " k2=" << (int64_t)k2 << std::endl; + //sleep(2); + assert(false); + } + } + if (!inds.empty()) { + assert(0 == *inds.begin()); + assert(inds.size() - 1 == *inds.rbegin()); + } + if (inds.size() != (offsets_h[ib + 1] - offsets_h[ib])) + std::cout << "error " << i << ' ' << ib << ' ' << inds.size() << "!=" << (offsets_h[ib + 1] - offsets_h[ib]) + << std::endl; + // + assert(inds.size() == (offsets_h[ib + 1] - offsets_h[ib])); + } + } // 50 times + std::cout << "Kernel computation took " << std::chrono::duration_cast(delta).count() / 50. + << " ms per pass" << std::endl; +} + +int main() { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + for (auto const& device : devices) { + Queue queue(device); + bool useShared = false; + + std::cout << "using Global memory" << std::endl; + + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + // go(v_h); + + useShared = true; + + std::cout << "using Shared memory" << std::endl; + + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + go(queue, useShared); + + go(queue, useShared); + go(queue, useShared); + go(queue, 
useShared); + // go(v_h); + } + return 0; +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testSimpleVector.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testSimpleVector.dev.cc new file mode 100644 index 0000000000000..c29b571c6d356 --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testSimpleVector.dev.cc @@ -0,0 +1,101 @@ +// author: Felice Pantaleo, CERN, 2018 +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +struct vector_pushback { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, SimpleVector* foo) const { + for (auto index : elements_with_stride(acc)) + foo->push_back(acc, index); + } +}; + +struct vector_reset { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, SimpleVector* foo) const { + foo->reset(); + } +}; + +struct vector_emplace_back { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, SimpleVector* foo) const { + for (auto index : elements_with_stride(acc)) + foo->emplace_back(acc, index); + } +}; + +int main() { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + // run the test on each device + for (auto const& device : devices) { + Queue queue(device); + auto maxN = 10000; + auto vec_h = make_host_buffer>(queue); + auto vec_d = make_device_buffer>(queue); + auto data_h = make_host_buffer(queue, maxN); + auto data_d = make_device_buffer(queue, 
maxN); + + [[maybe_unused]] auto v = make_SimpleVector(maxN, data_d.data()); + + // Prepare the vec object on the host + auto tmp_vec_h = make_host_buffer>(queue); + make_SimpleVector(tmp_vec_h.data(), maxN, data_d.data()); + assert(tmp_vec_h->size() == 0); + assert(tmp_vec_h->capacity() == static_cast(maxN)); + + // ... and copy the object to the device. + alpaka::memcpy(queue, vec_d, tmp_vec_h); + alpaka::wait(queue); + + int numBlocks = 5; + int numThreadsPerBlock = 256; + const auto workDiv = make_workdiv(numBlocks, numThreadsPerBlock); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv, vector_pushback(), vec_d.data())); + alpaka::wait(queue); + + alpaka::memcpy(queue, vec_h, vec_d); + alpaka::wait(queue); + printf("vec_h->size()=%d, numBlocks * numThreadsPerBlock=%d, maxN=%d\n", + vec_h->size(), + numBlocks * numThreadsPerBlock, + maxN); + assert(vec_h->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN)); + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv, vector_reset(), vec_d.data())); + alpaka::wait(queue); + + alpaka::memcpy(queue, vec_h, vec_d); + alpaka::wait(queue); + + assert(vec_h->size() == 0); + + alpaka::enqueue(queue, alpaka::createTaskKernel(workDiv, vector_emplace_back(), vec_d.data())); + alpaka::wait(queue); + + alpaka::memcpy(queue, vec_h, vec_d); + alpaka::wait(queue); + + assert(vec_h->size() == (numBlocks * numThreadsPerBlock < maxN ? 
numBlocks * numThreadsPerBlock : maxN)); + + alpaka::memcpy(queue, data_h, data_d); + } + std::cout << "TEST PASSED" << std::endl; + return 0; +} diff --git a/HeterogeneousCore/AlpakaInterface/test/alpaka/testWorkDivision.dev.cc b/HeterogeneousCore/AlpakaInterface/test/alpaka/testWorkDivision.dev.cc new file mode 100644 index 0000000000000..ce85ad42cb0f4 --- /dev/null +++ b/HeterogeneousCore/AlpakaInterface/test/alpaka/testWorkDivision.dev.cc @@ -0,0 +1,191 @@ +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +using namespace cms::alpakatools; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +// Kernel running a loop over threads/elements +// One function with multiple flavors + +// The type of elements_with_stride +enum class RangeType { Default, ExtentLimited, ExtentLimitedWithShift }; + +// The concurrency scope between threads +enum class LoopScope { Block, Grid }; + +// Utility for one time initializations +template +bool constexpr firstInLoopRange(TAcc const& acc) { + if constexpr (loopScope == LoopScope::Block) + return !alpaka::getIdx(acc)[0u]; + if constexpr (loopScope == LoopScope::Grid) + return !alpaka::getIdx(acc)[0u]; + assert(false); +} + +template +size_t constexpr expectedCount(TAcc const& acc, size_t size, size_t shift) { + if constexpr (rangeType == RangeType::ExtentLimitedWithShift) + return shift < size ? 
size - shift : 0; + else if constexpr (rangeType == RangeType::ExtentLimited) + return size; + else /* rangeType == RangeType::Default */ + if constexpr (loopScope == LoopScope::Block) + return alpaka::getWorkDiv(acc)[0u]; + else + return alpaka::getWorkDiv(acc)[0u]; +} + +template +size_t constexpr expectedCount(WorkDiv1D const& workDiv, size_t size, size_t shift) { + if constexpr (rangeType == RangeType::ExtentLimitedWithShift) + return shift < size ? size - shift : 0; + else if constexpr (rangeType == RangeType::ExtentLimited) + return size; + else /* rangeType == RangeType::Default */ + if constexpr (loopScope == LoopScope::Block) + return workDiv.m_blockThreadExtent[0u] * workDiv.m_threadElemExtent[0u]; + else + return workDiv.m_gridBlockExtent[0u] * workDiv.m_blockThreadExtent[0u] * workDiv.m_threadElemExtent[0u]; +} + +template +struct testWordDivisionDefaultRange { + template + ALPAKA_FN_ACC void operator()(TAcc const& acc, size_t size, size_t shift, size_t* globalCounter) const { + size_t& counter = + (loopScope == LoopScope::Grid ? *globalCounter : alpaka::declareSharedVar(acc)); + // Init the counter for block range. Grid range does so my mean of memset. + if constexpr (loopScope == LoopScope::Block) { + if (firstInLoopRange(acc)) + counter = 0; + alpaka::syncBlockThreads(acc); + } + // The loop we are testing + if constexpr (rangeType == RangeType::Default) + for ([[maybe_unused]] auto idx : elements_with_stride(acc)) + alpaka::atomicAdd(acc, &counter, 1ul, alpaka::hierarchy::Blocks{}); + else if constexpr (rangeType == RangeType::ExtentLimited) + for ([[maybe_unused]] auto idx : elements_with_stride(acc, size)) + alpaka::atomicAdd(acc, &counter, 1ul, alpaka::hierarchy::Blocks{}); + else if constexpr (rangeType == RangeType::ExtentLimitedWithShift) + for ([[maybe_unused]] auto idx : elements_with_stride(acc, shift, size)) + alpaka::atomicAdd(acc, &counter, 1ul, alpaka::hierarchy::Blocks{}); + alpaka::syncBlockThreads(acc); + // Check the result. 
Grid range will check by memcpy-ing the result. + if constexpr (loopScope == LoopScope::Block) { + if (firstInLoopRange(acc)) { + auto expected = expectedCount(acc, size, shift); + assert(counter == expected); + } + } + } +}; + +int main() { + // get the list of devices on the current platform + auto const& devices = cms::alpakatools::devices(); + if (devices.empty()) { + std::cout << "No devices available on the platform " << EDM_STRINGIZE(ALPAKA_ACCELERATOR_NAMESPACE) + << ", the test will be skipped.\n"; + return 0; + } + + for (auto const& device : devices) { + // Get global memory + Queue queue(device); + auto counter_d = cms::alpakatools::make_device_buffer(queue); + auto counter_h = cms::alpakatools::make_host_buffer(queue); + alpaka::memset(queue, counter_d, 0); + ssize_t BlockSize = 512; + size_t GridSize = 4; + for (size_t blocks = 1; blocks < GridSize * 3; blocks++) + for (auto sizeFuzz : + std::initializer_list{-10 * BlockSize / 13, -BlockSize / 2, -1, 0, 1, BlockSize / 2}) + for (auto shift : std::initializer_list{0, + 1, + BlockSize / 2, + BlockSize - 1, + BlockSize, + BlockSize + 1, + BlockSize + BlockSize / 2, + 2 * BlockSize - 1, + 2 * BlockSize, + 2 * BlockSize + 1}) { + // Grid level iteration: we need to initialize/check at the grid level + // Default range + alpaka::memset(queue, counter_d, 0); + auto workdiv = make_workdiv(BlockSize, GridSize); + alpaka::enqueue( + queue, + alpaka::createTaskKernel(workdiv, + testWordDivisionDefaultRange{}, + blocks * BlockSize + sizeFuzz, + shift, + counter_d.data())); + alpaka::memcpy(queue, counter_h, counter_d); + alpaka::wait(queue); + auto expected = + expectedCount(workdiv, blocks * BlockSize + sizeFuzz, shift); + assert(*counter_h.data() == expected); + + // ExtentLimited range + alpaka::memset(queue, counter_d, 0); + alpaka::enqueue( + queue, + alpaka::createTaskKernel(workdiv, + testWordDivisionDefaultRange{}, + blocks * BlockSize + sizeFuzz, + shift, + counter_d.data())); + alpaka::memcpy(queue, 
counter_h, counter_d); + alpaka::wait(queue); + expected = + expectedCount(workdiv, blocks * BlockSize + sizeFuzz, shift); + assert(*counter_h.data() == expected); + + // ExtentLimitedWithShift range + alpaka::memset(queue, counter_d, 0); + alpaka::enqueue(queue, + alpaka::createTaskKernel( + workdiv, + testWordDivisionDefaultRange{}, + blocks * BlockSize + sizeFuzz, + shift, + counter_d.data())); + alpaka::memcpy(queue, counter_h, counter_d); + alpaka::wait(queue); + expected = expectedCount( + workdiv, blocks * BlockSize + sizeFuzz, shift); + assert(*counter_h.data() == expected); + + // Block level auto tests + alpaka::enqueue( + queue, + alpaka::createTaskKernel(workdiv, + testWordDivisionDefaultRange{}, + blocks * BlockSize + sizeFuzz, + shift, + counter_d.data())); + alpaka::enqueue( + queue, + alpaka::createTaskKernel(workdiv, + testWordDivisionDefaultRange{}, + blocks * BlockSize + sizeFuzz, + shift, + counter_d.data())); + alpaka::enqueue(queue, + alpaka::createTaskKernel( + workdiv, + testWordDivisionDefaultRange{}, + blocks * BlockSize + sizeFuzz, + shift, + counter_d.data())); + } + alpaka::wait(queue); + } +} \ No newline at end of file diff --git a/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestData.h b/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestData.h index 1c34de98d4b78..9975feda1b92e 100644 --- a/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestData.h +++ b/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestData.h @@ -2,6 +2,7 @@ #define HeterogeneousCore_AlpakaTest_interface_AlpakaESTestData_h #include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "DataFormats/Portable/interface/PortableCollection.h" #include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" #include "HeterogeneousCore/AlpakaInterface/interface/config.h" #include "HeterogeneousCore/AlpakaInterface/interface/memory.h" @@ -32,12 +33,52 @@ namespace cms::alpakatest { private: Buffer buffer_; }; + + // Template-over-device model with 
PortableCollection members + // Demonstrates indirection from one PortableCollection to the other + template + class AlpakaESTestDataE { + public: + using ECollection = PortableCollection; + using EDataCollection = PortableCollection; + + class ConstView { + public: + constexpr ConstView(typename ECollection::ConstView e, typename EDataCollection::ConstView data) + : eView_(e), dataView_(data) {} + + constexpr auto size() const { return eView_.metadata().size(); } + constexpr int val(int i) const { return eView_.val(i); } + constexpr int val2(int i) const { return dataView_.val2(eView_.ind(i)); } + + private: + typename ECollection::ConstView eView_; + typename EDataCollection::ConstView dataView_; + }; + + AlpakaESTestDataE(size_t size, size_t dataSize) : e_(size), data_(dataSize) {} + + AlpakaESTestDataE(ECollection e, EDataCollection data) : e_(std::move(e)), data_(std::move(data)) {} + + ECollection const& e() const { return e_; } + EDataCollection const& data() const { return data_; } + + ConstView view() const { return const_view(); } + ConstView const_view() const { return ConstView(e_.const_view(), data_.const_view()); } + + private: + ECollection e_; + EDataCollection data_; + }; + using AlpakaESTestDataEHost = AlpakaESTestDataE; + } // namespace cms::alpakatest namespace cms::alpakatools { - // Explicit specialization is needed for the template-over-device model + // Explicit specializations are needed for the template-over-device model // - // PortableCollection-based model gets this for free from PortableCollection itself + // PortableCollection-based model gets these for free from PortableCollection itself + template <> struct CopyToDevice> { template @@ -57,6 +98,18 @@ namespace cms::alpakatools { return cms::alpakatest::AlpakaESTestDataB>(std::move(dstBuffer)); } }; + + template <> + struct CopyToDevice { + template + static auto copyAsync(TQueue& queue, cms::alpakatest::AlpakaESTestDataEHost const& srcData) { + using ECopy = CopyToDevice; + using 
EDataCopy = CopyToDevice; + using TDevice = alpaka::Dev; + return cms::alpakatest::AlpakaESTestDataE(ECopy::copyAsync(queue, srcData.e()), + EDataCopy::copyAsync(queue, srcData.data())); + } + }; } // namespace cms::alpakatools #endif diff --git a/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestSoA.h b/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestSoA.h index d947d2b8f2333..425a248e7f378 100644 --- a/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestSoA.h +++ b/HeterogeneousCore/AlpakaTest/interface/AlpakaESTestSoA.h @@ -10,10 +10,14 @@ namespace cms::alpakatest { GENERATE_SOA_LAYOUT(AlpakaESTestSoALayoutA, SOA_COLUMN(int, z)) GENERATE_SOA_LAYOUT(AlpakaESTestSoALayoutC, SOA_COLUMN(int, x)) GENERATE_SOA_LAYOUT(AlpakaESTestSoALayoutD, SOA_COLUMN(int, y)) + GENERATE_SOA_LAYOUT(AlpakaESTestSoALayoutE, SOA_COLUMN(float, val), SOA_COLUMN(int, ind)) + GENERATE_SOA_LAYOUT(AlpakaESTestSoALayoutEData, SOA_COLUMN(float, val2)) using AlpakaESTestSoAA = AlpakaESTestSoALayoutA<>; using AlpakaESTestSoAC = AlpakaESTestSoALayoutC<>; using AlpakaESTestSoAD = AlpakaESTestSoALayoutD<>; + using AlpakaESTestSoAE = AlpakaESTestSoALayoutE<>; + using AlpakaESTestSoAEData = AlpakaESTestSoALayoutEData<>; } // namespace cms::alpakatest #endif diff --git a/HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h b/HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h index 4eca569722b4f..71c3b91d8ba2a 100644 --- a/HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h +++ b/HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h @@ -17,11 +17,15 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { using AlpakaESTestDataDHost = cms::alpakatest::AlpakaESTestDataDHost; using AlpakaESTestDataDDevice = PortableCollection; + + using AlpakaESTestDataEHost = cms::alpakatest::AlpakaESTestDataEHost; + using AlpakaESTestDataEDevice = cms::alpakatest::AlpakaESTestDataE; } // namespace ALPAKA_ACCELERATOR_NAMESPACE // check that the portable device collections for the 
host device are the same as the portable host collections ASSERT_DEVICE_MATCHES_HOST_COLLECTION(AlpakaESTestDataADevice, cms::alpakatest::AlpakaESTestDataAHost); ASSERT_DEVICE_MATCHES_HOST_COLLECTION(AlpakaESTestDataCDevice, cms::alpakatest::AlpakaESTestDataCHost); ASSERT_DEVICE_MATCHES_HOST_COLLECTION(AlpakaESTestDataDDevice, cms::alpakatest::AlpakaESTestDataDHost); +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(AlpakaESTestDataEDevice, cms::alpakatest::AlpakaESTestDataEHost); #endif // HeterogeneousCore_AlpakaTest_interface_alpaka_AlpakaESTestData_h diff --git a/HeterogeneousCore/AlpakaTest/plugins/TestAlpakaAnalyzer.cc b/HeterogeneousCore/AlpakaTest/plugins/TestAlpakaAnalyzer.cc index 7698121a523b1..e1834ff95a31f 100644 --- a/HeterogeneousCore/AlpakaTest/plugins/TestAlpakaAnalyzer.cc +++ b/HeterogeneousCore/AlpakaTest/plugins/TestAlpakaAnalyzer.cc @@ -85,7 +85,8 @@ class TestAlpakaAnalyzer : public edm::global::EDAnalyzer<> { TestAlpakaAnalyzer(edm::ParameterSet const& config) : source_{config.getParameter("source")}, token_{consumes(source_)}, - expectSize_{config.getParameter("expectSize")} { + expectSize_{config.getParameter("expectSize")}, + expectXvalues_{config.getParameter>("expectXvalues")} { if (std::string const& eb = config.getParameter("expectBackend"); not eb.empty()) { expectBackend_ = cms::alpakatools::toBackend(eb); backendToken_ = consumes(edm::InputTag(source_.label(), "backend", source_.process())); @@ -146,7 +147,10 @@ class TestAlpakaAnalyzer : public edm::global::EDAnalyzer<> { assert(view.r() == 1.); for (int32_t i = 0; i < view.metadata().size(); ++i) { auto vi = view[i]; - assert(vi.x() == 0.); + if (not expectXvalues_.empty() and vi.x() != expectXvalues_[i % expectXvalues_.size()]) { + throw cms::Exception("Assert") << "Index " << i << " expected value " + << expectXvalues_[i % expectXvalues_.size()] << ", got " << vi.x(); + } assert(vi.y() == 0.); assert(vi.z() == 0.); assert(vi.id() == i); @@ -168,6 +172,11 @@ class TestAlpakaAnalyzer : 
public edm::global::EDAnalyzer<> { desc.add("source"); desc.add("expectSize", -1) ->setComment("Expected size of the input collection. Values < 0 mean the check is not performed. Default: -1"); + desc.add>("expectXvalues", std::vector(0.)) + ->setComment( + "Expected values of the 'x' field in the input collection. Empty value means to not perform the check. If " + "input collection has more elements than this parameter, the parameter values are looped over. Default: " + "{0.}"); desc.add("expectBackend", "") ->setComment( "Expected backend of the input collection. Empty value means to not perform the check. Default: empty " @@ -181,6 +190,7 @@ class TestAlpakaAnalyzer : public edm::global::EDAnalyzer<> { edm::EDGetTokenT backendToken_; std::optional expectBackend_; const int expectSize_; + const std::vector expectXvalues_; }; #include "FWCore/Framework/interface/MakerMacros.h" diff --git a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.dev.cc b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.dev.cc index 9dab03aac0823..e574da64ef84e 100644 --- a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.dev.cc +++ b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.dev.cc @@ -23,13 +23,11 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { portabletest::TestDeviceCollection::View view, int32_t size, double xvalue) const { - // global index of the thread within the grid - const int32_t thread = alpaka::getIdx(acc)[0u]; const portabletest::Matrix matrix{{1, 2, 3, 4, 5, 6}, {2, 4, 6, 8, 10, 12}, {3, 6, 9, 12, 15, 18}}; const portabletest::Array flags = {{6, 4, 2, 0}}; // set this only once in the whole kernel grid - if (thread == 0) { + if (once_per_grid(acc)) { view.r() = 1.; } @@ -82,4 +80,48 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { alpaka::exec(queue, workDiv, TestAlgoStructKernel{}, object.data(), x, y, z, id); } + class TestAlgoKernelUpdate { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + 
portabletest::TestDeviceCollection::ConstView input, + AlpakaESTestDataEDevice::ConstView esData, + portabletest::TestDeviceCollection::View output) const { + // set this only once in the whole kernel grid + if (once_per_grid(acc)) { + output.r() = input.r(); + } + + // make a strided loop over the kernel grid, covering up to "size" elements + for (int32_t i : elements_with_stride(acc, output.metadata().size())) { + double x = input[i].x(); + if (i < esData.size()) { + x += esData.val(i) + esData.val2(i); + } + output[i] = {x, input[i].y(), input[i].z(), input[i].id(), input[i].flags(), input[i].m()}; + } + } + }; + + portabletest::TestDeviceCollection TestAlgo::update(Queue& queue, + portabletest::TestDeviceCollection const& input, + AlpakaESTestDataEDevice const& esData) const { + portabletest::TestDeviceCollection collection{input->metadata().size(), queue}; + + // use 64 items per group (this value is arbitrary, but it's a reasonable starting point) + uint32_t items = 64; + + // use as many groups as needed to cover the whole problem + uint32_t groups = divide_up_by(collection->metadata().size(), items); + + // map items to + // - threads with a single element per thread on a GPU backend + // - elements within a single thread on a CPU backend + auto workDiv = make_workdiv(groups, items); + + alpaka::exec(queue, workDiv, TestAlgoKernelUpdate{}, input.view(), esData.view(), collection.view()); + + return collection; + } + } // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.h b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.h index e54a606275b37..e9eca3f364b54 100644 --- a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.h +++ b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlgo.h @@ -4,6 +4,7 @@ #include "DataFormats/PortableTestObjects/interface/alpaka/TestDeviceCollection.h" #include "DataFormats/PortableTestObjects/interface/alpaka/TestDeviceObject.h" #include 
"HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h" namespace ALPAKA_ACCELERATOR_NAMESPACE { @@ -12,6 +13,10 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { void fill(Queue& queue, portabletest::TestDeviceCollection& collection, double xvalue = 0.) const; void fillObject( Queue& queue, portabletest::TestDeviceObject& object, double x, double y, double z, int32_t id) const; + + portabletest::TestDeviceCollection update(Queue& queue, + portabletest::TestDeviceCollection const& input, + AlpakaESTestDataEDevice const& esData) const; }; } // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerA.cc b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerA.cc index e0fd86c9a48c1..2bfe90ecf196b 100644 --- a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerA.cc +++ b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerA.cc @@ -1,9 +1,9 @@ -#include "FWCore/Framework/interface/ESTransientHandle.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Utilities/interface/ESGetToken.h" #include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" #include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" #include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" #include "HeterogeneousCore/AlpakaInterface/interface/memory.h" #include "HeterogeneousCore/AlpakaTest/interface/AlpakaESTestRecords.h" #include "HeterogeneousCore/AlpakaTest/interface/ESTestData.h" diff --git a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerC.cc b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerC.cc index 3db197f36e2e4..b6784b717d91d 100644 --- a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerC.cc +++ b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerC.cc 
@@ -1,13 +1,10 @@ -#include "FWCore/Framework/interface/ESTransientHandle.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Utilities/interface/ESGetToken.h" -#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" #include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" #include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" #include "HeterogeneousCore/AlpakaInterface/interface/config.h" #include "HeterogeneousCore/AlpakaInterface/interface/host.h" #include "HeterogeneousCore/AlpakaInterface/interface/memory.h" -#include "HeterogeneousCore/AlpakaTest/interface/AlpakaESTestData.h" #include "HeterogeneousCore/AlpakaTest/interface/AlpakaESTestRecords.h" #include "HeterogeneousCore/AlpakaTest/interface/ESTestData.h" #include "HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h" diff --git a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerE.cc b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerE.cc new file mode 100644 index 0000000000000..1285ac0347706 --- /dev/null +++ b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaESProducerE.cc @@ -0,0 +1,58 @@ +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaTest/interface/AlpakaESTestRecords.h" +#include "HeterogeneousCore/AlpakaTest/interface/ESTestData.h" +#include "HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h" + +#include + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + /** + * This class demonstrates an ESProducer that uses the + * PortableCollection-based data 
model, and that consumes a standard + * host ESProduct and converts the data into PortableCollection, and + * implicitly transfers the data product to device + */ + class TestAlpakaESProducerE : public ESProducer { + public: + TestAlpakaESProducerE(edm::ParameterSet const& iConfig) : ESProducer(iConfig) { + auto cc = setWhatProduced(this); + token_ = cc.consumes(); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + descriptions.addWithDefaultLabel(desc); + } + + std::optional produce(AlpakaESTestRecordC const& iRecord) { + auto const& input = iRecord.get(token_); + + int const edatasize = 2; + AlpakaESTestDataEHost::EDataCollection data(edatasize, cms::alpakatools::host()); + for (int i = 0; i < edatasize; ++i) { + data.view()[i].val2() = i * 10 + 1; + } + + int const esize = 5; + // TODO: pinned allocation? + // TODO: cached allocation? + AlpakaESTestDataEHost::ECollection e(esize, cms::alpakatools::host()); + for (int i = 0; i < esize; ++i) { + e.view()[i].val() = std::abs(input.value()) + i * 2; + e.view()[i].ind() = i % edatasize; + } + return AlpakaESTestDataEHost(std::move(e), std::move(data)); + } + + private: + edm::ESGetToken token_; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(TestAlpakaESProducerE); diff --git a/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaGlobalProducerE.cc b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaGlobalProducerE.cc new file mode 100644 index 0000000000000..95d1423fdf2bc --- /dev/null +++ b/HeterogeneousCore/AlpakaTest/plugins/alpaka/TestAlpakaGlobalProducerE.cc @@ -0,0 +1,59 @@ +#include "DataFormats/PortableTestObjects/interface/alpaka/TestDeviceCollection.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include 
"FWCore/Utilities/interface/InputTag.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/global/EDProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaTest/interface/AlpakaESTestRecords.h" +#include "HeterogeneousCore/AlpakaTest/interface/alpaka/AlpakaESTestData.h" + +#include "TestAlgo.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + /** + * This class demonstrates a global EDProducer that + * - consumes a device ESProduct + * - consumes a device EDProduct + * - produces a device EDProduct (that can get transferred to host automatically) + */ + class TestAlpakaGlobalProducerE : public global::EDProducer<> { + public: + TestAlpakaGlobalProducerE(edm::ParameterSet const& config) + : esToken_(esConsumes(config.getParameter("eventSetupSource"))), + getToken_(consumes(config.getParameter("source"))), + putToken_{produces()} {} + + void produce(edm::StreamID, device::Event& iEvent, device::EventSetup const& iSetup) const override { + auto const& esData = iSetup.getData(esToken_); + auto const& input = iEvent.get(getToken_); + + // run the algorithm, potentially asynchronously + auto deviceProduct = algo_.update(iEvent.queue(), input, esData); + + iEvent.emplace(putToken_, std::move(deviceProduct)); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("eventSetupSource", edm::ESInputTag{}); + desc.add("source", edm::InputTag{}); + + descriptions.addWithDefaultLabel(desc); + } + + private: + const device::ESGetToken esToken_; + const device::EDGetToken getToken_; + const device::EDPutToken putToken_; + + // implementation of the algorithm + TestAlgo algo_; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" 
+DEFINE_FWK_ALPAKA_MODULE(TestAlpakaGlobalProducerE); diff --git a/HeterogeneousCore/AlpakaTest/src/ES_AlpakaESTestData.cc b/HeterogeneousCore/AlpakaTest/src/ES_AlpakaESTestData.cc index 6e5c253fcd9f7..b6b2adaa98d81 100644 --- a/HeterogeneousCore/AlpakaTest/src/ES_AlpakaESTestData.cc +++ b/HeterogeneousCore/AlpakaTest/src/ES_AlpakaESTestData.cc @@ -8,3 +8,6 @@ TYPELOOKUP_DATA_REG(cms::alpakatest::AlpakaESTestDataDHost); // Template-over-device model TYPELOOKUP_DATA_REG(cms::alpakatest::AlpakaESTestDataB); + +// Template-over-device model with PortableCollection members +TYPELOOKUP_DATA_REG(cms::alpakatest::AlpakaESTestDataEHost); diff --git a/HeterogeneousCore/AlpakaTest/src/alpaka/ES_AlpakaESTestData.cc b/HeterogeneousCore/AlpakaTest/src/alpaka/ES_AlpakaESTestData.cc index 8af0c172ef83f..f5093b6bf2e9d 100644 --- a/HeterogeneousCore/AlpakaTest/src/alpaka/ES_AlpakaESTestData.cc +++ b/HeterogeneousCore/AlpakaTest/src/alpaka/ES_AlpakaESTestData.cc @@ -9,3 +9,6 @@ TYPELOOKUP_ALPAKA_DATA_REG(AlpakaESTestDataDDevice); // Template-over-device model #include "HeterogeneousCore/AlpakaTest/interface/AlpakaESTestData.h" TYPELOOKUP_ALPAKA_TEMPLATED_DATA_REG(cms::alpakatest::AlpakaESTestDataB); + +// Template-over-device model with PortableCollection members +TYPELOOKUP_ALPAKA_TEMPLATED_DATA_REG(cms::alpakatest::AlpakaESTestDataE); diff --git a/HeterogeneousCore/AlpakaTest/test/testAlpakaModules_cfg.py b/HeterogeneousCore/AlpakaTest/test/testAlpakaModules_cfg.py index 406833b920eb0..331fac9b84312 100644 --- a/HeterogeneousCore/AlpakaTest/test/testAlpakaModules_cfg.py +++ b/HeterogeneousCore/AlpakaTest/test/testAlpakaModules_cfg.py @@ -54,6 +54,7 @@ srcA = cms.ESInputTag("", "appendedLabel"), srcB = cms.ESInputTag("", "explicitLabel"), ) +process.alpakaESProducerE = cms.ESProducer("TestAlpakaESProducerE@alpaka") process.alpakaESProducerNull = cms.ESProducer("TestAlpakaESProducerNull@alpaka", appendToDataLabel = cms.string("null"), ) @@ -69,6 +70,9 @@ alpaka_rocm_async = 30, ) ) 
+process.alpakaGlobalProducerE = cms.EDProducer("TestAlpakaGlobalProducerE@alpaka", + source = cms.InputTag("alpakaGlobalProducer") +) process.alpakaStreamProducer = cms.EDProducer("TestAlpakaStreamProducer@alpaka", source = cms.InputTag("intProduct"), eventSetupSource = cms.ESInputTag("alpakaESProducerB", "explicitLabel"), @@ -99,6 +103,10 @@ expectSize = cms.int32(10), expectBackend = cms.string("SerialSync") ) +process.alpakaGlobalConsumerE = process.alpakaGlobalConsumer.clone( + source = "alpakaGlobalProducerE", + expectXvalues = cms.vdouble([(i%2)*10+1 + abs(27)+i*2 for i in range(0,5)] + [0]*5) +) process.alpakaStreamConsumer = cms.EDAnalyzer("TestAlpakaAnalyzer", source = cms.InputTag("alpakaStreamProducer"), expectSize = cms.int32(5), @@ -121,8 +129,10 @@ if args.processAcceleratorBackend != "": process.ProcessAcceleratorAlpaka.setBackend(args.processAcceleratorBackend) if args.moduleBackend != "": - for name in ["ESProducerA", "ESProducerB", "ESProducerC", "ESProducerD", "ESProducerNull", - "GlobalProducer", "StreamProducer", "StreamInstanceProducer", "StreamSynchronizingProducer", + for name in ["ESProducerA", "ESProducerB", "ESProducerC", "ESProducerD", "ESProducerE", + "ESProducerNull", + "GlobalProducer", "GlobalProducerE", + "StreamProducer", "StreamInstanceProducer", "StreamSynchronizingProducer", "NullESConsumer"]: mod = getattr(process, "alpaka"+name) mod.alpaka = cms.untracked.PSet(backend = cms.untracked.string(args.moduleBackend)) @@ -131,6 +141,8 @@ def setExpect(m, size): m.expectSize = size m.expectBackend = "CudaAsync" setExpect(process.alpakaGlobalConsumer, size=20) + setExpect(process.alpakaGlobalConsumerE, size=20) + process.alpakaGlobalConsumerE.expectXvalues.extend([0]*(20-10)) setExpect(process.alpakaStreamConsumer, size=25) setExpect(process.alpakaStreamInstanceConsumer, size=36) setExpect(process.alpakaStreamSynchronizingConsumer, size=20) @@ -139,6 +151,8 @@ def setExpect(m, size): m.expectSize = size m.expectBackend = "ROCmAsync" 
setExpect(process.alpakaGlobalConsumer, size = 30) + setExpect(process.alpakaGlobalConsumerE, size = 30) + process.alpakaGlobalConsumerE.expectXvalues.extend([0]*(30-10)) setExpect(process.alpakaStreamConsumer, size = 125) setExpect(process.alpakaStreamInstanceConsumer, size = 216) setExpect(process.alpakaStreamSynchronizingConsumer, size = 30) @@ -156,12 +170,14 @@ def setExpect(m, size): process.t = cms.Task( process.intProduct, process.alpakaGlobalProducer, + process.alpakaGlobalProducerE, process.alpakaStreamProducer, process.alpakaStreamInstanceProducer, process.alpakaStreamSynchronizingProducer ) process.p = cms.Path( process.alpakaGlobalConsumer+ + process.alpakaGlobalConsumerE+ process.alpakaStreamConsumer+ process.alpakaStreamInstanceConsumer+ process.alpakaStreamSynchronizingConsumer+ diff --git a/HeterogeneousCore/AlpakaTest/test/writer.py b/HeterogeneousCore/AlpakaTest/test/writer.py index bd8d2775b31ed..d23ac528629b8 100644 --- a/HeterogeneousCore/AlpakaTest/test/writer.py +++ b/HeterogeneousCore/AlpakaTest/test/writer.py @@ -1,4 +1,5 @@ import FWCore.ParameterSet.Config as cms +from HeterogeneousCore.AlpakaCore.functions import * process = cms.Process('Writer') @@ -31,16 +32,9 @@ ) # run a second producer explicitly on the cpu -process.testProducerSerial = cms.EDProducer('alpaka_serial_sync::TestAlpakaProducer', +process.testProducerSerial = makeSerialClone(process.testProducer, size = cms.int32(99) ) -# an alternative approach would be to use -#process.testProducerSerial = cms.EDProducer('TestAlpakaProducer@alpaka', -# size = cms.int32(99), -# alpaka = cms.untracked.PSet( -# backend = cms.untracked.string("serial_sync") -# ) -#) # analyse the second set of products process.testAnalyzerSerial = cms.EDAnalyzer('TestAlpakaAnalyzer', diff --git a/HeterogeneousCore/CUDAUtilities/test/oneRadixSort_t.cu b/HeterogeneousCore/CUDAUtilities/test/oneRadixSort_t.cu index 8b6ffd70fd4e6..f10242d858f79 100644 --- 
a/HeterogeneousCore/CUDAUtilities/test/oneRadixSort_t.cu +++ b/HeterogeneousCore/CUDAUtilities/test/oneRadixSort_t.cu @@ -5,6 +5,7 @@ #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/radixSort.h" #include +#include using FLOAT = double; @@ -140,6 +141,9 @@ namespace { int main() { cms::cudatest::requireDevices(); + std::random_device rd; + std::mt19937 g(rd()); + FLOAT* gpu_input; int* gpu_product; @@ -189,7 +193,7 @@ int main() { cudaCheck(cudaMemcpy(gpu_input, input, sizeof(FLOAT) * nmax, cudaMemcpyHostToDevice)); for (int k = 2; k <= nmax; k++) { - std::random_shuffle(input, input + k); + std::shuffle(input, input + k, g); printf("Test with %d items\n", k); // sort on the GPU testWrapper(gpu_input, gpu_product, k, false); diff --git a/HeterogeneousCore/CUDAUtilities/test/radixSort_t.cu b/HeterogeneousCore/CUDAUtilities/test/radixSort_t.cu index 209ce97347e25..42ab6bfdeb8c1 100644 --- a/HeterogeneousCore/CUDAUtilities/test/radixSort_t.cu +++ b/HeterogeneousCore/CUDAUtilities/test/radixSort_t.cu @@ -135,7 +135,7 @@ void go(bool useShared) { offsets[10] = 3297 + offsets[9]; } - std::random_shuffle(v, v + N); + std::shuffle(v, v + N, eng); auto v_d = cms::cuda::make_device_unique(N, nullptr); auto ind_d = cms::cuda::make_device_unique(N, nullptr); @@ -151,10 +151,15 @@ void go(bool useShared) { auto ntXBl __attribute__((unused)) = 1 == i % 4 ? 256 : 256; delta -= (std::chrono::high_resolution_clock::now() - start); + // The MaxSize is the max size we allow between offsets (i.e. biggest set to sort when using shared memory). 
constexpr int MaxSize = 256 * 32; if (useShared) - cms::cuda::launch( - radixSortMultiWrapper, {blocks, ntXBl, MaxSize * 2}, v_d.get(), ind_d.get(), off_d.get(), nullptr); + cms::cuda::launch(radixSortMultiWrapper, + {blocks, ntXBl, MaxSize * 2 /* sizeof(uint16_t) */}, + v_d.get(), + ind_d.get(), + off_d.get(), + nullptr); else cms::cuda::launch( radixSortMultiWrapper2, {blocks, ntXBl}, v_d.get(), ind_d.get(), off_d.get(), ws_d.get()); diff --git a/HeterogeneousCore/ROCmServices/plugins/ROCmService.cc b/HeterogeneousCore/ROCmServices/plugins/ROCmService.cc index d8e598e8cad15..08d87bfc9858e 100644 --- a/HeterogeneousCore/ROCmServices/plugins/ROCmService.cc +++ b/HeterogeneousCore/ROCmServices/plugins/ROCmService.cc @@ -6,9 +6,8 @@ #include #include -/* -#include -*/ +#include +#include #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" @@ -18,9 +17,7 @@ #include "FWCore/Utilities/interface/ResourceInformation.h" #include "HeterogeneousCore/ROCmServices/interface/ROCmInterface.h" #include "HeterogeneousCore/ROCmUtilities/interface/hipCheck.h" -/* -#include "HeterogeneousCore/ROCmUtilities/interface/nvmlCheck.h" -*/ +#include "HeterogeneousCore/ROCmUtilities/interface/rsmiCheck.h" class ROCmService : public ROCmInterface { public: @@ -51,6 +48,7 @@ class ROCmService : public ROCmInterface { }; void setHipLimit(hipLimit_t limit, const char* name, size_t request) { +#if HIP_VERSION >= 50400000 // read the current device int device; hipCheck(hipGetDevice(&device)); @@ -70,10 +68,15 @@ void setHipLimit(hipLimit_t limit, const char* name, size_t request) { edm::LogWarning("ROCmService") << "ROCm device " << device << ": limit \"" << name << "\" set to " << value << " instead of requested " << request; } +#else + edm::LogWarning("ROCmService") << "ROCm versions below 5.4.0 do not support setting device limits."; +#endif } std::string decodeVersion(int version) { - return std::to_string(version / 
1000) + '.' + std::to_string(version % 1000 / 10); + // decode 50631061 as 5.6.31061 + return std::to_string(version / 10000000) + '.' + std::to_string(version / 100000 % 100) + '.' + + std::to_string(version % 100000); } /// Constructor @@ -91,13 +94,11 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU } computeCapabilities_.reserve(numberOfDevices_); - /* - // AMD system driver version, e.g. 470.57.02 - char systemDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; - nvmlCheck(nvmlInitWithFlags(NVML_INIT_FLAG_NO_GPUS | NVML_INIT_FLAG_NO_ATTACH)); - nvmlCheck(nvmlSystemGetDriverVersion(systemDriverVersion, sizeof(systemDriverVersion))); - nvmlCheck(nvmlShutdown()); - */ + // AMD system driver version, e.g. 5.16.9.22.20 or 6.1.5 + char systemDriverVersion[256]; + rsmiCheck(rsmi_init(0x00)); + rsmiCheck(rsmi_version_str_get(RSMI_SW_COMP_DRIVER, systemDriverVersion, sizeof(systemDriverVersion) - 1)); + rsmiCheck(rsmi_shut_down()); // ROCm driver version, e.g. 11.4 // the full version, like 11.4.1 or 11.4.100, is not reported @@ -111,36 +112,28 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU edm::LogInfo log("ROCmService"); if (verbose_) { - /* - log << "AMD driver: " << systemDriverVersion << '\n'; - */ - log << "ROCm driver API: " << decodeVersion(driverVersion) << /*" (compiled with " << decodeVersion(ROCm_VERSION) - << ")" */ - "\n"; - log << "ROCm runtime API: " << decodeVersion(runtimeVersion) - << /*" (compiled with " << decodeVersion(ROCmRT_VERSION) - << ")" */ - "\n"; + log << "AMD kernel driver: " << systemDriverVersion << '\n'; + log << "ROCm driver API: " << decodeVersion(driverVersion) << " (compiled with ROCm " << +#ifdef ROCM_BUILD_INFO + // ROCM_BUILD_INFO has been introduced in ROCm 5.5.0 + ROCM_BUILD_INFO +#else + ROCM_VERSION_MAJOR << '.' << ROCM_VERSION_MINOR << '.' 
<< ROCM_VERSION_PATCH +#endif + << ")\n"; + log << "ROCm runtime API: " << decodeVersion(runtimeVersion) << " (compiled with HIP " << HIP_VERSION_MAJOR << '.' + << HIP_VERSION_MINOR << '.' << HIP_VERSION_PATCH << ")\n"; log << "ROCm runtime successfully initialised, found " << numberOfDevices_ << " compute devices.\n"; } else { log << "ROCm runtime version " << decodeVersion(runtimeVersion) << ", driver version " - << decodeVersion(driverVersion) - /* - << ", AMD driver version " << systemDriverVersion - */ - ; + << decodeVersion(driverVersion) << ", AMD driver version " << systemDriverVersion; } +#if HIP_VERSION >= 50400000 auto const& limits = config.getUntrackedParameter("limits"); - /* - auto printfFifoSize = limits.getUntrackedParameter("hipLimitPrintfFifoSize"); - */ auto stackSize = limits.getUntrackedParameter("hipLimitStackSize"); auto mallocHeapSize = limits.getUntrackedParameter("hipLimitMallocHeapSize"); - /* - auto devRuntimeSyncDepth = limits.getUntrackedParameter("hipLimitDevRuntimeSyncDepth"); - auto devRuntimePendingLaunchCount = limits.getUntrackedParameter("hipLimitDevRuntimePendingLaunchCount"); - */ +#endif std::set models; @@ -158,17 +151,15 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU // compute capabilities computeCapabilities_.emplace_back(properties.major, properties.minor); if (verbose_) { - log << " compute capability: " << properties.major << "." 
<< properties.minor; + log << " compute capability: " << properties.gcnArchName; + } else { + log << " (" << properties.gcnArchName << ")"; } - log << " (sm_" << properties.major << properties.minor << ")"; if (verbose_) { log << '\n'; log << " streaming multiprocessors: " << std::setw(13) << properties.multiProcessorCount << '\n'; log << " ROCm cores: " << std::setw(28) << "not yet implemented" << '\n'; - /* - log << " single to double performance: " << std::setw(8) << properties.singleToDoublePrecisionPerfRatio - << ":1\n"; - */ + // ROCm does not provide single to double performance ratio } // compute mode @@ -189,27 +180,20 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU hipCheck(hipSetDevice(i)); hipCheck(hipSetDeviceFlags(hipDeviceScheduleAuto | hipDeviceMapHost)); - // read the free and total amount of memory available for allocation by the device, in bytes. - // see the documentation of hipMemGetInfo() for more information. if (verbose_) { - size_t freeMemory, totalMemory; + // read the free and total amount of memory available for allocation by the device, in bytes. + // see the documentation of hipMemGetInfo() for more information. 
+ size_t freeMemory = 0; + size_t totalMemory = 0; hipCheck(hipMemGetInfo(&freeMemory, &totalMemory)); log << " memory: " << std::setw(6) << freeMemory / (1 << 20) << " MB free / " << std::setw(6) << totalMemory / (1 << 20) << " MB total\n"; - log << " constant memory: " << std::setw(6) << properties.totalConstMem / (1 << 10) << " kB\n"; - log << " L2 cache size: " << std::setw(6) << properties.l2CacheSize / (1 << 10) << " kB\n"; - } + log << " constant memory: " << std::setw(8) << properties.totalConstMem / (1 << 10) << " kB\n"; + log << " L2 cache size: " << std::setw(8) << properties.l2CacheSize / (1 << 10) << " kB\n"; - // L1 cache behaviour - if (verbose_) { - /* - static constexpr const char* l1CacheModeDescription[] = { - "unknown", "local memory", "global memory", "local and global memory"}; - int l1CacheMode = properties.localL1CacheSupported + 2 * properties.globalL1CacheSupported; - log << " L1 cache mode:" << std::setw(26) << std::right << l1CacheModeDescription[l1CacheMode] << '\n'; log << '\n'; - */ + // other capabilities log << "Other capabilities\n"; log << " " << (properties.canMapHostMemory ? "can" : "cannot") << " map host memory into the ROCm address space for use with hipHostAlloc()/hipHostGetDevicePointer()\n"; @@ -217,12 +201,6 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU << " coherently accessing pageable memory without calling hipHostRegister() on it\n"; log << " " << (properties.pageableMemoryAccessUsesHostPageTables ? "can" : "cannot") << " access pageable memory via the host's page tables\n"; - /* - log << " " << (properties.canUseHostPointerForRegisteredMem ? "can" : "cannot") - << " access host registered memory at the same virtual address as the host\n"; - log << " " << (properties.unifiedAddressing ? "shares" : "does not share") - << " a unified address space with the host\n"; - */ log << " " << (properties.managedMemory ? 
"supports" : "does not support") << " allocating managed memory on this system\n"; log << " " << (properties.concurrentManagedAccess ? "can" : "cannot") @@ -275,13 +253,7 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU // set and read the ROCm resource limits. // see the documentation of hipDeviceSetLimit() for more information. - /* - // hipLimitPrintfFifoSize controls the size in bytes of the shared FIFO used by the - // printf() device system call. - if (printfFifoSize >= 0) { - setHipLimit(hipLimitPrintfFifoSize, "hipLimitPrintfFifoSize", printfFifoSize); - } - */ +#if HIP_VERSION >= 50400000 // hipLimitStackSize controls the stack size in bytes of each GPU thread. if (stackSize >= 0) { setHipLimit(hipLimitStackSize, "hipLimitStackSize", stackSize); @@ -291,41 +263,17 @@ ROCmService::ROCmService(edm::ParameterSet const& config) : verbose_(config.getU if (mallocHeapSize >= 0) { setHipLimit(hipLimitMallocHeapSize, "hipLimitMallocHeapSize", mallocHeapSize); } - /* - if ((properties.major > 3) or (properties.major == 3 and properties.minor >= 5)) { - // hipLimitDevRuntimeSyncDepth controls the maximum nesting depth of a grid at which - // a thread can safely call hipDeviceSynchronize(). - if (devRuntimeSyncDepth >= 0) { - setHipLimit(hipLimitDevRuntimeSyncDepth, "hipLimitDevRuntimeSyncDepth", devRuntimeSyncDepth); - } - // hipLimitDevRuntimePendingLaunchCount controls the maximum number of outstanding - // device runtime launches that can be made from the current device. 
- if (devRuntimePendingLaunchCount >= 0) { - setHipLimit( - hipLimitDevRuntimePendingLaunchCount, "hipLimitDevRuntimePendingLaunchCount", devRuntimePendingLaunchCount); - } - } - */ +#endif if (verbose_) { size_t value; log << "ROCm limits\n"; - /* - hipCheck(hipDeviceGetLimit(&value, hipLimitPrintfFifoSize)); - log << " printf buffer size: " << std::setw(10) << value / (1 << 20) << " MB\n"; - */ +#if HIP_VERSION >= 50400000 hipCheck(hipDeviceGetLimit(&value, hipLimitStackSize)); log << " stack size: " << std::setw(10) << value / (1 << 10) << " kB\n"; +#endif hipCheck(hipDeviceGetLimit(&value, hipLimitMallocHeapSize)); log << " malloc heap size: " << std::setw(10) << value / (1 << 20) << " MB\n"; - /* - if ((properties.major > 3) or (properties.major == 3 and properties.minor >= 5)) { - hipCheck(hipDeviceGetLimit(&value, hipLimitDevRuntimeSyncDepth)); - log << " runtime sync depth: " << std::setw(10) << value << '\n'; - hipCheck(hipDeviceGetLimit(&value, hipLimitDevRuntimePendingLaunchCount)); - log << " runtime pending launch count: " << std::setw(10) << value << '\n'; - } - */ } } @@ -365,22 +313,16 @@ void ROCmService::fillDescriptions(edm::ConfigurationDescriptions& descriptions) desc.addUntracked("enabled", true); desc.addUntracked("verbose", false); +#if HIP_VERSION >= 50400000 edm::ParameterSetDescription limits; - /* - limits.addUntracked("hipLimitPrintfFifoSize", -1) - ->setComment("Size in bytes of the shared FIFO used by the printf() device system call."); - */ limits.addUntracked("hipLimitStackSize", -1)->setComment("Stack size in bytes of each GPU thread."); limits.addUntracked("hipLimitMallocHeapSize", -1) ->setComment("Size in bytes of the heap used by the malloc() and free() device system calls."); - limits.addUntracked("hipLimitDevRuntimeSyncDepth", -1) - ->setComment("Maximum nesting depth of a grid at which a thread can safely call hipDeviceSynchronize()."); - limits.addUntracked("hipLimitDevRuntimePendingLaunchCount", -1) - ->setComment("Maximum 
number of outstanding device runtime launches that can be made from the current device."); desc.addUntracked("limits", limits) ->setComment( "See the documentation of hipDeviceSetLimit for more information.\nSetting any of these options to -1 keeps " "the default value."); +#endif descriptions.add("ROCmService", desc); } diff --git a/HeterogeneousCore/ROCmUtilities/bin/rocmComputeCapabilities.cpp b/HeterogeneousCore/ROCmUtilities/bin/rocmComputeCapabilities.cpp index 06a1ce18fdbb6..6c14cf2dc99d6 100644 --- a/HeterogeneousCore/ROCmUtilities/bin/rocmComputeCapabilities.cpp +++ b/HeterogeneousCore/ROCmUtilities/bin/rocmComputeCapabilities.cpp @@ -21,9 +21,7 @@ int main() { for (int i = 0; i < devices; ++i) { hipDeviceProp_t properties; hipCheck(hipGetDeviceProperties(&properties, i)); - std::stringstream arch; - arch << "gfx" << properties.gcnArch; - std::cout << std::setw(4) << i << " " << std::setw(8) << arch.str() << " " << properties.name; + std::cout << std::setw(4) << i << " " << std::setw(8) << properties.gcnArchName << " " << properties.name; if (not isRocmDeviceSupported(i)) { std::cout << " (unsupported)"; } diff --git a/HeterogeneousCore/ROCmUtilities/interface/rsmiCheck.h b/HeterogeneousCore/ROCmUtilities/interface/rsmiCheck.h new file mode 100644 index 0000000000000..38f921be8aeb3 --- /dev/null +++ b/HeterogeneousCore/ROCmUtilities/interface/rsmiCheck.h @@ -0,0 +1,55 @@ +#ifndef HeterogeneousCore_ROCmUtilities_rsmiCheck_h +#define HeterogeneousCore_ROCmUtilities_rsmiCheck_h + +// C++ standard headers +#include +#include +#include +#include +#include + +// ROCm headers +#include + +// CMSSW headers +#include "FWCore/Utilities/interface/Likely.h" + +namespace cms { + namespace rocm { + + [[noreturn]] inline void abortOnRsmiError(const char* file, + int line, + const char* cmd, + const char* error, + const char* message, + std::string_view description = std::string_view()) { + std::ostringstream out; + out << "\n"; + out << file << ", line " << line << 
":\n"; + out << "rsmiCheck(" << cmd << ");\n"; + out << error << ": " << message << "\n"; + if (!description.empty()) + out << description << "\n"; + throw std::runtime_error(out.str()); + } + + inline bool rsmiCheck_(const char* file, + int line, + const char* cmd, + rsmi_status_t result, + std::string_view description = std::string_view()) { + if (LIKELY(result == RSMI_STATUS_SUCCESS)) + return true; + + std::string error = "ROCm SMI Error " + std::to_string(result); + const char* message; + rsmi_status_string(result, &message); + abortOnRsmiError(file, line, cmd, error.c_str(), message, description); + return false; + } + } // namespace rocm +} // namespace cms + +#define rsmiCheck(ARG, ...) (cms::rocm::rsmiCheck_(__FILE__, __LINE__, #ARG, (ARG), ##__VA_ARGS__)) + +#endif // HeterogeneousCore_ROCmUtilities_rsmiCheck_h diff --git a/HeterogeneousCore/SonicTriton/README.md b/HeterogeneousCore/SonicTriton/README.md index 314b5d4d15986..7eed0f67989fa 100644 --- a/HeterogeneousCore/SonicTriton/README.md +++ b/HeterogeneousCore/SonicTriton/README.md @@ -124,14 +124,18 @@ In a SONIC Triton producer, the basic flow should follow this pattern: ## Services +### `cmsTriton` + A script [`cmsTriton`](./scripts/cmsTriton) is provided to launch and manage local servers. 
-The script has two operations (`start` and `stop`) and the following options: +The script has three operations (`start`, `stop`, `check`) and the following options: * `-c`: don't cleanup temporary dir (for debugging) +* `-C [dir]`: directory containing Nvidia compatibility drivers (checks CMSSW_BASE by default if available) * `-D`: dry run: print container commands rather than executing them * `-d`: use Docker instead of Apptainer * `-f`: force reuse of (possibly) existing container instance * `-g`: use GPU instead of CPU * `-i` [name]`: server image name (default: fastml/triton-torchgeo:22.07-py3-geometric) +* `-I [num]`: number of model instances (default: 0 -> means no local editing of config files) * `-M [dir]`: model repository (can be given more than once) * `-m [dir]`: specific model directory (can be given more than one) * `-n [name]`: name of container instance, also used for hidden temporary dir (default: triton_server_instance) @@ -148,6 +152,7 @@ Additional details and caveats: * The `start` and `stop` operations for a given container instance should always be executed in the same directory if a relative path is used for the hidden temporary directory (including the default from the container instance name), in order to ensure that everything is properly cleaned up. +* The `check` operation just checks if the server can run on the current system, based on driver compatibility. * A model repository is a folder that contains multiple model directories, while a model directory contains the files for a specific file. (In the example below, `$CMSSW_BASE/src/HeterogeneousCore/SonicTriton/data/models` is a model repository, while `$CMSSW_BASE/src/HeterogeneousCore/SonicTriton/data/models/resnet50_netdef` is a model directory.) 
@@ -155,6 +160,24 @@ If a model repository is provided, all of the models it contains will be provide * Older versions of Apptainer (Singularity) have a short timeout that may cause launching the server to fail the first time the command is executed. The `-r` (retry) flag exists to work around this issue. +### `cmsTritonConfigTool` + +The `config.pbtxt` files used for model configuration are written in the protobuf text format. +To ease modification of these files, a dedicated Python tool [`cmsTritonConfigTool`](./scripts/cmsTritonConfigTool) is provided. +The tool has several modes of operation (each with its own options, which can be viewed using `--help`): +* `schema`: displays all field names and types for the Triton ModelConfig message class. +* `view`: displays the field values from a provided `config.pbtxt` file. +* `edit`: allows changing any field value in a `config.pbtxt` file. Non-primitive types are specified using JSON format. +* `checksum`: checks and updates checksums for model files (to enforce versioning). +* `versioncheck`: checks and updates checksums for all `config.pbtxt` files in `$CMSSW_SEARCH_PATH`. +* `threadcontrol`: adds job- and ML framework-specific thread control settings. + +The `edit` mode is intended for generic modifications, and only supports overwriting existing values +(not modifying, removing, deleting, etc.). +Additional dedicated modes, like `checksum` and `threadcontrol`, can easily be added for more complicated tasks. + +### `TritonService` + A central `TritonService` is provided to keep track of all available servers and which models they can serve. The servers will automatically be assigned to clients at startup. If some models are not served by any server, the `TritonService` can launch a fallback server using the `cmsTriton` script described above. 
diff --git a/HeterogeneousCore/SonicTriton/interface/triton_utils.h b/HeterogeneousCore/SonicTriton/interface/triton_utils.h index 159da808edcab..d6c7612a5159c 100644 --- a/HeterogeneousCore/SonicTriton/interface/triton_utils.h +++ b/HeterogeneousCore/SonicTriton/interface/triton_utils.h @@ -83,6 +83,7 @@ extern template std::string triton_utils::printColl(const edm::Span& coll, const std::string& delim); extern template std::string triton_utils::printColl(const std::vector& coll, const std::string& delim); +extern template std::string triton_utils::printColl(const std::vector& coll, const std::string& delim); extern template std::string triton_utils::printColl(const std::unordered_set& coll, const std::string& delim); diff --git a/HeterogeneousCore/SonicTriton/scripts/cmsTriton b/HeterogeneousCore/SonicTriton/scripts/cmsTriton index addbfb2c247c7..9c84be2b62616 100755 --- a/HeterogeneousCore/SonicTriton/scripts/cmsTriton +++ b/HeterogeneousCore/SonicTriton/scripts/cmsTriton @@ -34,7 +34,7 @@ get_sandbox(){ usage() { ECHO="echo -e" - $ECHO "cmsTriton [options] [start|stop]" + $ECHO "cmsTriton [options] [start|stop|check]" $ECHO $ECHO "Options:" $ECHO "-c \t don't cleanup temporary dir (for debugging)" @@ -338,57 +338,6 @@ wait_server(){ echo "server is ready!" } -edit_model(){ - MODELNAME=$1 - NUMINSTANCES=$2 - - cp -r $MODELNAME $TMPDIR/$LOCALMODELREPO/ - COPY_EXIT=$? 
- if [ "$COPY_EXIT" -ne 0 ]; then - echo "Could not copy $MODELNAME into $TMPDIR/$LOCALMODELREPO/" - exit "$COPY_EXIT" - fi - IFS='/' read -ra ADDR <<< "$MODELNAME" - CONFIG=$TMPDIR/$LOCALMODELREPO/${ADDR[-1]}/config.pbtxt - - PLATFORM=$(grep -m 1 "^platform:" "$CONFIG") - - if [[ $PLATFORM == *"ensemble"* ]]; then - #recurse over submodels of ensemble model - MODELLOC=$(echo ""${ADDR[@]:0:${#ADDR[@]}-1} | sed "s/ /\//g") - SUBNAME=$(grep "model_name:" "$CONFIG" | sed 's/model_name://; s/"//g') - for SUBMODEL in ${SUBNAME}; do - SUBMODEL=${MODELLOC}/${SUBMODEL} - edit_model $SUBMODEL "$INSTANCES" - done - else - #This is not an ensemble model, so we should edit the config file - cat <> $CONFIG -instance_group [ - { - count: $NUMINSTANCES - kind: KIND_CPU - } -] - -EOF - if [[ $PLATFORM == *"onnx"* ]]; then - cat <> $CONFIG -parameters { key: "intra_op_thread_count" value: { string_value: "1" } } -parameters { key: "inter_op_thread_count" value: { string_value: "1" } } -EOF - elif [[ $PLATFORM == *"tensorflow"* ]]; then - cat <> $CONFIG -parameters { key: "TF_NUM_INTRA_THREADS" value: { string_value: "1" } } -parameters { key: "TF_NUM_INTER_THREADS" value: { string_value: "1" } } -parameters { key: "TF_USE_PER_SESSION_THREADS" value: { string_value: "1" } } -EOF - else - echo "Warning: thread (instance) control not implemented for $PLATFORM" - fi - fi -} - list_models(){ # make list of model repositories LOCALMODELREPO="local_model_repo" @@ -411,7 +360,12 @@ list_models(){ MODEL="$(dirname "$MODEL")" fi if [ "$INSTANCES" -gt 0 ]; then - edit_model $MODEL "$INSTANCES" + $DRYRUN cmsTritonConfigTool threadcontrol -c ${MODEL}/config.pbtxt --copy $TMPDIR/$LOCALMODELREPO --nThreads $INSTANCES + TOOL_EXIT=$? 
+ if [ "$TOOL_EXIT" -ne 0 ]; then + echo "Could not apply threadcontrol to $MODEL" + exit "$TOOL_EXIT" + fi else REPOS+=("$(dirname "$MODEL")") fi diff --git a/HeterogeneousCore/SonicTriton/scripts/cmsTritonConfigTool b/HeterogeneousCore/SonicTriton/scripts/cmsTritonConfigTool new file mode 100755 index 0000000000000..9a1ef54c57da6 --- /dev/null +++ b/HeterogeneousCore/SonicTriton/scripts/cmsTritonConfigTool @@ -0,0 +1,456 @@ +#!/usr/bin/env python3 + +import os, sys, json, pathlib, shutil +from collections import OrderedDict +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, RawTextHelpFormatter, RawDescriptionHelpFormatter, Action, Namespace +from enum import Enum +from google.protobuf import text_format, json_format, message, descriptor +from google.protobuf.internal import type_checkers +from tritonclient import grpc + +# convenience definition +# (from ConfigArgParse) +class ArgumentDefaultsRawHelpFormatter( + ArgumentDefaultsHelpFormatter, + RawTextHelpFormatter, + RawDescriptionHelpFormatter): + """HelpFormatter that adds default values AND doesn't do line-wrapping""" +pass + +class DictAction(Action): + val_type = None + def __call__(self, parser, namespace, values, option_string=None): + if self.val_type is None: + self.val_type = self.type + result = {} + if len(values)%2!=0: + parser.error("{} args must come in pairs".format(self.dest)) + for i in range(0, len(values), 2): + result[values[i]] = self.val_type(values[i+1]) + setattr(namespace, self.dest, result) + +class TritonChecksumStatus(Enum): + CORRECT = 0 + MISSING = 1 + INCORRECT = 2 + +message_classes = {cls.__name__ : cls for cls in message.Message.__subclasses__()} + +_FieldDescriptor = descriptor.FieldDescriptor +cpp_to_python = { + _FieldDescriptor.CPPTYPE_INT32: int, + _FieldDescriptor.CPPTYPE_INT64: int, + _FieldDescriptor.CPPTYPE_UINT32: int, + _FieldDescriptor.CPPTYPE_UINT64: int, + _FieldDescriptor.CPPTYPE_DOUBLE: float, + _FieldDescriptor.CPPTYPE_FLOAT: float, + 
_FieldDescriptor.CPPTYPE_BOOL: bool, + _FieldDescriptor.CPPTYPE_STRING: str, +} +checker_to_type = {val.__class__:cpp_to_python[key] for key,val in type_checkers._VALUE_CHECKERS.items()} +# for some reason, this one is not in the map +checker_to_type[type_checkers.UnicodeValueChecker] = str + +kind_to_int = {v.name:v.number for v in grpc.model_config_pb2._MODELINSTANCEGROUP_KIND.values} +thread_control_parameters = { + "onnx": ["intra_op_thread_count", "inter_op_thread_count"], + "tensorflow": ["TF_NUM_INTRA_THREADS", "TF_NUM_INTER_THREADS", "TF_USE_PER_SESSION_THREADS"], +} + +def get_type(obj): + obj_type = obj.__class__.__name__ + entry_type = None + entry_class = None + if obj_type=="RepeatedCompositeFieldContainer" or obj_type=="MessageMap": + entry_type = obj._message_descriptor.name + entry_class = message_classes[entry_type] + elif obj_type=="RepeatedScalarFieldContainer": + entry_class = checker_to_type[obj._type_checker.__class__] + entry_type = entry_class.__name__ + elif obj_type=="ScalarMap": + entry_class = obj.GetEntryClass()().value.__class__ + entry_type = entry_class.__name__ + return { + "class": obj.__class__, + "type": obj_type+("<"+entry_type+">" if entry_type is not None else ""), + "entry_class": entry_class, + "entry_type": entry_type, + } + +def get_fields(obj, name, level=0, verbose=False): + prefix = ' '*level + obj_info = {"name": name, "fields": []} + obj_info.update(get_type(obj)) + if verbose: print(prefix+obj_info["type"],name) + field_obj = None + if hasattr(obj, "DESCRIPTOR"): + field_obj = obj + elif obj_info["entry_class"] is not None and hasattr(obj_info["entry_class"], "DESCRIPTOR"): + field_obj = obj_info["entry_class"]() + field_list = [] + if field_obj is not None: + field_list = [f.name for f in field_obj.DESCRIPTOR.fields] + for field in field_list: + obj_info["fields"].append(get_fields(getattr(field_obj,field),field,level+1,verbose)) + return obj_info + +def get_model_info(): + return 
get_fields(grpc.model_config_pb2.ModelConfig(), "ModelConfig") + +def msg_json(val, defaults=False): + return json_format.MessageToJson(val, preserving_proto_field_name=True, including_default_value_fields=defaults, indent=0).replace(",\n",", ").replace("\n","") + +def print_fields(obj, info, level=0, json=False, defaults=False): + def print_subfields(obj,level): + fields = obj.DESCRIPTOR.fields if defaults else [f[0] for f in obj.ListFields()] + for field in fields: + print_fields(getattr(obj,field.name), next(f for f in info["fields"] if f["name"]==field.name), level=level, json=json, defaults=defaults) + + prefix = ' ' + print(prefix*level+info["type"],info["name"]) + if hasattr(obj, "DESCRIPTOR"): + if json and level>0: + print(prefix*(level+1)+msg_json(obj, defaults)) + else: + print_subfields(obj,level+1) + elif info["type"].startswith("RepeatedCompositeFieldContainer"): + if json: + print(prefix*(level+1)+str([msg_json(val, defaults) for val in obj])) + else: + for ientry,entry in enumerate(obj): + print(prefix*(level+1)+"{}: ".format(ientry)) + print_subfields(entry,level+2) + elif info["type"].startswith("MessageMap"): + if json: + print(prefix*(level+1)+str({key:msg_json(val, defaults) for key,val in obj.items()})) + else: + for key,val in obj.items(): + print(prefix*(level+1)+"{}: ".format(key)) + print_subfields(val,level+2) + else: + print(prefix*(level+1)+str(obj)) + +def edit_builtin(model,dest,val): + setattr(model,dest,val) + +def edit_scalar_list(model,dest,val): + item = getattr(model,dest) + item.clear() + item.extend(val) + +def edit_scalar_map(model,dest,val): + item = getattr(model,dest) + item.clear() + item.update(val) + +def edit_msg(model,dest,val): + item = getattr(model,dest) + json_format.ParseDict(val,item) + +def edit_msg_list(model,dest,val): + item = getattr(model,dest) + item.clear() + for v in val: + m = item.add() + json_format.ParseDict(v,m) + +def edit_msg_map(model,dest,val): + item = getattr(model,dest) + item.clear() + for
k,v in val.items(): + m = item.get_or_create(k) + json_format.ParseDict(v,m) + +def add_edit_args(parser, model_info): + group = parser.add_argument_group("fields", description="ModelConfig fields to edit") + dests = {} + for field in model_info["fields"]: + argname = "--{}".format(field["name"].replace("_","-")) + val_type = None + editor = None + if field["class"].__module__=="builtins": + kwargs = dict(type=field["class"]) + editor = edit_builtin + elif field["type"].startswith("RepeatedScalarFieldContainer"): + kwargs = dict(type=field["entry_class"], nargs='*') + editor = edit_scalar_list + elif field["type"].startswith("ScalarMap"): + kwargs = dict(type=str, nargs='*', metavar="key value", action=DictAction) + val_type = field["entry_class"] + editor = edit_scalar_map + elif field["type"].startswith("RepeatedCompositeFieldContainer"): + kwargs = dict(type=json.loads, nargs='*', + help="provide {} values in json format".format(field["entry_type"]) + ) + editor = edit_msg_list + elif field["type"].startswith("MessageMap"): + kwargs = dict(type=str, nargs='*', metavar="key value", action=DictAction, + help="provide {} values in json format".format(field["entry_type"]) + ) + editor = edit_msg_map + val_type = json.loads + else: + kwargs = dict(type=json.loads, + help="provide {} values in json format".format(field["type"]) + ) + editor = edit_msg + action = group.add_argument(argname, **kwargs) + if val_type is not None: action.val_type = val_type + dests[action.dest] = editor + return parser, dests + +def get_checksum(filename, chunksize=4096): + import hashlib + with open(filename, 'rb') as f: + file_hash = hashlib.md5() + while chunk := f.read(chunksize): + file_hash.update(chunk) + return file_hash.hexdigest() + +def get_checksum_update_cmd(force=False): + extra_args = ["--update"] + if force: extra_args.append("--force") + extra_args = [arg for arg in extra_args if arg not in sys.argv] + return "{} {}".format(" ".join(sys.argv), " ".join(extra_args)) + +def
update_config(args): + # update config path to be output path (in case view is called) + if args.copy: + args.config = "config.pbtxt" + if isinstance(args.copy,str): + args.config = os.path.join(args.copy, args.config) + + with open(args.config,'w') as outfile: + text_format.PrintMessage(args.model, outfile, use_short_repeated_primitives=True) + +def cfg_common(args): + if not hasattr(args,'model_info'): + args.model_info = get_model_info() + args.model = grpc.model_config_pb2.ModelConfig() + if hasattr(args,'config'): + with open(args.config,'r') as infile: + text_format.Parse(infile.read(), args.model) + +def cfg_schema(args): + get_fields(args.model, "ModelConfig", verbose=True) + +def cfg_view(args): + print("Contents of {}".format(args.config)) + print_fields(args.model, args.model_info, json=args.json, defaults=args.defaults) + +def cfg_edit(args): + for dest,editor,val in [(dest,editor,getattr(args,dest)) for dest,editor in args.edit_dests.items() if getattr(args,dest) is not None]: + editor(args.model,dest,val) + + update_config(args) + + if args.view: + cfg_view(args) + +def cfg_checksum(args): + # internal parameter + if not hasattr(args, "should_return"): + args.should_return = False + + agents = args.model.model_repository_agents.agents + checksum_agent = next((agent for agent in agents if agent.name=="checksum"), None) + if checksum_agent is None: + checksum_agent = agents.add(name="checksum") + + incorrect = [] + missing = [] + + from glob import glob + config_dir = os.path.dirname(args.config) + for filename in glob(os.path.join(config_dir,"*/*")): + if os.path.islink(os.path.dirname(filename)): continue + checksum = get_checksum(filename) + # key = algorithm:[filename relative to config.pbtxt dir] + filename = os.path.relpath(filename, config_dir) + filekey = "MD5:{}".format(filename) + if filekey in checksum_agent.parameters and checksum!=checksum_agent.parameters[filekey]: + incorrect.append(filename) + if args.update and args.force: + 
checksum_agent.parameters[filekey] = checksum + elif filekey not in checksum_agent.parameters: + missing.append(filename) + if args.update: + checksum_agent.parameters[filekey] = checksum + else: + continue + + needs_update = len(missing)>0 + needs_force_update = len(incorrect)>0 + + if not args.quiet: + if needs_update: + print("\n".join(["Missing checksums:"]+missing)) + if needs_force_update: + print("\n".join(["Incorrect checksums:"]+incorrect)) + + if needs_force_update: + if not (args.update and args.force): + if args.should_return: + return TritonChecksumStatus.INCORRECT + else: + raise RuntimeError("\n".join([ + "Incorrect checksum(s) found, indicating existing model file(s) has been changed, which violates policy.", + "To override, run the following command (and provide a justification in your PR):", + get_checksum_update_cmd(force=True) + ])) + else: + update_config(args) + elif needs_update: + if not args.update: + if args.should_return: + return TritonChecksumStatus.MISSING + else: + raise RuntimeError("\n".join([ + "Missing checksum(s) found, indicating new model file(s).", + "To update, run the following command:", + get_checksum_update_cmd(force=False) + ])) + else: + update_config(args) + + if args.view: + cfg_view(args) + + if args.should_return: + return TritonChecksumStatus.CORRECT + +def cfg_versioncheck(args): + incorrect = [] + missing = [] + + for path in os.environ['CMSSW_SEARCH_PATH'].split(':'): + for dirpath, dirnames, filenames in os.walk(path): + for filename in filenames: + if filename=="config.pbtxt": + filepath = os.path.join(dirpath,filename) + checksum_args = Namespace( + config=filepath, should_return=True, + copy=False, json=False, defaults=False, view=False, + update=args.update, force=args.force, quiet=True + ) + cfg_common(checksum_args) + status = cfg_checksum(checksum_args) + if status==TritonChecksumStatus.INCORRECT: + incorrect.append(filepath) + elif status==TritonChecksumStatus.MISSING: + missing.append(filepath) + + msg 
= [] + instr = [] + if len(missing)>0: + msg.extend(["","The following files have missing checksum(s), indicating new model file(s):"]+missing) + instr.extend(["","To update missing checksums, run the following command:",get_checksum_update_cmd(force=False)]) + if len(incorrect)>0: + msg.extend(["","The following files have incorrect checksum(s), indicating existing model file(s) have been changed, which violates policy:"]+incorrect) + instr.extend(["","To override incorrect checksums, run the following command (and provide a justification in your PR):",get_checksum_update_cmd(force=True)]) + + if len(msg)>0: + raise RuntimeError("\n".join(msg+instr)) + +def cfg_threadcontrol(args): + # copy the entire model, not just config.pbtxt + config_dir = os.path.dirname(args.config) + copy_dir = args.copy + new_config_dir = os.path.join(copy_dir, pathlib.Path(config_dir).name) + shutil.copytree(config_dir, new_config_dir) + + platform = args.model.platform + if platform=="ensemble": + repo_dir = pathlib.Path(config_dir).parent + for step in args.model.ensemble_scheduling.step: + # update args and run recursively + args.config = os.path.join(repo_dir,step.model_name,"config.pbtxt") + args.copy = copy_dir + cfg_common(args) + cfg_threadcontrol(args) + return + + # is it correct to do this even if found_params is false at the end? 
+ args.model.instance_group.add(count=args.nThreads, kind=kind_to_int['KIND_CPU']) + + found_params = False + for key,val in thread_control_parameters.items(): + if key in platform: # partial matching + for param in val: + item = args.model.parameters.get_or_create(key) + item.string_value = "1" + found_params = True + break + if not found_params: + print("Warning: thread (instance) control not implemented for {}".format(platform)) + + args.copy = new_config_dir + update_config(args) + + if args.view: + cfg_view(args) + +if __name__=="__main__": + # initial common operations + model_info = get_model_info() + edit_dests = None + + _parser_common = ArgumentParser(add_help=False) + _parser_common.add_argument("-c", "--config", type=str, default="", required=True, help="path to input config.pbtxt file") + + parser = ArgumentParser(formatter_class=ArgumentDefaultsRawHelpFormatter) + subparsers = parser.add_subparsers(dest="command") + + parser_schema = subparsers.add_parser("schema", help="view ModelConfig schema", + description="""Display all fields in the ModelConfig object, with type information. 
+ (For collection types, the subfields of the entry type are shown.)""", + ) + parser_schema.set_defaults(func=cfg_schema) + + _parser_view_args = ArgumentParser(add_help=False) + _parser_view_args.add_argument("--json", default=False, action="store_true", help="display in json format") + _parser_view_args.add_argument("--defaults", default=False, action="store_true", help="show fields with default values") + + parser_view = subparsers.add_parser("view", parents=[_parser_common, _parser_view_args], help="view config.pbtxt contents") + parser_view.set_defaults(func=cfg_view) + + _parser_copy_view = ArgumentParser(add_help=False) + _parser_copy_view.add_argument("--view", default=False, action="store_true", help="view file after editing") + + _parser_copy = ArgumentParser(add_help=False, parents=[_parser_copy_view]) + _parser_copy.add_argument("--copy", metavar="dir", default=False, const=True, nargs='?', type=str, + help="make a copy of config.pbtxt instead of editing in place ([dir] = output path for copy; if omitted, current directory is used)" + ) + + parser_edit = subparsers.add_parser("edit", parents=[_parser_common, _parser_copy, _parser_view_args], help="edit config.pbtxt contents") + parser_edit, edit_dests = add_edit_args(parser_edit, model_info) + parser_edit.set_defaults(func=cfg_edit) + + _parser_checksum_update = ArgumentParser(add_help=False) + _parser_checksum_update.add_argument("--update", default=False, action="store_true", help="update missing checksums") + _parser_checksum_update.add_argument("--force", default=False, action="store_true", help="force update all checksums") + + parser_checksum = subparsers.add_parser("checksum", parents=[_parser_common, _parser_copy, _parser_view_args, _parser_checksum_update], help="handle model file checksums") + parser_checksum.add_argument("--quiet", default=False, action="store_true", help="suppress printouts") + parser_checksum.set_defaults(func=cfg_checksum) + + parser_versioncheck = 
subparsers.add_parser("versioncheck", parents=[_parser_checksum_update], help="check all model checksums") + parser_versioncheck.set_defaults(func=cfg_versioncheck) + + _parser_copy_req = ArgumentParser(add_help=False, parents=[_parser_copy_view]) + _parser_copy_req.add_argument("--copy", metavar="dir", type=str, required=True, + help="local model repository directory to copy model(s)" + ) + + parser_threadcontrol = subparsers.add_parser("threadcontrol", parents=[_parser_common, _parser_copy_req, _parser_view_args], help="enable thread controls") + parser_threadcontrol.add_argument("--nThreads", type=int, required=True, help="number of threads") + parser_threadcontrol.set_defaults(func=cfg_threadcontrol) + + args = parser.parse_args() + args.model_info = model_info + if edit_dests is not None: + args.edit_dests = edit_dests + + cfg_common(args) + + args.func(args) diff --git a/HeterogeneousCore/SonicTriton/src/TritonClient.cc b/HeterogeneousCore/SonicTriton/src/TritonClient.cc index c57a8355d07a1..201ad40d35a0e 100644 --- a/HeterogeneousCore/SonicTriton/src/TritonClient.cc +++ b/HeterogeneousCore/SonicTriton/src/TritonClient.cc @@ -9,11 +9,18 @@ #include "grpc_client.h" #include "grpc_service.pb.h" +#include "model_config.pb.h" -#include +#include "google/protobuf/text_format.h" +#include "google/protobuf/io/zero_copy_stream_impl.h" + +#include #include #include +#include +#include #include +#include #include #include @@ -75,22 +82,61 @@ TritonClient::TritonClient(const edm::ParameterSet& params, const std::string& d //convert seconds to microseconds options_[0].client_timeout_ = params.getUntrackedParameter("timeout") * 1e6; - //config needed for batch size - inference::ModelConfigResponse modelConfigResponse; - TRITON_THROW_IF_ERROR(client_->ModelConfig(&modelConfigResponse, options_[0].model_name_, options_[0].model_version_), - "TritonClient(): unable to get model config"); - inference::ModelConfig modelConfig(modelConfigResponse.config()); + //get fixed 
parameters from local config + inference::ModelConfig localModelConfig; + { + const std::string& localModelConfigPath(params.getParameter("modelConfigPath").fullPath()); + int fileDescriptor = open(localModelConfigPath.c_str(), O_RDONLY); + if (fileDescriptor < 0) + throw TritonException("LocalFailure") + << "TritonClient(): unable to open local model config: " << localModelConfigPath; + google::protobuf::io::FileInputStream localModelConfigInput(fileDescriptor); + localModelConfigInput.SetCloseOnDelete(true); + if (!google::protobuf::TextFormat::Parse(&localModelConfigInput, &localModelConfig)) + throw TritonException("LocalFailure") + << "TritonClient(): unable to parse local model config: " << localModelConfigPath; + } //check batch size limitations (after i/o setup) //triton uses max batch size = 0 to denote a model that does not support native batching (using the outer dimension) //but for models that do support batching (native or otherwise), a given event may set batch size 0 to indicate no valid input is present //so set the local max to 1 and keep track of "no outer dim" case - maxOuterDim_ = modelConfig.max_batch_size(); + maxOuterDim_ = localModelConfig.max_batch_size(); noOuterDim_ = maxOuterDim_ == 0; maxOuterDim_ = std::max(1u, maxOuterDim_); //propagate batch size setBatchSize(1); + //compare model checksums to remote config to enforce versioning + inference::ModelConfigResponse modelConfigResponse; + TRITON_THROW_IF_ERROR(client_->ModelConfig(&modelConfigResponse, options_[0].model_name_, options_[0].model_version_), + "TritonClient(): unable to get model config"); + inference::ModelConfig remoteModelConfig(modelConfigResponse.config()); + + std::map> checksums; + size_t fileCounter = 0; + for (const auto& modelConfig : {localModelConfig, remoteModelConfig}) { + const auto& agents = modelConfig.model_repository_agents().agents(); + auto agent = std::find_if(agents.begin(), agents.end(), [](auto const& a) { return a.name() == "checksum"; }); + if 
(agent != agents.end()) { + const auto& params = agent->parameters(); + for (const auto& [key, val] : params) { + // only check the requested version + if (key.compare(0, options_[0].model_version_.size() + 1, options_[0].model_version_ + "/") == 0) + checksums[key][fileCounter] = val; + } + } + ++fileCounter; + } + std::vector incorrect; + for (const auto& [key, val] : checksums) { + if (checksums[key][0] != checksums[key][1]) + incorrect.push_back(key); + } + if (!incorrect.empty()) + throw TritonException("ModelVersioning") << "The following files have incorrect checksums on the remote server: " + << triton_utils::printColl(incorrect, ", "); + //get model info inference::ModelMetadataResponse modelMetadata; TRITON_THROW_IF_ERROR(client_->ModelMetadata(&modelMetadata, options_[0].model_name_, options_[0].model_version_), diff --git a/HeterogeneousCore/SonicTriton/src/triton_utils.cc b/HeterogeneousCore/SonicTriton/src/triton_utils.cc index 3dc872d6e1b42..a71190d951e46 100644 --- a/HeterogeneousCore/SonicTriton/src/triton_utils.cc +++ b/HeterogeneousCore/SonicTriton/src/triton_utils.cc @@ -21,4 +21,5 @@ template std::string triton_utils::printColl(const edm::Span& coll, const std::string& delim); template std::string triton_utils::printColl(const std::vector& coll, const std::string& delim); +template std::string triton_utils::printColl(const std::vector& coll, const std::string& delim); template std::string triton_utils::printColl(const std::unordered_set& coll, const std::string& delim); diff --git a/HeterogeneousCore/SonicTriton/test/BuildFile.xml b/HeterogeneousCore/SonicTriton/test/BuildFile.xml index 272fba3da2cc8..e4ff7a0bb56f3 100644 --- a/HeterogeneousCore/SonicTriton/test/BuildFile.xml +++ b/HeterogeneousCore/SonicTriton/test/BuildFile.xml @@ -1,5 +1,8 @@ - - + + + + + diff --git a/IOMC/EventVertexGenerators/interface/BetafuncEvtVtxGenerator.h b/IOMC/EventVertexGenerators/interface/BetafuncEvtVtxGenerator.h index 777dd1607393a..2e1703d760548 100644 --- 
a/IOMC/EventVertexGenerators/interface/BetafuncEvtVtxGenerator.h +++ b/IOMC/EventVertexGenerators/interface/BetafuncEvtVtxGenerator.h @@ -20,6 +20,7 @@ ________________________________________________________________________ #include "IOMC/EventVertexGenerators/interface/BaseEvtVtxGenerator.h" #include "FWCore/Framework/interface/ESWatcher.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/Utilities/interface/ESGetToken.h" #include "CondFormats/DataRecord/interface/SimBeamSpotObjectsRcd.h" #include "CondFormats/BeamSpotObjects/interface/SimBeamSpotObjects.h" @@ -37,6 +38,8 @@ class BetafuncEvtVtxGenerator : public BaseEvtVtxGenerator { BetafuncEvtVtxGenerator& operator=(const BetafuncEvtVtxGenerator& rhs) = delete; ~BetafuncEvtVtxGenerator() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; /// return a new event vertex diff --git a/IOMC/EventVertexGenerators/interface/GaussEvtVtxGenerator.h b/IOMC/EventVertexGenerators/interface/GaussEvtVtxGenerator.h index 6bbea9c0821aa..b0a05ddb5b647 100644 --- a/IOMC/EventVertexGenerators/interface/GaussEvtVtxGenerator.h +++ b/IOMC/EventVertexGenerators/interface/GaussEvtVtxGenerator.h @@ -9,6 +9,7 @@ #include "IOMC/EventVertexGenerators/interface/BaseEvtVtxGenerator.h" #include "FWCore/Framework/interface/ESWatcher.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/Utilities/interface/ESGetToken.h" #include "CondFormats/DataRecord/interface/SimBeamSpotObjectsRcd.h" #include "CondFormats/BeamSpotObjects/interface/SimBeamSpotObjects.h" @@ -26,6 +27,8 @@ class GaussEvtVtxGenerator : public BaseEvtVtxGenerator { GaussEvtVtxGenerator& operator=(const GaussEvtVtxGenerator& rhs) = delete; ~GaussEvtVtxGenerator() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + void 
beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; /// return a new event vertex diff --git a/IOMC/EventVertexGenerators/python/VtxSmearedDesign_cfi.py b/IOMC/EventVertexGenerators/python/VtxSmearedDesign_cfi.py new file mode 100644 index 0000000000000..028b87e24c3af --- /dev/null +++ b/IOMC/EventVertexGenerators/python/VtxSmearedDesign_cfi.py @@ -0,0 +1,8 @@ +import FWCore.ParameterSet.Config as cms + +# Load GaussEvtVtxGenerator and read parameters from GT (SimBeamSpotObjectRcd) +from IOMC.EventVertexGenerators.GaussEvtVtxGenerator_cfi import GaussEvtVtxGenerator +VtxSmeared = GaussEvtVtxGenerator.clone( + src = "generator:unsmeared", + readDB = True +) diff --git a/IOMC/EventVertexGenerators/python/VtxSmearedRealistic_cfi.py b/IOMC/EventVertexGenerators/python/VtxSmearedRealistic_cfi.py new file mode 100644 index 0000000000000..20d5d739746cb --- /dev/null +++ b/IOMC/EventVertexGenerators/python/VtxSmearedRealistic_cfi.py @@ -0,0 +1,8 @@ +import FWCore.ParameterSet.Config as cms + +# Load BetafuncEvtVtxGenerator and read parameters from GT (SimBeamSpotObjectRcd) +from IOMC.EventVertexGenerators.BetafuncEvtVtxGenerator_cfi import BetafuncEvtVtxGenerator +VtxSmeared = BetafuncEvtVtxGenerator.clone( + src = "generator:unsmeared", + readDB = True +) diff --git a/IOMC/EventVertexGenerators/src/BetafuncEvtVtxGenerator.cc b/IOMC/EventVertexGenerators/src/BetafuncEvtVtxGenerator.cc index 125169fba39a5..89932a409da85 100644 --- a/IOMC/EventVertexGenerators/src/BetafuncEvtVtxGenerator.cc +++ b/IOMC/EventVertexGenerators/src/BetafuncEvtVtxGenerator.cc @@ -24,11 +24,8 @@ ________________________________________________________________________ #include "CLHEP/Random/RandGaussQ.h" #include "CLHEP/Units/GlobalSystemOfUnits.h" #include "CLHEP/Units/GlobalPhysicalConstants.h" -//#include "CLHEP/Vector/ThreeVector.h" #include "HepMC/SimpleVector.h" -#include - BetafuncEvtVtxGenerator::BetafuncEvtVtxGenerator(const edm::ParameterSet& p) : 
BaseEvtVtxGenerator(p), boost_(4, 4) { readDB_ = p.getParameter("readDB"); if (!readDB_) { @@ -140,3 +137,19 @@ void BetafuncEvtVtxGenerator::sigmaZ(double s) { } TMatrixD const* BetafuncEvtVtxGenerator::GetInvLorentzBoost() const { return &boost_; } + +void BetafuncEvtVtxGenerator::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("X0", 0.0)->setComment("in cm"); + desc.add("Y0", 0.0)->setComment("in cm"); + desc.add("Z0", 0.0)->setComment("in cm"); + desc.add("SigmaZ", 0.0)->setComment("in cm"); + desc.add("BetaStar", 0.0)->setComment("in cm"); + desc.add("Emittance", 0.0)->setComment("in cm"); + desc.add("Alpha", 0.0)->setComment("in radians"); + desc.add("Phi", 0.0)->setComment("in radians"); + desc.add("TimeOffset", 0.0)->setComment("in ns"); + desc.add("src"); + desc.add("readDB"); + descriptions.add("BetafuncEvtVtxGenerator", desc); +} diff --git a/IOMC/EventVertexGenerators/src/GaussEvtVtxGenerator.cc b/IOMC/EventVertexGenerators/src/GaussEvtVtxGenerator.cc index a45a096fbfaa4..103a420b67a2b 100644 --- a/IOMC/EventVertexGenerators/src/GaussEvtVtxGenerator.cc +++ b/IOMC/EventVertexGenerators/src/GaussEvtVtxGenerator.cc @@ -1,14 +1,10 @@ - - #include "IOMC/EventVertexGenerators/interface/GaussEvtVtxGenerator.h" -#include "FWCore/Utilities/interface/Exception.h" - #include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/Exception.h" #include "CLHEP/Random/RandGaussQ.h" #include "CLHEP/Units/GlobalSystemOfUnits.h" #include "CLHEP/Units/GlobalPhysicalConstants.h" -//#include "CLHEP/Vector/ThreeVector.h" #include "HepMC/SimpleVector.h" GaussEvtVtxGenerator::GaussEvtVtxGenerator(const edm::ParameterSet& p) : BaseEvtVtxGenerator(p) { @@ -95,3 +91,17 @@ void GaussEvtVtxGenerator::sigmaZ(double s) { << "Illegal resolution in Z (negative)"; } } + +void GaussEvtVtxGenerator::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription 
desc; + desc.add("MeanX", 0.0)->setComment("in cm"); + desc.add("MeanY", 0.0)->setComment("in cm"); + desc.add("MeanZ", 0.0)->setComment("in cm"); + desc.add("SigmaX", 0.0)->setComment("in cm"); + desc.add("SigmaY", 0.0)->setComment("in cm"); + desc.add("SigmaZ", 0.0)->setComment("in cm"); + desc.add("TimeOffset", 0.0)->setComment("in ns"); + desc.add("src"); + desc.add("readDB"); + descriptions.add("GaussEvtVtxGenerator", desc); +} diff --git a/IOMC/RandomEngine/test/unit_test_outputs/testMultiStreamDump.txt b/IOMC/RandomEngine/test/unit_test_outputs/testMultiStreamDump.txt index af396cbdeab39..5fb08c30c8f5e 100644 --- a/IOMC/RandomEngine/test/unit_test_outputs/testMultiStreamDump.txt +++ b/IOMC/RandomEngine/test/unit_test_outputs/testMultiStreamDump.txt @@ -3,11 +3,11 @@ RandomNumberGeneratorService dump Contents of seedsAndNameMap (label moduleID engineType seeds) - t1 4 HepJamesRandom 81 - t2 5 RanecuEngine 1 2 - t3 6 TRandom3 83 - t4 7 HepJamesRandom 84 - t6 8 MixMaxRng 85 + t1 3 HepJamesRandom 81 + t2 4 RanecuEngine 1 2 + t3 5 TRandom3 83 + t4 6 HepJamesRandom 84 + t6 7 MixMaxRng 85 nStreams_ = 3 saveFileName_ = StashStateStream.data saveFileNameRecorded_ = 0 diff --git a/IOMC/RandomEngine/test/unit_test_outputs/testRandomService1Dump.txt b/IOMC/RandomEngine/test/unit_test_outputs/testRandomService1Dump.txt index 5637951fbf9e6..cd5818d5fea34 100644 --- a/IOMC/RandomEngine/test/unit_test_outputs/testRandomService1Dump.txt +++ b/IOMC/RandomEngine/test/unit_test_outputs/testRandomService1Dump.txt @@ -3,11 +3,11 @@ RandomNumberGeneratorService dump Contents of seedsAndNameMap (label moduleID engineType seeds) - t1 4 HepJamesRandom 81 - t2 5 RanecuEngine 1 2 - t3 6 TRandom3 83 - t4 7 HepJamesRandom 84 - t6 8 MixMaxRng 85 + t1 3 HepJamesRandom 81 + t2 4 RanecuEngine 1 2 + t3 5 TRandom3 83 + t4 6 HepJamesRandom 84 + t6 7 MixMaxRng 85 nStreams_ = 1 saveFileName_ = StashState1.data saveFileNameRecorded_ = 0 diff --git a/IOPool/Input/python/fixReading_12_4_X_Files.py 
b/IOPool/Input/python/fixReading_12_4_X_Files.py new file mode 100644 index 0000000000000..baa174f5d8330 --- /dev/null +++ b/IOPool/Input/python/fixReading_12_4_X_Files.py @@ -0,0 +1,12 @@ +import FWCore.ParameterSet.Config as cms + +# ROOT version used in 12_4_X (2022 data taking), and in 13_0_X up to +# 13_0_3 had a bug where TStreamerInfo was missing in some cases. This +# customize function adds a Service to include the TStreamerInfo for +# the affected classes so that the old files can be read now that some +# of the affected data format classes have evolved. +def fixReading_12_4_X_Files(process): + process.add_(cms.Service("FixMissingStreamerInfos", + fileInPath = cms.untracked.FileInPath("IOPool/Input/data/fileContainingStreamerInfos_13_0_0.root") + )) + return process diff --git a/IOPool/Input/src/OneLumiPoolSource.cc b/IOPool/Input/src/OneLumiPoolSource.cc index 77512503cfbde..8a33b257b9640 100644 --- a/IOPool/Input/src/OneLumiPoolSource.cc +++ b/IOPool/Input/src/OneLumiPoolSource.cc @@ -15,7 +15,7 @@ namespace edm { explicit OneLumiPoolSource(ParameterSet const& pset, InputSourceDescription const& desc); private: - ItemType getNextItemType() override; + ItemTypeInfo getNextItemType() override; std::shared_ptr readLuminosityBlockAuxiliary_() override; void readEvent_(EventPrincipal& eventPrincipal) override { @@ -37,9 +37,9 @@ namespace edm { return ret; } - InputSource::ItemType OneLumiPoolSource::getNextItemType() { + InputSource::ItemTypeInfo OneLumiPoolSource::getNextItemType() { auto type = PoolSource::getNextItemType(); - if (type == IsLumi) { + if (type == ItemType::IsLumi) { if (seenFirstLumi_) { do { edm::HistoryAppender historyAppender; @@ -50,7 +50,7 @@ namespace edm { LuminosityBlockPrincipal temp(prodReg, procConfig, &historyAppender, 0); readLuminosityBlock_(temp); type = PoolSource::getNextItemType(); - } while (type == IsLumi); + } while (type == ItemType::IsLumi); } else { seenFirstLumi_ = true; } diff --git 
a/IOPool/Input/src/PoolSource.cc b/IOPool/Input/src/PoolSource.cc index 9bd2cb393c848..6996f17d0c637 100644 --- a/IOPool/Input/src/PoolSource.cc +++ b/IOPool/Input/src/PoolSource.cc @@ -260,15 +260,15 @@ namespace edm { return true; } - InputSource::ItemType PoolSource::getNextItemType() { + InputSource::ItemTypeInfo PoolSource::getNextItemType() { RunNumber_t run = IndexIntoFile::invalidRun; LuminosityBlockNumber_t lumi = IndexIntoFile::invalidLumi; EventNumber_t event = IndexIntoFile::invalidEvent; InputSource::ItemType itemType = primaryFileSequence_->getNextItemType(run, lumi, event); - if (secondaryFileSequence_ && (IsSynchronize != state())) { - if (itemType == IsRun || itemType == IsLumi || itemType == IsEvent) { + if (secondaryFileSequence_ && (ItemType::IsSynchronize != state())) { + if (itemType == ItemType::IsRun || itemType == ItemType::IsLumi || itemType == ItemType::IsEvent) { if (!secondaryFileSequence_->containedInCurrentFile(run, lumi, event)) { - return IsSynchronize; + return ItemType::IsSynchronize; } } } diff --git a/IOPool/Input/src/PoolSource.h b/IOPool/Input/src/PoolSource.h index ed7deb115f91f..48560338f867d 100644 --- a/IOPool/Input/src/PoolSource.h +++ b/IOPool/Input/src/PoolSource.h @@ -49,7 +49,7 @@ namespace edm { static void fillDescriptions(ConfigurationDescriptions& descriptions); protected: - ItemType getNextItemType() override; + ItemTypeInfo getNextItemType() override; void readLuminosityBlock_(LuminosityBlockPrincipal& lumiPrincipal) override; std::shared_ptr readLuminosityBlockAuxiliary_() override; void readEvent_(EventPrincipal& eventPrincipal) override; diff --git a/IOPool/Input/src/RepeatingCachedRootSource.cc b/IOPool/Input/src/RepeatingCachedRootSource.cc index f85866180de1b..7b92607e46125 100644 --- a/IOPool/Input/src/RepeatingCachedRootSource.cc +++ b/IOPool/Input/src/RepeatingCachedRootSource.cc @@ -112,7 +112,7 @@ namespace edm { }; protected: - ItemType getNextItemType() override; + ItemTypeInfo getNextItemType() 
override; void readLuminosityBlock_(LuminosityBlockPrincipal& lumiPrincipal) override; std::shared_ptr readLuminosityBlockAuxiliary_() override; void readEvent_(EventPrincipal& eventPrincipal) override; @@ -153,7 +153,7 @@ namespace edm { std::map productIDToWrapperIndex_; std::vector streamToCacheIndex_; size_t nextEventIndex_ = 0; - ItemType presentState_ = IsFile; + ItemType presentState_ = ItemType::IsFile; unsigned long long eventIndex_ = 0; }; } // namespace edm @@ -344,17 +344,17 @@ std::shared_ptr RepeatingCachedRootSource::getProduct(unsigned int return cachedWrappers_[streamToCacheIndex_[iStreamIndex]][branchIDToWrapperIndex_.find(k)->second]; } -RepeatingCachedRootSource::ItemType RepeatingCachedRootSource::getNextItemType() { +RepeatingCachedRootSource::ItemTypeInfo RepeatingCachedRootSource::getNextItemType() { auto v = presentState_; switch (presentState_) { - case IsFile: - presentState_ = IsRun; + case ItemType::IsFile: + presentState_ = ItemType::IsRun; break; - case IsRun: - presentState_ = IsLumi; + case ItemType::IsRun: + presentState_ = ItemType::IsLumi; break; - case IsLumi: - presentState_ = IsEvent; + case ItemType::IsLumi: + presentState_ = ItemType::IsEvent; break; default: break; diff --git a/IOPool/Input/src/RootPrimaryFileSequence.cc b/IOPool/Input/src/RootPrimaryFileSequence.cc index 2f29dc762538a..07e0255bc0129 100644 --- a/IOPool/Input/src/RootPrimaryFileSequence.cc +++ b/IOPool/Input/src/RootPrimaryFileSequence.cc @@ -226,31 +226,31 @@ namespace edm { return true; } - InputSource::ItemType RootPrimaryFileSequence::getNextItemType(RunNumber_t& run, - LuminosityBlockNumber_t& lumi, - EventNumber_t& event) { + InputSource::ItemTypeInfo RootPrimaryFileSequence::getNextItemType(RunNumber_t& run, + LuminosityBlockNumber_t& lumi, + EventNumber_t& event) { if (noMoreFiles() || skipToStop_) { skipToStop_ = false; - return InputSource::IsStop; + return InputSource::ItemType::IsStop; } if (firstFile_ || goToEventInNewFile_ || skipIntoNewFile_) 
{ - return InputSource::IsFile; + return InputSource::ItemType::IsFile; } if (rootFile()) { IndexIntoFile::EntryType entryType = rootFile()->getNextItemType(run, lumi, event); if (entryType == IndexIntoFile::kEvent) { - return InputSource::IsEvent; + return InputSource::ItemType::IsEvent; } else if (entryType == IndexIntoFile::kLumi) { - return InputSource::IsLumi; + return InputSource::ItemType::IsLumi; } else if (entryType == IndexIntoFile::kRun) { - return InputSource::IsRun; + return InputSource::ItemType::IsRun; } assert(entryType == IndexIntoFile::kEnd); } if (atLastFile()) { - return InputSource::IsStop; + return InputSource::ItemType::IsStop; } - return InputSource::IsFile; + return InputSource::ItemType::IsFile; } // Rewind to before the first event that was read. diff --git a/IOPool/Input/src/RootPrimaryFileSequence.h b/IOPool/Input/src/RootPrimaryFileSequence.h index 75bc262b19c48..aba1fc99cd2fd 100644 --- a/IOPool/Input/src/RootPrimaryFileSequence.h +++ b/IOPool/Input/src/RootPrimaryFileSequence.h @@ -41,7 +41,7 @@ namespace edm { std::shared_ptr readFile_(); void endJob(); - InputSource::ItemType getNextItemType(RunNumber_t& run, LuminosityBlockNumber_t& lumi, EventNumber_t& event); + InputSource::ItemTypeInfo getNextItemType(RunNumber_t& run, LuminosityBlockNumber_t& lumi, EventNumber_t& event); void skipEventsAtBeginning(int offset); void skipEvents(int offset); bool goToEvent(EventID const& eventID); diff --git a/IOPool/Input/src/RunHelper.cc b/IOPool/Input/src/RunHelper.cc index 8537f2165e380..cf1cfbeb76087 100644 --- a/IOPool/Input/src/RunHelper.cc +++ b/IOPool/Input/src/RunHelper.cc @@ -106,8 +106,8 @@ namespace edm { RunNumber_t, LuminosityBlockNumber_t, EventNumber_t) { - if (newItemType == InputSource::IsRun || - (newItemType == InputSource::IsLumi && previousItemType != InputSource::IsRun)) { + if (newItemType == InputSource::ItemType::IsRun || + (newItemType == InputSource::ItemType::IsLumi && previousItemType != 
InputSource::ItemType::IsRun)) { if (firstTime_) { firstTime_ = false; } else { @@ -125,8 +125,8 @@ namespace edm { } bool sameRunNumber = (indexOfNextRunNumber_ != 0U && run == setRunNumberForEachLumi_[indexOfNextRunNumber_ - 1]); if (!sameRunNumber) { - fakeNewRun_ = (newItemType != InputSource::IsRun); - return InputSource::IsRun; + fakeNewRun_ = (newItemType != InputSource::ItemType::IsRun); + return InputSource::ItemType::IsRun; } } return newItemType; @@ -174,7 +174,7 @@ namespace edm { RunNumber_t, LuminosityBlockNumber_t iLumi, EventNumber_t) { - if (newItemType == InputSource::IsLumi && previousItemType != InputSource::IsRun) { + if (newItemType == InputSource::ItemType::IsLumi && previousItemType != InputSource::ItemType::IsRun) { auto run = findRunFromLumi(iLumi); if (run == 0) { throw Exception(errors::Configuration, "PoolSource") @@ -183,7 +183,7 @@ namespace edm { if (lastUsedRunNumber_ != run) { fakeNewRun_ = true; lastUsedRunNumber_ = run; - return InputSource::IsRun; + return InputSource::ItemType::IsRun; } } return newItemType; diff --git a/IOPool/Input/test/testSchemaEvolution.sh b/IOPool/Input/test/testSchemaEvolution.sh index 237bfa639fcad..faaa577f133fe 100755 --- a/IOPool/Input/test/testSchemaEvolution.sh +++ b/IOPool/Input/test/testSchemaEvolution.sh @@ -1,4 +1,4 @@ -#!/bin/sh -x +#!/bin/sh function die { echo $1: status $2 ; exit $2; } @@ -91,6 +91,28 @@ cmsRun ${LOCAL_TEST_DIR}/SchemaEvolution_test_read_cfg.py --inputFile "$inputfil file=SchemaEvolutionTestOLD13_0_0.root inputfile=$(edmFileInPath IOPool/Input/data/$file) || die "Failure edmFileInPath IOPool/Input/data/$file" $? + +# The next test demonstrates the FileReadError that can occur as a +# result of the known ROOT bug in 13_0_0 (file has a problem when +# written with 13_0_0 that causes an exception when read). +# Note that this is also used to test the cmsRun exit code +# after a FileReadError (should be 8021). 
It is very convenient +# to test that here because it is hard to intentionally create +# a file that will cause a FileReadError. So we take advantage +# of the ROOT bug to implement the test. This bug actually +# occurred, see Issue 42179 for details. +echo "***" +echo "***" +echo "Exception in next test is INTENTIONAL. Test fails if not thrown or cmsRun returns wrong exit code" +echo "***" +echo "***" +cmsRun -j FileReadErrorTest_jobreport.xml ${LOCAL_TEST_DIR}/SchemaEvolution_test_read_cfg.py --inputFile $inputfile && die 'SchemaEvolution_test_read_cfg.py with corrupt input did not throw an exception' 1 +CMSRUN_EXIT_CODE=$(edmFjrDump --exitCode FileReadErrorTest_jobreport.xml) +if [ "x${CMSRUN_EXIT_CODE}" != "x8021" ]; then + echo "cmsRun reported exit code ${CMSRUN_EXIT_CODE} which is different from the expected 8021 (FileReadError)" + exit 1 +fi + # The test below would fail without the "--enableStreamerInfosFix" # because there was a bug in the version of ROOT associated with CMSSW_13_0_0. # The bug caused StreamerInfo objects to be missing from the ROOT file. In this case, diff --git a/IOPool/Streamer/src/StreamerOutputModuleCommon.cc b/IOPool/Streamer/src/StreamerOutputModuleCommon.cc index 76ae5e72ed480..bc79452fcc75e 100644 --- a/IOPool/Streamer/src/StreamerOutputModuleCommon.cc +++ b/IOPool/Streamer/src/StreamerOutputModuleCommon.cc @@ -227,18 +227,15 @@ namespace edm { std::vector hltbits; setHltMask(e, triggerResults, hltbits); - uint32 lumi; - if (lumiSectionInterval_ == 0) { - lumi = e.luminosityBlock(); - } else { + uint32 lumi = e.luminosityBlock(); + if (lumiSectionInterval_ != 0) { struct timeval now; struct timezone dummyTZ; gettimeofday(&now, &dummyTZ); double timeInSec = static_cast(now.tv_sec) + (static_cast(now.tv_usec) / 1000000.0) - timeInSecSinceUTC; // what about overflows? 
- if (lumiSectionInterval_ > 0) - lumi = static_cast(timeInSec / lumiSectionInterval_) + 1; + lumi = static_cast(timeInSec / std::abs(lumiSectionInterval_)) + 1; } serializer_.serializeEvent(sbuf, e, selectorCfg, compressionAlgo_, compressionLevel_, reserve_size); diff --git a/JetMETCorrections/FFTJetModules/interface/FFTJetESParameterParser.h b/JetMETCorrections/FFTJetModules/interface/FFTJetESParameterParser.h index ba31028da4275..0dd8a25c24ff4 100644 --- a/JetMETCorrections/FFTJetModules/interface/FFTJetESParameterParser.h +++ b/JetMETCorrections/FFTJetModules/interface/FFTJetESParameterParser.h @@ -52,7 +52,7 @@ std::shared_ptr > parseFFTJetScaleCalc // } // Load the table from the archive - CPP11_auto_ptr autof = loadFFTJetInterpolationTable(ps, ar, verbose); + std::unique_ptr autof = loadFFTJetInterpolationTable(ps, ar, verbose); std::shared_ptr f(autof.release()); // Swap the class name if it is supposed to be determined diff --git a/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectionESProducer.h b/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectionESProducer.h index 3a8a3645a1e7b..41495f36050a1 100644 --- a/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectionESProducer.h +++ b/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectionESProducer.h @@ -60,7 +60,7 @@ static void buildCorrectorSequence(const FFTJetCorrectorParameters& tablePars, typedef typename CorrectorSequence::jet_type jet_type; // Load the archive stored in the FFTJetCorrectorParameters object - CPP11_auto_ptr ar; + std::unique_ptr ar; { std::istringstream is(tablePars.str()); if (isArchiveCompressed) diff --git a/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectorDBReader.cc b/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectorDBReader.cc index 2417cb8903ee9..bb91d28f94c03 100644 --- a/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectorDBReader.cc +++ b/JetMETCorrections/FFTJetModules/plugins/FFTJetCorrectorDBReader.cc @@ -80,7 +80,7 @@ void 
FFTJetCorrectorDBReader::analyze(const edm::Event& iEvent, const edm::Event if (printAsString) std::cout << "++++ String rep: \"" << JetCorParams->str() << '"' << std::endl; else if (readArchive) { - CPP11_auto_ptr par; + std::unique_ptr par; { std::istringstream is(JetCorParams->str()); diff --git a/JetMETCorrections/FFTJetModules/plugins/FFTJetLookupTableESProducer.cc b/JetMETCorrections/FFTJetModules/plugins/FFTJetLookupTableESProducer.cc index 4606cef39b746..8a5e8ea8005ac 100644 --- a/JetMETCorrections/FFTJetModules/plugins/FFTJetLookupTableESProducer.cc +++ b/JetMETCorrections/FFTJetModules/plugins/FFTJetLookupTableESProducer.cc @@ -57,7 +57,7 @@ static void buildLookupTables(const FFTJetCorrectorParameters& tablePars, const bool verbose, FFTJetLookupTableSequence* ptr) { // Load the archive stored in the FFTJetCorrectorParameters object - CPP11_auto_ptr ar; + std::unique_ptr ar; { std::istringstream is(tablePars.str()); if (isArchiveCompressed) @@ -82,7 +82,7 @@ static void buildLookupTables(const FFTJetCorrectorParameters& tablePars, for (unsigned long item = 0; item < nItems; ++item) { const unsigned long long id = ref.id(item); if (loadedSet.insert(id).second) { - CPP11_auto_ptr p(ref.get(item)); + std::unique_ptr p(ref.get(item)); StorableFunctorPtr fptr(p.release()); std::shared_ptr e = ar->catalogEntry(id); insertLUTItem(*ptr, fptr, e->name(), e->category()); diff --git a/JetMETCorrections/FFTJetObjects/interface/loadFFTJetInterpolationTable.h b/JetMETCorrections/FFTJetObjects/interface/loadFFTJetInterpolationTable.h index 314eae41dbf7a..3973bc4667de6 100644 --- a/JetMETCorrections/FFTJetObjects/interface/loadFFTJetInterpolationTable.h +++ b/JetMETCorrections/FFTJetObjects/interface/loadFFTJetInterpolationTable.h @@ -2,13 +2,13 @@ #define JetMETCorrections_FFTJetObjects_loadFFTJetInterpolationTable_h #include "Alignment/Geners/interface/StringArchive.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include 
"FWCore/ParameterSet/interface/ParameterSetfwd.h" #include "JetMETCorrections/InterpolationTables/interface/StorableMultivariateFunctor.h" -CPP11_auto_ptr loadFFTJetInterpolationTable(const edm::ParameterSet& ps, - gs::StringArchive& ar, - bool verbose); +std::unique_ptr loadFFTJetInterpolationTable(const edm::ParameterSet& ps, + gs::StringArchive& ar, + bool verbose); #endif // JetMETCorrections_FFTJetObjects_loadFFTJetInterpolationTable_h diff --git a/JetMETCorrections/FFTJetObjects/src/loadFFTJetInterpolationTable.cc b/JetMETCorrections/FFTJetObjects/src/loadFFTJetInterpolationTable.cc index 9817f64ffe60c..de30a40408793 100644 --- a/JetMETCorrections/FFTJetObjects/src/loadFFTJetInterpolationTable.cc +++ b/JetMETCorrections/FFTJetObjects/src/loadFFTJetInterpolationTable.cc @@ -25,9 +25,9 @@ static void dumpArchiveMetadata(gs::StringArchive& ar, std::ostream& os) { } } -CPP11_auto_ptr loadFFTJetInterpolationTable(const edm::ParameterSet& ps, - gs::StringArchive& ar, - const bool verbose) { +std::unique_ptr loadFFTJetInterpolationTable(const edm::ParameterSet& ps, + gs::StringArchive& ar, + const bool verbose) { gs::SearchSpecifier nameSearch(ps.getParameter("name"), ps.getParameter("nameIsRegex")); gs::SearchSpecifier categorySearch(ps.getParameter("category"), ps.getParameter("categoryIsRegex")); @@ -52,7 +52,7 @@ CPP11_auto_ptr loadFFTJetInterpolationTable throw cms::Exception("FFTJetBadConfig", os.str()); } - CPP11_auto_ptr p = ref.get(0); + std::unique_ptr p = ref.get(0); if (verbose) { std::cout << "In loadFFTJetInterpolationTable: loaded table with metadata" << std::endl; std::shared_ptr e = ref.indexedCatalogEntry(0); diff --git a/JetMETCorrections/InterpolationTables/interface/HistoND.h b/JetMETCorrections/InterpolationTables/interface/HistoND.h index 1a97f7336c4c5..1542699f552a9 100644 --- a/JetMETCorrections/InterpolationTables/interface/HistoND.h +++ b/JetMETCorrections/InterpolationTables/interface/HistoND.h @@ -1002,7 +1002,7 @@ namespace npstat { 
#include #include -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/binaryIO.hh" namespace npstat { @@ -3521,7 +3521,7 @@ namespace npstat { ArrayND data, over; ArrayND::restore(ida, in, &data); ArrayND::restore(ida, in, &over); - CPP11_auto_ptr > result( + std::unique_ptr > result( new HistoND(axes, title.c_str(), accumulatedDataLabel.c_str())); result->data_ = data; result->overflow_ = over; diff --git a/JetMETCorrections/InterpolationTables/interface/LinInterpolatedTableND.h b/JetMETCorrections/InterpolationTables/interface/LinInterpolatedTableND.h index 14f52ef09728f..4977728483f9d 100644 --- a/JetMETCorrections/InterpolationTables/interface/LinInterpolatedTableND.h +++ b/JetMETCorrections/InterpolationTables/interface/LinInterpolatedTableND.h @@ -15,7 +15,7 @@ #include #include -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "JetMETCorrections/InterpolationTables/interface/ArrayND.h" #include "JetMETCorrections/InterpolationTables/interface/UniformAxis.h" @@ -179,11 +179,11 @@ namespace npstat { // in case "Numeric" type is either float or double. */ template - CPP11_auto_ptr invertWRTAxis(ConvertibleToUnsigned axisNumber, - const Axis& replacementAxis, - bool newAxisLeftLinear, - bool newAxisRightLinear, - const char* functionLabel = nullptr) const; + std::unique_ptr invertWRTAxis(ConvertibleToUnsigned axisNumber, + const Axis& replacementAxis, + bool newAxisLeftLinear, + bool newAxisRightLinear, + const char* functionLabel = nullptr) const; /** // This method inverts the ratio response. @@ -208,13 +208,13 @@ namespace npstat { // only in case "Numeric" type is either float or double. 
*/ template - CPP11_auto_ptr invertRatioResponse(unsigned axisNumber, - const Axis& replacementAxis, - bool newAxisLeftLinear, - bool newAxisRightLinear, - Functor1 invg, - Functor2 invh, - const char* functionLabel = nullptr) const; + std::unique_ptr invertRatioResponse(unsigned axisNumber, + const Axis& replacementAxis, + bool newAxisLeftLinear, + bool newAxisRightLinear, + Functor1 invg, + Functor2 invh, + const char* functionLabel = nullptr) const; /** Comparison for equality */ bool operator==(const LinInterpolatedTableND&) const; @@ -716,7 +716,7 @@ namespace npstat { template template - CPP11_auto_ptr > LinInterpolatedTableND::invertWRTAxis( + std::unique_ptr > LinInterpolatedTableND::invertWRTAxis( const ConvertibleToUnsigned axisNumC, const Axis& replacementAxis, const bool leftLinear, @@ -737,7 +737,7 @@ namespace npstat { iType[axisNumber] = std::pair(leftLinear, rightLinear); // Create the new table - CPP11_auto_ptr pTable(new LinInterpolatedTableND(newAxes, iType, functionLabel)); + std::unique_ptr pTable(new LinInterpolatedTableND(newAxes, iType, functionLabel)); if (dim_ > 1U) { // Prepare array slices @@ -767,7 +767,7 @@ namespace npstat { template template - CPP11_auto_ptr > LinInterpolatedTableND::invertRatioResponse( + std::unique_ptr > LinInterpolatedTableND::invertRatioResponse( const unsigned axisNumber, const Axis& replacementAxis, const bool leftLinear, @@ -820,7 +820,7 @@ namespace npstat { std::vector workspace(nCoords); // Create the new table - CPP11_auto_ptr pTable(new LinInterpolatedTableND(newAxes, iType, functionLabel)); + std::unique_ptr pTable(new LinInterpolatedTableND(newAxes, iType, functionLabel)); if (dim_ > 1U) { // Prepare array slices diff --git a/JetMETCorrections/InterpolationTables/interface/StorableHistoNDFunctor.h b/JetMETCorrections/InterpolationTables/interface/StorableHistoNDFunctor.h index 4ab2c430cd30e..29cb753a61794 100644 --- a/JetMETCorrections/InterpolationTables/interface/StorableHistoNDFunctor.h +++ 
b/JetMETCorrections/InterpolationTables/interface/StorableHistoNDFunctor.h @@ -118,7 +118,7 @@ namespace npstat { #include "JetMETCorrections/InterpolationTables/interface/NpstatException.h" #include "Alignment/Geners/interface/binaryIO.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/IOException.hh" #include "JetMETCorrections/InterpolationTables/interface/interpolateHistoND.h" @@ -157,7 +157,7 @@ namespace npstat { throw gs::IOReadFailure( "In npstat::StorableHistoNDFunctor::read: " "input stream failure"); - CPP11_auto_ptr tab(Table::read(tabid, in)); + std::unique_ptr
tab(Table::read(tabid, in)); return new StorableHistoNDFunctor(*tab, deg, descr); } diff --git a/JetMETCorrections/InterpolationTables/interface/StorableInterpolationFunctor.h b/JetMETCorrections/InterpolationTables/interface/StorableInterpolationFunctor.h index d55b430e5fd5d..3ae51cb8adeba 100644 --- a/JetMETCorrections/InterpolationTables/interface/StorableInterpolationFunctor.h +++ b/JetMETCorrections/InterpolationTables/interface/StorableInterpolationFunctor.h @@ -183,7 +183,7 @@ namespace npstat { } // namespace npstat #include "Alignment/Geners/interface/binaryIO.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include #include "Alignment/Geners/interface/IOException.hh" namespace npstat { @@ -212,7 +212,7 @@ namespace npstat { throw gs::IOReadFailure( "In npstat::StorableInterpolationFunctor::read: " "input stream failure"); - CPP11_auto_ptr
tab(Table::read(tabid, in)); + std::unique_ptr
tab(Table::read(tabid, in)); return new StorableInterpolationFunctor(*tab, descr); } } // namespace npstat diff --git a/JetMETCorrections/InterpolationTables/src/DualAxis.cc b/JetMETCorrections/InterpolationTables/src/DualAxis.cc index dd6eb623f9806..01b101da34e77 100644 --- a/JetMETCorrections/InterpolationTables/src/DualAxis.cc +++ b/JetMETCorrections/InterpolationTables/src/DualAxis.cc @@ -2,7 +2,7 @@ #include "Alignment/Geners/interface/binaryIO.hh" #include "Alignment/Geners/interface/IOException.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include namespace npstat { bool DualAxis::write(std::ostream& of) const { @@ -26,10 +26,10 @@ namespace npstat { "In npstat::DualAxis::read: " "input stream failure"); if (c) { - CPP11_auto_ptr axis(UniformAxis::read(clid, in)); + std::unique_ptr axis(UniformAxis::read(clid, in)); return new DualAxis(*axis); } else { - CPP11_auto_ptr axis(GridAxis::read(clid, in)); + std::unique_ptr axis(GridAxis::read(clid, in)); return new DualAxis(*axis); } } diff --git a/JetMETCorrections/InterpolationTables/src/DualHistoAxis.cc b/JetMETCorrections/InterpolationTables/src/DualHistoAxis.cc index ec1f4e39e41b3..f90fc7f2f32af 100644 --- a/JetMETCorrections/InterpolationTables/src/DualHistoAxis.cc +++ b/JetMETCorrections/InterpolationTables/src/DualHistoAxis.cc @@ -2,7 +2,7 @@ #include "Alignment/Geners/interface/binaryIO.hh" #include "Alignment/Geners/interface/IOException.hh" -#include "Alignment/Geners/interface/CPP11_auto_ptr.hh" +#include namespace npstat { bool DualHistoAxis::write(std::ostream& of) const { @@ -26,10 +26,10 @@ namespace npstat { "In npstat::DualHistoAxis::read: " "input stream failure"); if (c) { - CPP11_auto_ptr axis(HistoAxis::read(clid, in)); + std::unique_ptr axis(HistoAxis::read(clid, in)); return new DualHistoAxis(*axis); } else { - CPP11_auto_ptr axis(NUHistoAxis::read(clid, in)); + std::unique_ptr axis(NUHistoAxis::read(clid, in)); return new DualHistoAxis(*axis); } } diff --git 
a/L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py b/L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py index 6399f341f7984..873f8deff4088 100644 --- a/L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py +++ b/L1Trigger/Configuration/python/L1Trigger_EventContent_cff.py @@ -197,10 +197,10 @@ def _appendPhase2Digis(obj): 'keep *_l1tPFClustersFromHGC3DClusters_*_*', 'keep *_l1tPFTracksFromL1TracksBarrel_*_*', 'keep *_l1tPFTracksFromL1TracksHGCal_*_*', - 'keep *_l1tSCPFL1PuppiCorrectedEmulator_*_*', - 'keep *_l1tSCPFL1PuppiCorrectedEmulatorMHT_*_*', - 'keep *_l1tSCPFL1PuppiExtendedCorrectedEmulator_*_*', - 'keep *_l1tSCPFL1PuppiExtendedCorrectedEmulatorMHT_*_*', + 'keep *_l1tSC4PFL1PuppiCorrectedEmulator_*_*', + 'keep *_l1tSC4PFL1PuppiCorrectedEmulatorMHT_*_*', + 'keep *_l1tSC4PFL1PuppiExtendedCorrectedEmulator_*_*', + 'keep *_l1tSC4PFL1PuppiExtendedCorrectedEmulatorMHT_*_*', 'keep *_l1tPhase1JetProducer9x9_*_*', 'keep *_l1tPhase1JetCalibrator9x9_*_*', 'keep *_l1tPhase1JetSumsProducer9x9_*_*', @@ -231,6 +231,7 @@ def _appendPhase2Digis(obj): 'keep *_l1tTkStubsGmt_*_*', 'keep *_l1tTkMuonsGmt_*_*', 'keep *_l1tSAMuonsGmt_*_*', + 'keep *_l1tTkMuonsGmtLowPtFix_*_*', # in the long run this should be removed, but these fix objects will be used for now. 
] obj.outputCommands += l1Phase2Digis diff --git a/L1Trigger/Configuration/python/SimL1CaloEmulator_cff.py b/L1Trigger/Configuration/python/SimL1CaloEmulator_cff.py index 995584b92bc09..bd7d4e337dddc 100644 --- a/L1Trigger/Configuration/python/SimL1CaloEmulator_cff.py +++ b/L1Trigger/Configuration/python/SimL1CaloEmulator_cff.py @@ -4,7 +4,3 @@ # define a core which can be extended in customizations: SimL1CaloEmulator = cms.Sequence( SimL1TCalorimeter ) - -# Emulators are configured from DB (GlobalTags) -# but in the integration branch conffigure from static hackConditions -from L1Trigger.L1TCalorimeter.hackConditions_cff import * diff --git a/L1Trigger/Configuration/python/customiseSettings.py b/L1Trigger/Configuration/python/customiseSettings.py index 5aa071788bfe2..bb253774c073b 100644 --- a/L1Trigger/Configuration/python/customiseSettings.py +++ b/L1Trigger/Configuration/python/customiseSettings.py @@ -2,6 +2,14 @@ import os.path import FWCore.ParameterSet.Config as cms +def L1TSettingsToCaloParamsHI_2023_v0_4_3(process): + process.load("L1Trigger.L1TCalorimeter.caloParamsHI_2023_v0_4_3_cfi") + return process + +def L1TSettingsToCaloParamsHI_2023_v0_4_2(process): + process.load("L1Trigger.L1TCalorimeter.caloParamsHI_2023_v0_4_2_cfi") + return process + def L1TSettingsToCaloParamsHI_2023_v0_4_1(process): process.load("L1Trigger.L1TCalorimeter.caloParamsHI_2023_v0_4_1_cfi") return process diff --git a/L1Trigger/DTTriggerPhase2/interface/DTprimitive.h b/L1Trigger/DTTriggerPhase2/interface/DTprimitive.h index cec71913409f8..512ffca09cc49 100644 --- a/L1Trigger/DTTriggerPhase2/interface/DTprimitive.h +++ b/L1Trigger/DTTriggerPhase2/interface/DTprimitive.h @@ -37,6 +37,11 @@ class DTPrimitive { const int superLayerId() const { return superLayerId_; }; const cmsdt::LATERAL_CASES laterality() const { return laterality_; }; + bool operator==(const DTPrimitive& dtp) { + return (tdcTimeStamp() == dtp.tdcTimeStamp() && channelId() == dtp.channelId() && layerId() == 
dtp.layerId() && + cameraId() == dtp.cameraId() && cameraId() == dtp.cameraId() && superLayerId() == dtp.superLayerId()); + } + private: int cameraId_; // Chamber ID int superLayerId_; // SL ID diff --git a/L1Trigger/DTTriggerPhase2/interface/LateralityBasicProvider.h b/L1Trigger/DTTriggerPhase2/interface/LateralityBasicProvider.h new file mode 100644 index 0000000000000..6fbb9d82a4c96 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/LateralityBasicProvider.h @@ -0,0 +1,46 @@ +#ifndef L1Trigger_DTTriggerPhase2_LateralityBasicProvider_h +#define L1Trigger_DTTriggerPhase2_LateralityBasicProvider_h + +#include "L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h" + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +struct lat_combination { + short missing_layer; + short cellLayout[cmsdt::NUM_LAYERS]; + lat_vector latcombs; +}; + +// =============================================================================== +// Class declarations +// =============================================================================== + +class LateralityBasicProvider : public LateralityProvider { +public: + // Constructors and destructor + LateralityBasicProvider(const edm::ParameterSet &pset, edm::ConsumesCollector &iC); + ~LateralityBasicProvider() override; + + // Main methods + void initialise(const edm::EventSetup &iEventSetup) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &lateralities) override; + + void finish() override; + + // Other public methods + +private: + // Private methods + void analyze(MuonPathPtr &inMPath, std::vector &lateralities); + void fill_lat_combinations(); + // Private attributes + const bool debug_; + std::vector lat_combinations; +}; + +#endif diff --git 
a/L1Trigger/DTTriggerPhase2/interface/LateralityCoarsedProvider.h b/L1Trigger/DTTriggerPhase2/interface/LateralityCoarsedProvider.h new file mode 100644 index 0000000000000..e7f0ac25800b2 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/LateralityCoarsedProvider.h @@ -0,0 +1,50 @@ +#ifndef L1Trigger_DTTriggerPhase2_LateralityCoarsedProvider_h +#define L1Trigger_DTTriggerPhase2_LateralityCoarsedProvider_h + +#include "L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h" + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +struct lat_coarsed_combination { + short missing_layer; + short cellLayout[cmsdt::NUM_LAYERS]; + short coarsed_times[cmsdt::NUM_LAYERS]; + lat_vector latcombs; +}; + +// =============================================================================== +// Class declarations +// =============================================================================== + +class LateralityCoarsedProvider : public LateralityProvider { +public: + // Constructors and destructor + LateralityCoarsedProvider(const edm::ParameterSet &pset, edm::ConsumesCollector &iC); + ~LateralityCoarsedProvider() override; + + // Main methods + void initialise(const edm::EventSetup &iEventSetup) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &lateralities) override; + + void finish() override; + + // Other public methods + +private: + // Private methods + void analyze(MuonPathPtr &inMPath, std::vector &lateralities); + std::vector coarsify_times(MuonPathPtr &inMPath); + void fill_lat_combinations(); + std::vector> convertString(std::string chain); + // Private attributes + const bool debug_; + std::vector lat_combinations; + edm::FileInPath laterality_filename_; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h 
b/L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h new file mode 100644 index 0000000000000..866fcb08533cb --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h @@ -0,0 +1,58 @@ +#ifndef Phase2L1Trigger_DTTrigger_LateralityProvider_h +#define Phase2L1Trigger_DTTrigger_LateralityProvider_h + +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/FrameworkfwdMostUsed.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Run.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +#include "L1Trigger/DTTriggerPhase2/interface/MuonPath.h" +#include "L1Trigger/DTTriggerPhase2/interface/constants.h" + +#include +#include + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +// =============================================================================== +// Class declarations +// =============================================================================== + +using latcomb = std::vector; +using lat_vector = std::vector; + +class LateralityProvider { +public: + // Constructors and destructor + LateralityProvider(const edm::ParameterSet& pset, edm::ConsumesCollector& iC); + virtual ~LateralityProvider(); + + // Main methods + virtual void initialise(const edm::EventSetup& iEventSetup); + virtual void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + MuonPathPtrs& inMpath, + std::vector& lateralities) = 0; + + virtual void finish(); + + // Other public methods + + // Public attributes + lat_vector LAT_VECTOR_NULL = {{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}; + 
+private: + // Private methods + + // Private attributes + const bool debug_; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MPCleanHitsFilter.h b/L1Trigger/DTTriggerPhase2/interface/MPCleanHitsFilter.h index d458ccdce1cea..f3713b322adbd 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MPCleanHitsFilter.h +++ b/L1Trigger/DTTriggerPhase2/interface/MPCleanHitsFilter.h @@ -26,7 +26,11 @@ class MPCleanHitsFilter : public MPFilter { const edm::EventSetup& iEventSetup, std::vector& inMPath, std::vector& outMPath) override{}; - + void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + std::vector& inSLMPath, + std::vector& inCorMPath, + std::vector& outMPath) override{}; void run(edm::Event& iEvent, const edm::EventSetup& iEventSetup, MuonPathPtrs& inMPath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MPCorFilter.h b/L1Trigger/DTTriggerPhase2/interface/MPCorFilter.h new file mode 100644 index 0000000000000..01cd5a860cc69 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/MPCorFilter.h @@ -0,0 +1,76 @@ +#ifndef Phase2L1Trigger_DTTrigger_MPCorFilter_h +#define Phase2L1Trigger_DTTrigger_MPCorFilter_h + +#include "L1Trigger/DTTriggerPhase2/interface/MPFilter.h" +#include "L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h" + +#include +#include + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +// =============================================================================== +// Class declarations +// =============================================================================== + +struct valid_cor_tp_t { + bool valid; + cmsdt::metaPrimitive mp; + int coarsed_t0; + int coarsed_pos; + int coarsed_slope; + valid_cor_tp_t() : valid(false), mp(cmsdt::metaPrimitive()), coarsed_t0(-1), coarsed_pos(-1), coarsed_slope(-1) {} + valid_cor_tp_t(bool valid, cmsdt::metaPrimitive mp, int 
coarsed_t0, int coarsed_pos, int coarsed_slope) + : valid(valid), mp(mp), coarsed_t0(coarsed_t0), coarsed_pos(coarsed_pos), coarsed_slope(coarsed_slope) {} +}; + +using valid_cor_tp_arr_t = std::vector; + +class MPCorFilter : public MPFilter { +public: + // Constructors and destructor + MPCorFilter(const edm::ParameterSet &pset); + ~MPCorFilter() override = default; + + // Main methods + void initialise(const edm::EventSetup &iEventSetup) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPath, + std::vector &outMPath) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inSLMPath, + std::vector &inCorMPath, + std::vector &outMPath) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMPath, + MuonPathPtrs &outMPath) override{}; + + void finish() override; + + // Other public methods + + // Public attributes + void printmP(cmsdt::metaPrimitive mP); + +private: + // Private methods + std::vector filter(std::vector SL1mps, + std::vector SL2mps, + std::vector SL3mps, + std::vector Cormps); + std::vector coarsify(cmsdt::metaPrimitive mp, int sl); + bool isDead(cmsdt::metaPrimitive mp, std::vector coarsed, std::map tps_per_bx); + int killTps(cmsdt::metaPrimitive mp, std::vector coarsed, int bx, std::map &tps_per_bx); + int match(cmsdt::metaPrimitive mp, std::vector coarsed, valid_cor_tp_t valid_cor_tp2); + int get_chi2(cmsdt::metaPrimitive mp); + + // Private attributes + const bool debug_; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MPFilter.h b/L1Trigger/DTTriggerPhase2/interface/MPFilter.h index a75a1a74e6474..c7743bf75f304 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MPFilter.h +++ b/L1Trigger/DTTriggerPhase2/interface/MPFilter.h @@ -12,7 +12,7 @@ #include "Geometry/Records/interface/MuonGeometryRecord.h" #include "Geometry/DTGeometry/interface/DTGeometry.h" -#include "Geometry/DTGeometry/interface/DTLayer.h" 
+#include "DataFormats/MuonDetId/interface/DTLayerId.h" #include #include @@ -37,6 +37,11 @@ class MPFilter { const edm::EventSetup& iEventSetup, std::vector& inMPath, std::vector& outMPath) = 0; + virtual void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + std::vector& inSLMPath, + std::vector& inCorMPath, + std::vector& outMPath) = 0; virtual void run(edm::Event& iEvent, const edm::EventSetup& iEventSetup, MuonPathPtrs& inMPath, @@ -46,6 +51,12 @@ class MPFilter { // Other public methods + // Public attributes + // max drift velocity + edm::FileInPath maxdrift_filename_; + int maxdriftinfo_[5][4][14]; + int max_drift_tdc = -1; + private: // Private attributes const bool debug_; diff --git a/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilter.h b/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilter.h index 03f52b181db2d..03db99c9a6d9b 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilter.h +++ b/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilter.h @@ -26,6 +26,11 @@ class MPQualityEnhancerFilter : public MPFilter { const edm::EventSetup &iEventSetup, std::vector &inMPath, std::vector &outMPath) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inSLMPath, + std::vector &inCorMPath, + std::vector &outMPath) override{}; void run(edm::Event &iEvent, const edm::EventSetup &iEventSetup, MuonPathPtrs &inMPath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilterBayes.h b/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilterBayes.h index 5db4357905d5a..ef108561bb06b 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilterBayes.h +++ b/L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilterBayes.h @@ -27,6 +27,11 @@ class MPQualityEnhancerFilterBayes : public MPFilter { const edm::EventSetup &iEventSetup, std::vector &inMPath, std::vector &outMPath) override; + void run(edm::Event &iEvent, + const edm::EventSetup 
&iEventSetup, + std::vector &inSLMPath, + std::vector &inCorMPath, + std::vector &outMPath) override{}; void run(edm::Event &iEvent, const edm::EventSetup &iEventSetup, MuonPathPtrs &inMPath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MPRedundantFilter.h b/L1Trigger/DTTriggerPhase2/interface/MPRedundantFilter.h index fbab5c18cb517..53c6c611ea1ec 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MPRedundantFilter.h +++ b/L1Trigger/DTTriggerPhase2/interface/MPRedundantFilter.h @@ -27,6 +27,11 @@ class MPRedundantFilter : public MPFilter { const edm::EventSetup& iEventSetup, std::vector& inMPath, std::vector& outMPath) override{}; + void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + std::vector& inSLMPath, + std::vector& inCorMPath, + std::vector& outMPath) override{}; void run(edm::Event& iEvent, const edm::EventSetup& iEventSetup, MuonPathPtrs& inMPath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MPSLFilter.h b/L1Trigger/DTTriggerPhase2/interface/MPSLFilter.h new file mode 100644 index 0000000000000..63d5ec54182c1 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/MPSLFilter.h @@ -0,0 +1,71 @@ +#ifndef Phase2L1Trigger_DTTrigger_MPSLFilter_h +#define Phase2L1Trigger_DTTrigger_MPSLFilter_h + +#include "L1Trigger/DTTriggerPhase2/interface/MPFilter.h" +#include "L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h" + +#include +#include + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +// =============================================================================== +// Class declarations +// =============================================================================== + +struct valid_tp_t { + bool valid; + cmsdt::metaPrimitive mp; + valid_tp_t() : valid(false), mp(cmsdt::metaPrimitive()) {} + valid_tp_t(bool valid, cmsdt::metaPrimitive mp) : valid(valid), mp(mp) {} +}; + 
+using valid_tp_arr_t = std::vector; + +class MPSLFilter : public MPFilter { +public: + // Constructors and destructor + MPSLFilter(const edm::ParameterSet &pset); + ~MPSLFilter() override = default; + + // Main methods + void initialise(const edm::EventSetup &iEventSetup) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPath, + std::vector &outMPath) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inSLMPath, + std::vector &inCorMPath, + std::vector &outMPath) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMPath, + MuonPathPtrs &outMPath) override{}; + + void finish() override; + + // Other public methods + + // Public attributes + void printmP(cmsdt::metaPrimitive mP); + +private: + // Private methods + // std::vector filter(std::map>); + std::vector filter(std::vector mps); + bool isDead(cmsdt::metaPrimitive mp, std::map tps_per_bx); + int killTps(cmsdt::metaPrimitive mp, int bx, std::map &tps_per_bx); + int share_hit(cmsdt::metaPrimitive mp, cmsdt::metaPrimitive mp2); + int match(cmsdt::metaPrimitive mp1, cmsdt::metaPrimitive mp2); + int smaller_chi2(cmsdt::metaPrimitive mp, cmsdt::metaPrimitive mp2); + int get_chi2(cmsdt::metaPrimitive mp); + + // Private attributes + const bool debug_; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPath.h b/L1Trigger/DTTriggerPhase2/interface/MuonPath.h index 9355cf0cbaee6..b23e4d6ef53bb 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MuonPath.h +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPath.h @@ -1,130 +1,134 @@ -#ifndef L1Trigger_DTTriggerPhase2_MuonPath_h -#define L1Trigger_DTTriggerPhase2_MuonPath_h -#include -#include - -#include "L1Trigger/DTTriggerPhase2/interface/DTprimitive.h" - -class MuonPath { -public: - MuonPath(); - MuonPath(DTPrimitivePtrs &ptrPrimitive, int prup = 0, int prdw = 0); - MuonPath(DTPrimitives &ptrPrimitive, int prup = 0, int prdw 
= 0); - MuonPath(std::shared_ptr &ptr); - virtual ~MuonPath() {} - - // setter methods - void setPrimitive(DTPrimitivePtr &ptr, int layer); - void setNPrimitives(short nprim) { nprimitives_ = nprim; } - void setNPrimitivesUp(short nprim) { nprimitivesUp_ = nprim; } - void setNPrimitivesDown(short nprim) { nprimitivesDown_ = nprim; } - void setCellHorizontalLayout(int layout[4]); - void setCellHorizontalLayout(const int *layout); - void setBaseChannelId(int bch) { baseChannelId_ = bch; } - void setQuality(cmsdt::MP_QUALITY qty) { quality_ = qty; } - void setBxTimeValue(int time); - void setLateralComb(cmsdt::LATERAL_CASES latComb[4]); - void setLateralComb(const cmsdt::LATERAL_CASES *latComb); - void setLateralCombFromPrimitives(void); - - void setHorizPos(float pos) { horizPos_ = pos; } - void setTanPhi(float tanPhi) { tanPhi_ = tanPhi; } - void setChiSquare(float chi) { chiSquare_ = chi; } - void setPhi(float phi) { phi_ = phi; } - void setPhiB(float phib) { phiB_ = phib; } - void setPhiCMSSW(float phi_cmssw) { phicmssw_ = phi_cmssw; } - void setPhiBCMSSW(float phib_cmssw) { phiBcmssw_ = phib_cmssw; } - void setXCoorCell(float x, int cell) { xCoorCell_[cell] = x; } - void setDriftDistance(float dx, int cell) { xDriftDistance_[cell] = dx; } - void setXWirePos(float x, int cell) { xWirePos_[cell] = x; } - void setZWirePos(float z, int cell) { zWirePos_[cell] = z; } - void setTWireTDC(float t, int cell) { tWireTDC_[cell] = t; } - void setRawId(uint32_t id) { rawId_ = id; } - - // getter methods - DTPrimitivePtr primitive(int layer) const { return prim_[layer]; } - short nprimitives() const { return nprimitives_; } - short nprimitivesDown() const { return nprimitivesDown_; } - short nprimitivesUp() const { return nprimitivesUp_; } - const int *cellLayout() const { return cellLayout_; } - int baseChannelId() const { return baseChannelId_; } - cmsdt::MP_QUALITY quality() const { return quality_; } - int bxTimeValue() const { return bxTimeValue_; } - int bxNumId() const 
{ return bxNumId_; } - float tanPhi() const { return tanPhi_; } - const cmsdt::LATERAL_CASES *lateralComb() const { return (lateralComb_); } - float horizPos() const { return horizPos_; } - float chiSquare() const { return chiSquare_; } - float phi() const { return phi_; } - float phiB() const { return phiB_; } - float phi_cmssw() const { return phicmssw_; } - float phiB_cmssw() const { return phiBcmssw_; } - float xCoorCell(int cell) const { return xCoorCell_[cell]; } - float xDriftDistance(int cell) const { return xDriftDistance_[cell]; } - float xWirePos(int cell) const { return xWirePos_[cell]; } - float zWirePos(int cell) const { return zWirePos_[cell]; } - float tWireTDC(int cell) const { return tWireTDC_[cell]; } - uint32_t rawId() const { return rawId_; } - - // Other methods - bool isEqualTo(MuonPath *ptr); - bool isAnalyzable(); - bool completeMP(); - -private: - //------------------------------------------------------------------ - //--- MuonPath's data - //------------------------------------------------------------------ - /* - Primitives that make up the path. The 0th position holds the channel ID of - the lower layer. The order is critical. - */ - DTPrimitivePtrs prim_; //ENSURE that there are no more than 4-8 prims - short nprimitives_; - short nprimitivesUp_; - short nprimitivesDown_; - - /* Horizontal position of each cell (one per layer), in half-cell units, - with respect of the lower layer (layer 0). 
- */ - int cellLayout_[cmsdt::NUM_LAYERS]; - int baseChannelId_; - - //------------------------------------------------------------------ - //--- Fit results: - //------------------------------------------------------------------ - /* path quality */ - cmsdt::MP_QUALITY quality_; - - /* Lateral combination */ - cmsdt::LATERAL_CASES lateralComb_[cmsdt::NUM_LAYERS]; - - /* BX time value with respect to BX0 of the orbit */ - int bxTimeValue_; - - /* BX number in the orbit */ - int bxNumId_; - - /* Cell parameters */ - float xCoorCell_[cmsdt::NUM_LAYERS_2SL]; // Horizontal position of the hit in each cell - float xDriftDistance_[cmsdt::NUM_LAYERS_2SL]; // Drift distance on the cell (absolute value) - float xWirePos_[cmsdt::NUM_LAYERS_2SL]; - float zWirePos_[cmsdt::NUM_LAYERS_2SL]; - float tWireTDC_[cmsdt::NUM_LAYERS_2SL]; - - float tanPhi_; - float horizPos_; - float chiSquare_; - float phi_; - float phiB_; - float phicmssw_; - float phiBcmssw_; - - uint32_t rawId_; -}; - -typedef std::vector MuonPaths; -typedef std::shared_ptr MuonPathPtr; -typedef std::vector MuonPathPtrs; - -#endif +#ifndef L1Trigger_DTTriggerPhase2_MuonPath_h +#define L1Trigger_DTTriggerPhase2_MuonPath_h +#include +#include + +#include "L1Trigger/DTTriggerPhase2/interface/DTprimitive.h" + +class MuonPath { +public: + MuonPath(); + MuonPath(DTPrimitivePtrs &ptrPrimitive, int prup = 0, int prdw = 0); + MuonPath(DTPrimitives &ptrPrimitive, int prup = 0, int prdw = 0); + MuonPath(std::shared_ptr &ptr); + virtual ~MuonPath() {} + + // setter methods + void setPrimitive(DTPrimitivePtr &ptr, int layer); + void setNPrimitives(short nprim) { nprimitives_ = nprim; } + void setNPrimitivesUp(short nprim) { nprimitivesUp_ = nprim; } + void setNPrimitivesDown(short nprim) { nprimitivesDown_ = nprim; } + void setCellHorizontalLayout(int layout[4]); + void setCellHorizontalLayout(const int *layout); + void setBaseChannelId(int bch) { baseChannelId_ = bch; } + void setMissingLayer(int layer) { missingLayer_ = 
layer; } + void setQuality(cmsdt::MP_QUALITY qty) { quality_ = qty; } + void setBxTimeValue(int time); + void setLateralComb(cmsdt::LATERAL_CASES latComb[4]); + void setLateralComb(const cmsdt::LATERAL_CASES *latComb); + void setLateralCombFromPrimitives(void); + + void setHorizPos(float pos) { horizPos_ = pos; } + void setTanPhi(float tanPhi) { tanPhi_ = tanPhi; } + void setChiSquare(float chi) { chiSquare_ = chi; } + void setPhi(float phi) { phi_ = phi; } + void setPhiB(float phib) { phiB_ = phib; } + void setPhiCMSSW(float phi_cmssw) { phicmssw_ = phi_cmssw; } + void setPhiBCMSSW(float phib_cmssw) { phiBcmssw_ = phib_cmssw; } + void setXCoorCell(float x, int cell) { xCoorCell_[cell] = x; } + void setDriftDistance(float dx, int cell) { xDriftDistance_[cell] = dx; } + void setXWirePos(float x, int cell) { xWirePos_[cell] = x; } + void setZWirePos(float z, int cell) { zWirePos_[cell] = z; } + void setTWireTDC(float t, int cell) { tWireTDC_[cell] = t; } + void setRawId(uint32_t id) { rawId_ = id; } + + // getter methods + DTPrimitivePtr primitive(int layer) const { return prim_[layer]; } + short nprimitives() const { return nprimitives_; } + short nprimitivesDown() const { return nprimitivesDown_; } + short nprimitivesUp() const { return nprimitivesUp_; } + const int *cellLayout() const { return cellLayout_; } + int baseChannelId() const { return baseChannelId_; } + int missingLayer() const { return missingLayer_; } + cmsdt::MP_QUALITY quality() const { return quality_; } + int bxTimeValue() const { return bxTimeValue_; } + int bxNumId() const { return bxNumId_; } + float tanPhi() const { return tanPhi_; } + const cmsdt::LATERAL_CASES *lateralComb() const { return (lateralComb_); } + float horizPos() const { return horizPos_; } + float chiSquare() const { return chiSquare_; } + float phi() const { return phi_; } + float phiB() const { return phiB_; } + float phi_cmssw() const { return phicmssw_; } + float phiB_cmssw() const { return phiBcmssw_; } + float 
xCoorCell(int cell) const { return xCoorCell_[cell]; } + float xDriftDistance(int cell) const { return xDriftDistance_[cell]; } + float xWirePos(int cell) const { return xWirePos_[cell]; } + float zWirePos(int cell) const { return zWirePos_[cell]; } + float tWireTDC(int cell) const { return tWireTDC_[cell]; } + uint32_t rawId() const { return rawId_; } + + // Other methods + bool isEqualTo(MuonPath *ptr); + bool isAnalyzable(); + bool completeMP(); + +private: + //------------------------------------------------------------------ + //--- MuonPath's data + //------------------------------------------------------------------ + /* + Primitives that make up the path. The 0th position holds the channel ID of + the lower layer. The order is critical. + */ + DTPrimitivePtrs prim_; //ENSURE that there are no more than 4-8 prims + short nprimitives_; + short nprimitivesUp_; + short nprimitivesDown_; + + /* Horizontal position of each cell (one per layer), in half-cell units, + with respect of the lower layer (layer 0). 
+ */ + int cellLayout_[cmsdt::NUM_LAYERS]; + int baseChannelId_; + + int missingLayer_; + + //------------------------------------------------------------------ + //--- Fit results: + //------------------------------------------------------------------ + /* path quality */ + cmsdt::MP_QUALITY quality_; + + /* Lateral combination */ + cmsdt::LATERAL_CASES lateralComb_[cmsdt::NUM_LAYERS]; + + /* BX time value with respect to BX0 of the orbit */ + int bxTimeValue_; + + /* BX number in the orbit */ + int bxNumId_; + + /* Cell parameters */ + float xCoorCell_[cmsdt::NUM_LAYERS_2SL]; // Horizontal position of the hit in each cell + float xDriftDistance_[cmsdt::NUM_LAYERS_2SL]; // Drift distance on the cell (absolute value) + float xWirePos_[cmsdt::NUM_LAYERS_2SL]; + float zWirePos_[cmsdt::NUM_LAYERS_2SL]; + float tWireTDC_[cmsdt::NUM_LAYERS_2SL]; + + float tanPhi_; + float horizPos_; + float chiSquare_; + float phi_; + float phiB_; + float phicmssw_; + float phiBcmssw_; + + uint32_t rawId_; +}; + +typedef std::vector MuonPaths; +typedef std::shared_ptr MuonPathPtr; +typedef std::vector MuonPathPtrs; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyticAnalyzer.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyticAnalyzer.h index 9b4f8dd0e32e3..2096cbfa9ea74 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyticAnalyzer.h +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyticAnalyzer.h @@ -53,6 +53,15 @@ class MuonPathAnalyticAnalyzer : public MuonPathAnalyzer { const edm::EventSetup &iEventSetup, MuonPathPtrs &inMpath, std::vector &metaPrimitives) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &lateralities, + std::vector &metaPrimitives) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPaths, + std::vector &outMPaths) override{}; void run(edm::Event &iEvent, const edm::EventSetup &iEventSetup, MuonPathPtrs 
&inMpath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzer.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzer.h index e6cc4e0b58cf0..bc00b779fbcda 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzer.h +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzer.h @@ -19,6 +19,7 @@ #include "L1Trigger/DTTriggerPhase2/interface/MuonPath.h" #include "L1Trigger/DTTriggerPhase2/interface/constants.h" #include "L1Trigger/DTTriggerPhase2/interface/GlobalCoordsObtainer.h" +#include "L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h" #include "Geometry/Records/interface/MuonGeometryRecord.h" #include "Geometry/DTGeometry/interface/DTGeometry.h" @@ -46,6 +47,15 @@ class MuonPathAnalyzer { const edm::EventSetup& iEventSetup, MuonPathPtrs& inMpath, std::vector& metaPrimitives) = 0; + virtual void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + MuonPathPtrs& inMpath, + std::vector& lateralities, + std::vector& metaPrimitives) = 0; + virtual void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + std::vector& inMPaths, + std::vector& outMPaths) = 0; virtual void run(edm::Event& iEvent, const edm::EventSetup& iEventSetup, MuonPathPtrs& inMpath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzerInChamber.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzerInChamber.h index 9f2983b9d0c08..d867437359825 100644 --- a/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzerInChamber.h +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzerInChamber.h @@ -27,6 +27,15 @@ class MuonPathAnalyzerInChamber : public MuonPathAnalyzer { const edm::EventSetup &iEventSetup, MuonPathPtrs &inMpath, std::vector &metaPrimitives) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &lateralities, + std::vector &metaPrimitives) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPaths, + std::vector 
&outMPaths) override{}; void run(edm::Event &iEvent, const edm::EventSetup &iEventSetup, MuonPathPtrs &inMpath, diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathConfirmator.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathConfirmator.h new file mode 100644 index 0000000000000..c953434be6a71 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathConfirmator.h @@ -0,0 +1,72 @@ +#ifndef L1Trigger_DTTriggerPhase2_MuonPathConfirmator_h +#define L1Trigger_DTTriggerPhase2_MuonPathConfirmator_h + +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/FrameworkfwdMostUsed.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Run.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +#include "L1Trigger/DTTriggerPhase2/interface/constants.h" +#include "DataFormats/MuonDetId/interface/DTChamberId.h" +#include "DataFormats/MuonDetId/interface/DTSuperLayerId.h" +#include "DataFormats/MuonDetId/interface/DTLayerId.h" +#include "DataFormats/MuonDetId/interface/DTWireId.h" +#include "DataFormats/DTDigi/interface/DTDigiCollection.h" + +#include +#include + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +// =============================================================================== +// Class declarations +// =============================================================================== + +class MuonPathConfirmator { +public: + // Constructors and destructor + MuonPathConfirmator(const edm::ParameterSet &pset, edm::ConsumesCollector &iC); + ~MuonPathConfirmator(); + + // Main methods + void 
initialise(const edm::EventSetup &iEventSetup); + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector inMetaPrimitives, + edm::Handle dtdigis, + std::vector &outMetaPrimitives); + + void finish(); + // Other public methods + +private: + // Private methods + void analyze(cmsdt::metaPrimitive mp, + edm::Handle dtdigis, + std::vector &outMetaPrimitives); + // Private attributes + bool debug_; + double minx_match_2digis_; + //shift + edm::FileInPath shift_filename_; + std::map shiftinfo_; + edm::FileInPath maxdrift_filename_; + int maxdriftinfo_[5][4][14]; + int max_drift_tdc = -1; + + int PARTIALS_PRECISSION = 4; + int SEMICHAMBER_H_PRECISSION = 13 + PARTIALS_PRECISSION; + int SEMICHAMBER_RES_SHR = SEMICHAMBER_H_PRECISSION; + int LYRANDAHALF_RES_SHR = 4; + float SEMICHAMBER_H_REAL = ((235. / 2.) / (16. * 6.5)) * std::pow(2, SEMICHAMBER_H_PRECISSION); + int SEMICHAMBER_H = int(SEMICHAMBER_H_REAL); +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathCorFitter.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathCorFitter.h new file mode 100644 index 0000000000000..fa50ddebae2ca --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathCorFitter.h @@ -0,0 +1,74 @@ +#ifndef L1Trigger_DTTriggerPhase2_MuonPathCorFitter_h +#define L1Trigger_DTTriggerPhase2_MuonPathCorFitter_h + +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathFitter.h" + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +inline bool bxSort(const cmsdt::bx_sl_vector &vA, const cmsdt::bx_sl_vector &vB) { + if (vA.bx > vB.bx) + return true; + else if (vA.bx == vB.bx) + return (vA.sl > vB.sl); + else + return false; +} + +using mp_group = std::vector; + +// =============================================================================== +// Class declarations +// 
=============================================================================== + +class MuonPathCorFitter : public MuonPathFitter { +public: + // Constructors and destructor + MuonPathCorFitter(const edm::ParameterSet &pset, + edm::ConsumesCollector &iC, + std::shared_ptr &globalcoordsobtainer); + ~MuonPathCorFitter() override; + + // Main methods + void initialise(const edm::EventSetup &iEventSetup) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &metaPrimitives) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &lateralities, + std::vector &metaPrimitives) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPaths, + std::vector &outMPaths) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + MuonPathPtrs &outMPath) override{}; + + void finish() override; + + // Other public methods + + // luts + edm::FileInPath both_sl_filename_; + +private: + // Private methods + void analyze(mp_group mp, std::vector &metaPrimitives); + void fillLuts(); + int get_rom_addr(mp_group mps, std::vector missing_hits); + bool canCorrelate(cmsdt::metaPrimitive mp_sl1, cmsdt::metaPrimitive mp_sl3); + + // Private attributes + int dT0_correlate_TP_; + + // double chi2Th_; + std::vector> lut_2sl; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathFitter.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathFitter.h new file mode 100644 index 0000000000000..3c19d831112c7 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathFitter.h @@ -0,0 +1,115 @@ +#ifndef L1Trigger_DTTriggerPhase2_MuonPathFitter_h +#define L1Trigger_DTTriggerPhase2_MuonPathFitter_h + +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzer.h" +#include "L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h" + +// 
=============================================================================== +// Previous definitions and declarations +// =============================================================================== + +using coeff_arr_t = std::vector>; +struct coeffs_t { + coeff_arr_t t0; + coeff_arr_t position; + coeff_arr_t slope; + coeffs_t() + : t0(cmsdt::N_COEFFS, std::vector(cmsdt::GENERIC_COEFF_WIDTH, 0)), + position(cmsdt::N_COEFFS, std::vector(cmsdt::GENERIC_COEFF_WIDTH, 0)), + slope(cmsdt::N_COEFFS, std::vector(cmsdt::GENERIC_COEFF_WIDTH, 0)) {} +}; + +struct SLhitP { + int ti; // unsigned(16 downto 0); -- 12 msb = bunch_ctr, 5 lsb = tdc counts, resolution 25/32 ns + int wi; // unsigned(6 downto 0); -- ~ 96 channels per layer + int ly; // unsigned(1 downto 0); -- 4 layers + int wp; // signed(WIREPOS_WIDTH-1 downto 0); +}; + +struct fit_common_in_t { + // int valid; not needed, we will not propagate the mpath to the fitter + std::vector hits; + std::vector hits_valid; // slv(0 to 7) + std::vector lateralities; // slv(0 to 7) + coeffs_t coeffs; + int coarse_bctr; // unsigned(11 downto 0) + int coarse_wirepos; // signed(WIDTH_FULL_POS-1 downto WIREPOS_NORM_LSB_IGNORED); +}; + +struct fit_common_out_t { + int t0; + int slope; + int position; + int chi2; + int valid_fit; + fit_common_out_t() : t0(0), slope(0), position(0), chi2(0), valid_fit(0) {} +}; + +// =============================================================================== +// Class declarations +// =============================================================================== + +class MuonPathFitter : public MuonPathAnalyzer { +public: + // Constructors and destructor + MuonPathFitter(const edm::ParameterSet &pset, + edm::ConsumesCollector &iC, + std::shared_ptr &globalcoordsobtainer); + ~MuonPathFitter() override; + + // Main methods + + // Other public methods + coeffs_t RomDataConvert(std::vector slv, + short COEFF_WIDTH_T0, + short COEFF_WIDTH_POSITION, + short COEFF_WIDTH_SLOPE, + short LOLY, + short 
HILY); + + bool hasPosRF(int wh, int sec) { return wh > 0 || (wh == 0 && sec % 4 > 1); }; + void setChi2Th(double chi2Th) { chi2Th_ = chi2Th; }; + void setTanPhiTh(double tanPhiTh) { tanPhiTh_ = tanPhiTh; }; + + // Public attributes + DTGeometry const *dtGeo_; + edm::ESGetToken dtGeomH; + + //shift + edm::FileInPath shift_filename_; + std::map shiftinfo_; + + // max drift velocity + edm::FileInPath maxdrift_filename_; + int maxdriftinfo_[5][4][14]; + int max_drift_tdc = -1; + + int get_rom_addr(MuonPathPtr &inMPath, latcomb lats); + fit_common_out_t fit(fit_common_in_t fit_common_in, + int XI_WIDTH, + int COEFF_WIDTH_T0, + int COEFF_WIDTH_POSITION, + int COEFF_WIDTH_SLOPE, + int PRECISSION_T0, + int PRECISSION_POSITION, + int PRECISSION_SLOPE, + int PROD_RESIZE_T0, + int PROD_RESIZE_POSITION, + int PROD_RESIZE_SLOPE, + int MAX_DRIFT_TDC, + int sl); + + double tanPhiTh_; + const bool debug_; + double chi2Th_; + + // global coordinates + std::shared_ptr globalcoordsobtainer_; + +private: + // Private methods + + // Private attributes +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/MuonPathSLFitter.h b/L1Trigger/DTTriggerPhase2/interface/MuonPathSLFitter.h new file mode 100644 index 0000000000000..b15402ff4424e --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/MuonPathSLFitter.h @@ -0,0 +1,69 @@ +#ifndef L1Trigger_DTTriggerPhase2_MuonPathSLFitter_h +#define L1Trigger_DTTriggerPhase2_MuonPathSLFitter_h + +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathFitter.h" + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +// =============================================================================== +// Class declarations +// =============================================================================== + +class MuonPathSLFitter : public MuonPathFitter { +public: + // Constructors and 
destructor + MuonPathSLFitter(const edm::ParameterSet &pset, + edm::ConsumesCollector &iC, + std::shared_ptr &globalcoordsobtainer); + ~MuonPathSLFitter() override; + + // Main methods + void initialise(const edm::EventSetup &iEventSetup) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &metaPrimitives) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + std::vector &lateralities, + std::vector &metaPrimitives) override; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPaths, + std::vector &outMPaths) override{}; + void run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &inMpath, + MuonPathPtrs &outMPath) override{}; + + void finish() override; + + // Other public methods + + //shift theta + edm::FileInPath shift_theta_filename_; + std::map shiftthetainfo_; + + // luts + edm::FileInPath sl1_filename_; + edm::FileInPath sl2_filename_; + edm::FileInPath sl3_filename_; + +private: + // Private methods + void analyze(MuonPathPtr &inMPath, lat_vector lat_combs, std::vector &metaPrimitives); + void fillLuts(); + int get_rom_addr(MuonPathPtr &inMPath, latcomb lats); + + // Private attributes + + // double chi2Th_; + std::vector> lut_sl1; + std::vector> lut_sl2; + std::vector> lut_sl3; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/TrapezoidalGrouping.h b/L1Trigger/DTTriggerPhase2/interface/TrapezoidalGrouping.h new file mode 100644 index 0000000000000..323831af2ee9b --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/TrapezoidalGrouping.h @@ -0,0 +1,332 @@ +#ifndef Phase2L1Trigger_DTTrigger_TrapezoidalGrouping_h +#define Phase2L1Trigger_DTTrigger_TrapezoidalGrouping_h + +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +#include "DataFormats/MuonDetId/interface/DTChamberId.h" +#include 
"DataFormats/MuonDetId/interface/DTSuperLayerId.h" +#include "DataFormats/MuonDetId/interface/DTLayerId.h" +#include "DataFormats/MuonDetId/interface/DTWireId.h" +#include "DataFormats/DTDigi/interface/DTDigiCollection.h" + +#include "L1Trigger/DTTriggerPhase2/interface/MuonPath.h" +#include "L1Trigger/DTTriggerPhase2/interface/constants.h" + +#include "L1Trigger/DTTriggerPhase2/interface/MotherGrouping.h" + +#include +#include +#include + +// =============================================================================== +// Previous definitions and declarations +// =============================================================================== + +/* + Channels are labeled following next schema: + --------------------------------- + | 6 | 7 | 8 | 9 | + --------------------------------- + | 3 | 4 | 5 | + ------------------------- + | 1 | 2 | + ----------------- + | 0 | + --------- +*/ + +inline bool hitWireSort(const DTPrimitive& hit1, const DTPrimitive& hit2) { + int wi1 = hit1.channelId(); + int wi2 = hit2.channelId(); + + if (wi1 < wi2) + return true; + else + return false; +} + +inline bool hitLayerSort(const DTPrimitive& hit1, const DTPrimitive& hit2) { + int lay1 = hit1.layerId(); + int lay2 = hit2.layerId(); + + if (lay1 < lay2) + return true; + else if (lay1 > lay2) + return false; + else + return hitWireSort(hit1, hit2); +} + +inline bool hitTimeSort(const DTPrimitive& hit1, const DTPrimitive& hit2) { + int tdc1 = hit1.tdcTimeStamp(); + int tdc2 = hit2.tdcTimeStamp(); + + if (tdc1 < tdc2) + return true; + else if (tdc1 > tdc2) + return false; + else + return hitLayerSort(hit1, hit2); +} + +namespace dtamgrouping { + /* Cell's combination, following previous labeling, to obtain every possible muon's path. 
+ Others cells combinations imply non straight paths */ + constexpr int CHANNELS_PATH_ARRANGEMENTS[8][4] = { + {0, 1, 3, 6}, {0, 1, 3, 7}, {0, 1, 4, 7}, {0, 1, 4, 8}, {0, 2, 4, 7}, {0, 2, 4, 8}, {0, 2, 5, 8}, {0, 2, 5, 9}}; + + /* For each of the previous cell's combinations, this array stores the associated cell's + displacement, relative to lower layer cell, measured in semi-cell length units */ + + constexpr int CELL_HORIZONTAL_LAYOUTS[8][4] = {{0, -1, -2, -3}, + {0, -1, -2, -1}, + {0, -1, 0, -1}, + {0, -1, 0, 1}, + {0, 1, 0, -1}, + {0, 1, 0, 1}, + {0, 1, 2, 1}, + {0, 1, 2, 3}}; +} // namespace dtamgrouping + +// =============================================================================== +// Class declarations +// =============================================================================== + +class TrapezoidalGrouping : public MotherGrouping { +public: + // Constructors and destructor + TrapezoidalGrouping(const edm::ParameterSet& pset, edm::ConsumesCollector& iC); + ~TrapezoidalGrouping() override; + + // Main methods + void initialise(const edm::EventSetup& iEventSetup) override; + void run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + const DTDigiCollection& digis, + MuonPathPtrs& outMpath) override; + void finish() override; + + // Other public methods + + // Public attributes + +private: + // Private methods + void setInChannels(const DTDigiCollection* digi, int sl); + std::vector group_hits(DTPrimitive pivot_hit, + std::vector input_paths, + DTPrimitives hits_per_cell, + DTPrimitives& hits_in_trapezoid); + + // Private attributes + const bool debug_; + + DTPrimitives muxInChannels_[cmsdt::NUM_CELLS_PER_BLOCK]; + DTPrimitives channelIn_[cmsdt::NUM_LAYERS][cmsdt::NUM_CH_PER_LAYER]; + DTPrimitives all_hits; + DTPrimitives chInDummy_; + int prevTDCTimeStamps_[4]; + int currentBaseChannel_; + + // The trapezoid is as follows: + // [ 0 ][ 1 ][ 2 ][ 3 ][ 4 ][ 5 ][ 6 ][ 7 ][ 8 ] + + // And maps to the physical cells as follows: + + // Pivot in 
layer 1 = "00" + // [ 5 ][ 6 ][ 7 ][ 8 ] Layer C + // [ 2 ][ 3 ][ 4 ] Layer B + // [ 0 ][ 1 ] Layer A + // Pivot + + // Pivot in layer 2 = "01" + // [ 2 ][ 3 ][ 4 ] Layer B + // [ 0 ][ 1 ] Layer A + // Pivot + // [ 6,8 ][ 5,7 ] Layer C + + // Pivot in layer 3 = "10" + // [ 6,8 ][ 5,7 ] Layer C + // Pivot + // [ 0 ][ 1 ] Layer A + // [ 2 ][ 3 ][ 4 ] Layer B + + // Pivot in layer 4 = "11" + // Pivot + // [ 0 ][ 1 ] Layer A + // [ 2 ][ 3 ][ 4 ] Layer B + // [ 5 ][ 6 ][ 7 ][ 8 ] Layer C + + short trapezoid_vertical_mapping[4][9] = {{1, 1, 2, 2, 2, 3, 3, 3, 3}, + {1, 1, 2, 2, 2, -1, -1, -1, -1}, + {-1, -1, -2, -2, -2, 1, 1, 1, 1}, + {-1, -1, -2, -2, -2, -3, -3, -3, -3}}; + + short trapezoid_horizontal_mapping[4][9] = {{0, 1, -1, 0, 1, -1, 0, 1, 2}, + {-1, 0, -1, 0, 1, 0, -1, 0, -1}, + {0, 1, -1, 0, 1, 1, 0, 1, 0}, + {-1, 0, -1, 0, 1, -2, -1, 0, 1}}; + + // Task list + // 4 hit candidates + // 0 => (0,2,5), 1 => (0,2,6), 2 => (0,3,6), 3 => (0,3,7), + // 4 => (1,3,6), 5 => (1,3,7), 6 => (1,4,7), 7 => (1,4,8), + // the rest are 3-hit candidates, last value not used + // 8 => (0,2,0), 9 => (0,3,0), 10 => (1,3,0), 11 => (1,4,0), + // 12 => (0,5,0), 13 => (0,6,0), 14 => (0,7,0), 15 => (1,6,0), + // 16 => (1,7,0), 17 => (1,8,0), 18 => (2,5,0), 19 => (2,6,0), + // 20 => (3,6,0), 21 => (3,7,0), 22 => (4,7,0), 23 => (4,8,0) + + std::vector> task_list = {// 4-hit + {0, 2, 5}, + {0, 2, 6}, + {0, 3, 6}, + {0, 3, 7}, + {1, 3, 6}, + {1, 3, 7}, + {1, 4, 7}, + {1, 4, 8}, + // 3-hit + {0, 2}, + {0, 3}, + {1, 3}, + {1, 4}, + {0, 5}, + {0, 6}, + {0, 7}, + {1, 6}, + {1, 7}, + {1, 8}, + {2, 5}, + {2, 6}, + {3, 6}, + {3, 7}, + {4, 7}, + {4, 8}}; + + int CELL_HORIZONTAL_LAYOUTS_PER_TASK[4][24][4] = { // pivoting over layer 1 + {// all layers available + {0, 0, 0, -1}, + {0, 0, 1, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + {1, 0, 0, -1}, + {1, 0, 1, -1}, + {1, 1, 0, -1}, + {1, 1, 1, -1}, + // layer 4 missing + {0, 0, 0, -1}, + {0, 1, 0, -1}, + {1, 0, 0, -1}, + {1, 1, 0, -1}, + // layer 3 missing 
+ {0, 0, 0, -1}, + {0, 0, 1, -1}, + {0, 1, 1, -1}, + {1, 0, 0, -1}, + {1, 0, 1, -1}, + {1, 1, 1, -1}, + // layer 2 missing + {0, 0, 0, -1}, + {0, 0, 1, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + {1, 1, 0, -1}, + {1, 1, 1, -1}}, + // pivoting over layer 2 + {// all layers available + {0, 0, 0, -1}, + {1, 0, 0, -1}, + {1, 0, 1, -1}, + {0, 0, 1, -1}, + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + {1, 1, 1, -1}, + // layer 1 missing + {0, 0, 0, -1}, + {0, 0, 1, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + // layer 4 missing + {0, 0, 0, -1}, + {1, 0, 0, -1}, + {0, 0, 0, -1}, + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {1, 1, 0, -1}, + // layer 3 missing + {0, 0, 0, -1}, + {1, 0, 0, -1}, + {1, 0, 1, -1}, + {0, 0, 1, -1}, + {0, 1, 1, -1}, + {1, 1, 1, -1}}, + // pivoting over layer 3 + {// all layers available + {1, 1, 1, -1}, + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + {1, 0, 0, -1}, + {1, 0, 1, -1}, + {0, 0, 1, -1}, + {0, 0, 0, -1}, + // layer 4 missing + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {1, 0, 0, -1}, + {0, 0, 0, -1}, + // layer 1 missing + {0, 1, 1, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + {0, 0, 0, -1}, + {0, 0, 1, -1}, + {0, 0, 0, -1}, + // layer 2 missing + {1, 1, 1, -1}, + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {0, 1, 1, -1}, + {0, 0, 1, -1}, + {0, 0, 0, -1}}, + // pivoting over layer 4 + {// all layers available + {1, 1, 1, -1}, + {0, 1, 1, -1}, + {1, 0, 1, -1}, + {0, 0, 1, -1}, + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {1, 0, 0, -1}, + {0, 0, 0, -1}, + // layer 1 missing + {0, 1, 1, -1}, + {0, 0, 1, -1}, + {0, 1, 0, -1}, + {0, 0, 0, -1}, + // layer 2 missing + {1, 1, 1, -1}, + {0, 1, 1, -1}, + {0, 0, 1, -1}, + {1, 1, 0, -1}, + {0, 1, 0, -1}, + {0, 0, 0, -1}, + // layer 3 missing + {1, 1, 1, -1}, + {0, 1, 1, -1}, + {1, 0, 1, -1}, + {0, 0, 1, -1}, + {1, 0, 0, -1}, + {0, 0, 0, -1}}}; + + int MISSING_LAYER_LAYOUTS_PER_TASK[4][24] = { + {-1, -1, -1, -1, -1, -1, -1, -1, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1}, + {-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 3, 3, 3, 3, 
3, 3, 2, 2, 2, 2, 2, 2}, + {-1, -1, -1, -1, -1, -1, -1, -1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}, + {-1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2}}; +}; + +#endif diff --git a/L1Trigger/DTTriggerPhase2/interface/constants.h b/L1Trigger/DTTriggerPhase2/interface/constants.h index ac69c152d74fe..454fd0abcac63 100644 --- a/L1Trigger/DTTriggerPhase2/interface/constants.h +++ b/L1Trigger/DTTriggerPhase2/interface/constants.h @@ -20,6 +20,8 @@ #ifndef L1Trigger_DTTriggerPhase2_constants_h #define L1Trigger_DTTriggerPhase2_constants_h #include +#include +#include // Compiler option to select program mode: PRUEBA_MEZCLADOR, PRUEBA_ANALIZADOR, // or NONE @@ -119,7 +121,43 @@ namespace cmsdt { lat8(l8), index(idx), rpcFlag(rpc) {} - + metaPrimitive() + : rawId(0), + t0(0), + x(0), + tanPhi(0), + phi(0), + phiB(0), + phi_cmssw(0), + phiB_cmssw(0), + chi2(0), + quality(0), + wi1(0), + tdc1(0), + lat1(0), + wi2(0), + tdc2(0), + lat2(0), + wi3(0), + tdc3(0), + lat3(0), + wi4(0), + tdc4(0), + lat4(0), + wi5(0), + tdc5(0), + lat5(0), + wi6(0), + tdc6(0), + lat6(0), + wi7(0), + tdc7(0), + lat7(0), + wi8(0), + tdc8(0), + lat8(0), + index(0), + rpcFlag(0) {} uint32_t rawId; double t0; double x; @@ -157,10 +195,12 @@ namespace cmsdt { int index; int rpcFlag = 0; }; + struct PARTIAL_LATQ_TYPE { bool latQValid; int bxValue; }; + struct LATQ_TYPE { bool valid; int bxValue; @@ -168,6 +208,12 @@ namespace cmsdt { MP_QUALITY quality; }; + struct bx_sl_vector { + int bx; + std::vector mps; + int sl; + }; + enum algo { Standard = 0, PseudoBayes = 1, HoughTrans = 2 }; enum scenario { MC = 0, DATA = 1, SLICE_TEST = 2 }; @@ -175,11 +221,92 @@ namespace cmsdt { /* En nanosegundos */ constexpr int LHC_CLK_FREQ = 25; + /* mixer constants */ + // Hits can be separated up to 9 frames, with 2 BXs per frame + // | | | | | | | | | | | | | | | | | | + // F1 F2 F3 F4 F5 F6 F7 F8 F9 + constexpr int BX_PER_FRAME = 2; + constexpr int MAX_FRAME_DIF = 8; + constexpr 
int PATHFINDER_INPUT_HITS_LIMIT = 8; + + /* laterality provider */ + constexpr int LAT_TOTAL_BITS = 9; // tdc counts from 0 to 512 + constexpr int LAT_MSB_BITS = 6; + constexpr int TIME_TO_TDC_COUNTS = 32; + + constexpr int LAT_P0_4H = 1; + constexpr int LAT_P1_4H = 31; + constexpr int LAT_P2_4H = 40; + + constexpr int LAT_P0_3H = 24; + constexpr int LAT_P1_3H = 27; + constexpr int LAT_P2_3H = 30; + + /* Fitting */ + + constexpr int SL1_CELLS_OFFSET = 48; + + constexpr int N_COEFFS = 8; + constexpr int GENERIC_COEFF_WIDTH = 20; + constexpr int WIDTH_FULL_TIME = 17; + constexpr int WIDTH_COARSED_TIME = 12; + constexpr int WIDTH_DIFBX = 5; + constexpr int WIDTH_FULL_POS = 17; + constexpr int WIDTH_FULL_SLOPE = 14; + constexpr int WIDTH_FULL_CHI2 = 16; + constexpr int WIREPOS_WIDTH = 17; + constexpr int WIREPOS_NORM_LSB_IGNORED = 9; + constexpr int WIDTH_POS_SLOPE_CORR = 9; + + constexpr int XI_SL_WIDTH = 12; + + constexpr int COEFF_WIDTH_SL_T0 = 15; + constexpr int COEFF_WIDTH_SL_POSITION = 18; + constexpr int COEFF_WIDTH_SL2_POSITION = 15; + constexpr int COEFF_WIDTH_SL_SLOPE = 18; + + constexpr int PRECISSION_SL_T0 = 13; + constexpr int PRECISSION_SL_POSITION = 13; + constexpr int PRECISSION_SL_SLOPE = 13; + + constexpr int PROD_RESIZE_SL_T0 = 28; + constexpr int PROD_RESIZE_SL_POSITION = 30; + constexpr int PROD_RESIZE_SL2_POSITION = 27; + constexpr int PROD_RESIZE_SL_SLOPE = 30; + + constexpr int XI_COR_WIDTH = 14; + + constexpr int COEFF_WIDTH_COR_T0 = 15; + constexpr int COEFF_WIDTH_COR_POSITION = 15; + constexpr int COEFF_WIDTH_COR_SLOPE = 15; + + constexpr int PRECISSION_COR_T0 = 15; + constexpr int PRECISSION_COR_POSITION = 15; + constexpr int PRECISSION_COR_SLOPE = 15; + + constexpr int PROD_RESIZE_COR_T0 = 30; + constexpr int PROD_RESIZE_COR_POSITION = 30; + constexpr int PROD_RESIZE_COR_SLOPE = 29; + + constexpr int T0_CUT_TOLERANCE = 0; + + // Filtering + constexpr int FSEG_T0_BX_LSB = 2; + constexpr int FSEG_T0_DISCARD_LSB = 5; + constexpr int 
FSEG_T0_SIZE = FSEG_T0_BX_LSB + (5 - FSEG_T0_DISCARD_LSB); + constexpr int FSEG_POS_DISCARD_LSB = 9; + constexpr int FSEG_POS_SIZE = WIDTH_FULL_POS - FSEG_POS_DISCARD_LSB; + constexpr int FSEG_SLOPE_DISCARD_LSB = 9; + constexpr int FSEG_SLOPE_SIZE = WIDTH_FULL_SLOPE - FSEG_SLOPE_DISCARD_LSB; + constexpr int SLFILT_MAX_SEG1T0_TO_SEG2ARRIVAL = 24; + /* Adimensional */ constexpr int MAX_BX_IDX = 3564; // In ns (maximum drift time inside the cell) constexpr float MAXDRIFT = 387; + constexpr float MAXDRIFTTDC = 496; // we could make this value depend on the chamber, to be seen + // In mm (cell dimmensions) constexpr int CELL_HEIGHT = 13; constexpr float CELL_SEMIHEIGHT = 6.5; @@ -190,6 +317,9 @@ namespace cmsdt { // With 4 bits for the decimal part constexpr int DRIFT_SPEED_X4 = 889; // 55.5 * 2 ** 4 + // slope conversion 1 LSB = (v_drift) x (1 tdc count) / (1 semicell_h * 16) ~= 0.4e-3 + constexpr float SLOPE_LSB = ((float)CELL_SEMILENGTH / MAXDRIFTTDC) * (1) / (CELL_SEMIHEIGHT * 16.); + // distance between SLs, cm constexpr float VERT_PHI1_PHI3 = 23.5; @@ -199,6 +329,15 @@ namespace cmsdt { // distance between center of the chamber and each SL in mm, 2 bit precision for the decimal part constexpr int CH_CENTER_TO_MID_SL_X2 = 470; // 117.5 * 2 ** 2 + // max difference in BX to even try to correlate + constexpr int MAX_BX_FOR_COR = 2; + + // max number of TPs to store per BX + constexpr int MAX_PRIM_PER_BX_FOR_COR = 6; + + // max number of TPs to correlate and perform the refitting + constexpr int MAX_PRIM_FOR_COR = 12; + /* This is the maximum value than internal time can take. 
This is because internal time is cyclical due to the limited size of the time counters and diff --git a/L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h b/L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h new file mode 100644 index 0000000000000..f612f773e00ff --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h @@ -0,0 +1,19 @@ +#ifndef L1Trigger_DTTriggerPhase2_vhdl_h +#define L1Trigger_DTTriggerPhase2_vhdl_h + +#include +#include +#include + +// "à la vhdl" functions +std::vector vhdl_slice(std::vector v, int upper, int lower); +int vhdl_unsigned_to_int(std::vector v); +int vhdl_signed_to_int(std::vector v); +void vhdl_int_to_unsigned(int value, std::vector &v); +void vhdl_int_to_signed(int value, std::vector &v); +void vhdl_resize_unsigned(std::vector &v, int new_size); +void vhdl_resize_signed(std::vector &v, int new_size); +bool vhdl_resize_signed_ok(std::vector v, int new_size); +bool vhdl_resize_unsigned_ok(std::vector v, int new_size); + +#endif diff --git a/L1Trigger/DTTriggerPhase2/plugins/DTTrigPhase2Prod.cc b/L1Trigger/DTTriggerPhase2/plugins/DTTrigPhase2Prod.cc index bd7029041f1ae..608528ed88818 100644 --- a/L1Trigger/DTTriggerPhase2/plugins/DTTrigPhase2Prod.cc +++ b/L1Trigger/DTTriggerPhase2/plugins/DTTrigPhase2Prod.cc @@ -23,14 +23,22 @@ #include "L1Trigger/DTTriggerPhase2/interface/constants.h" #include "L1Trigger/DTTriggerPhase2/interface/MotherGrouping.h" -#include "L1Trigger/DTTriggerPhase2/interface/InitialGrouping.h" +#include "L1Trigger/DTTriggerPhase2/interface/TrapezoidalGrouping.h" #include "L1Trigger/DTTriggerPhase2/interface/HoughGrouping.h" #include "L1Trigger/DTTriggerPhase2/interface/PseudoBayesGrouping.h" +#include "L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h" +#include "L1Trigger/DTTriggerPhase2/interface/LateralityBasicProvider.h" +#include "L1Trigger/DTTriggerPhase2/interface/LateralityCoarsedProvider.h" #include "L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzer.h" +#include 
"L1Trigger/DTTriggerPhase2/interface/MuonPathSLFitter.h" +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathCorFitter.h" #include "L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyticAnalyzer.h" #include "L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzerInChamber.h" #include "L1Trigger/DTTriggerPhase2/interface/MuonPathAssociator.h" +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathConfirmator.h" #include "L1Trigger/DTTriggerPhase2/interface/MPFilter.h" +#include "L1Trigger/DTTriggerPhase2/interface/MPSLFilter.h" +#include "L1Trigger/DTTriggerPhase2/interface/MPCorFilter.h" #include "L1Trigger/DTTriggerPhase2/interface/MPQualityEnhancerFilter.h" #include "L1Trigger/DTTriggerPhase2/interface/MPRedundantFilter.h" #include "L1Trigger/DTTriggerPhase2/interface/MPCleanHitsFilter.h" @@ -95,7 +103,9 @@ class DTTrigPhase2Prod : public edm::stream::EDProducer<> { bool outer(const metaPrimitive& mp) const; bool inner(const metaPrimitive& mp) const; void printmP(const std::string& ss, const metaPrimitive& mP) const; + void printmP(const metaPrimitive& mP) const; void printmPC(const std::string& ss, const metaPrimitive& mP) const; + void printmPC(const metaPrimitive& mP) const; bool hasPosRF(int wh, int sec) const; // Getter-methods @@ -127,6 +137,15 @@ class DTTrigPhase2Prod : public edm::stream::EDProducer<> { int df_extended_; int max_index_; + bool output_mixer_; + bool output_latpredictor_; + bool output_slfitter_; + bool output_slfilter_; + bool output_confirmed_; + bool output_matcher_; + bool skip_processing_; + bool allow_confirmation_; + // ParameterSet edm::EDGetTokenT dtDigisToken_; edm::EDGetTokenT rpcRecHitsLabel_; @@ -135,11 +154,14 @@ class DTTrigPhase2Prod : public edm::stream::EDProducer<> { int algo_; // Grouping code std::unique_ptr grouping_obj_; std::unique_ptr mpathanalyzer_; + std::unique_ptr latprovider_; std::unique_ptr mpathqualityenhancer_; std::unique_ptr mpathqualityenhancerbayes_; std::unique_ptr mpathredundantfilter_; std::unique_ptr 
mpathhitsfilter_; - std::unique_ptr mpathassociator_; + std::unique_ptr mpathassociator_; + std::unique_ptr mpathconfirmator_; + std::unique_ptr mpathcorfilter_; std::shared_ptr globalcoordsobtainer_; // Buffering @@ -192,6 +214,16 @@ DTTrigPhase2Prod::DTTrigPhase2Prod(const ParameterSet& pset) // Choosing grouping scheme: algo_ = pset.getParameter("algo"); + // shortcuts + + output_mixer_ = pset.getParameter("output_mixer"); + output_latpredictor_ = pset.getParameter("output_latpredictor"); + output_slfitter_ = pset.getParameter("output_slfitter"); + output_slfilter_ = pset.getParameter("output_slfilter"); + output_confirmed_ = pset.getParameter("output_confirmed"); + output_matcher_ = pset.getParameter("output_matcher"); + allow_confirmation_ = pset.getParameter("allow_confirmation"); + edm::ConsumesCollector consumesColl(consumesCollector()); globalcoordsobtainer_ = std::make_shared(pset); globalcoordsobtainer_->generate_luts(); @@ -203,13 +235,14 @@ DTTrigPhase2Prod::DTTrigPhase2Prod(const ParameterSet& pset) grouping_obj_ = std::make_unique(pset.getParameter("HoughGrouping"), consumesColl); } else { - grouping_obj_ = std::make_unique(pset, consumesColl); + grouping_obj_ = std::make_unique(pset, consumesColl); } if (algo_ == Standard) { if (debug_) LogDebug("DTTrigPhase2Prod") << "DTp2:constructor: JM analyzer"; - mpathanalyzer_ = std::make_unique(pset, consumesColl, globalcoordsobtainer_); + mpathanalyzer_ = std::make_unique(pset, consumesColl, globalcoordsobtainer_); + latprovider_ = std::make_unique(pset, consumesColl); } else { if (debug_) LogDebug("DTTrigPhase2Prod") << "DTp2:constructor: Full chamber analyzer"; @@ -221,11 +254,13 @@ DTTrigPhase2Prod::DTTrigPhase2Prod(const ParameterSet& pset) superCellhalfspacewidth_ = pset.getParameter("superCellspacewidth") / 2; superCelltimewidth_ = pset.getParameter("superCelltimewidth"); - mpathqualityenhancer_ = std::make_unique(pset); + mpathqualityenhancer_ = std::make_unique(pset); mpathqualityenhancerbayes_ = 
std::make_unique(pset); mpathredundantfilter_ = std::make_unique(pset); mpathhitsfilter_ = std::make_unique(pset); - mpathassociator_ = std::make_unique(pset, consumesColl, globalcoordsobtainer_); + mpathconfirmator_ = std::make_unique(pset, consumesColl); + mpathassociator_ = std::make_unique(pset, consumesColl, globalcoordsobtainer_); + mpathcorfilter_ = std::make_unique(pset); rpc_integrator_ = std::make_unique(pset, consumesColl); dtGeomH = esConsumes(); @@ -249,6 +284,7 @@ void DTTrigPhase2Prod::beginRun(edm::Run const& iRun, const edm::EventSetup& iEv mpathqualityenhancerbayes_->initialise(iEventSetup); // Filter object initialisation mpathhitsfilter_->initialise(iEventSetup); mpathassociator_->initialise(iEventSetup); // Associator object initialisation + mpathcorfilter_->initialise(iEventSetup); if (auto geom = iEventSetup.getHandle(dtGeomH)) { dtGeo_ = &(*geom); @@ -285,7 +321,7 @@ void DTTrigPhase2Prod::produce(Event& iEvent, const EventSetup& iEventSetup) { else if (debug_) LogDebug("DTTrigPhase2Prod") << "produce - Getting and grouping digis per chamber."; - MuonPathPtrs muonpaths; + std::map muonpaths; for (const auto& ich : dtGeo_->chambers()) { // The code inside this for loop would ideally later fit inside a trigger unit (in principle, a DT station) of the future Phase 2 DT Trigger. const DTChamber* chamb = ich; @@ -328,88 +364,314 @@ void DTTrigPhase2Prod::produce(Event& iEvent, const EventSetup& iEventSetup) { // the groupings access it, it's not needed to "collect" the final products). 
while (!superCells.empty()) { - grouping_obj_->run(iEvent, iEventSetup, *(superCells.back()), muonpaths); + grouping_obj_->run(iEvent, iEventSetup, *(superCells.back()), muonpaths[chid.rawId()]); superCells.pop_back(); } } else { - grouping_obj_->run(iEvent, iEventSetup, (*dmit).second, muonpaths); + grouping_obj_->run(iEvent, iEventSetup, (*dmit).second, muonpaths[chid.rawId()]); } } digiMap.clear(); if (dump_) { - for (unsigned int i = 0; i < muonpaths.size(); i++) { - stringstream ss; - ss << iEvent.id().event() << " mpath " << i << ": "; - for (int lay = 0; lay < muonpaths.at(i)->nprimitives(); lay++) - ss << muonpaths.at(i)->primitive(lay)->channelId() << " "; - for (int lay = 0; lay < muonpaths.at(i)->nprimitives(); lay++) - ss << muonpaths.at(i)->primitive(lay)->tdcTimeStamp() << " "; - for (int lay = 0; lay < muonpaths.at(i)->nprimitives(); lay++) - ss << muonpaths.at(i)->primitive(lay)->laterality() << " "; - LogInfo("DTTrigPhase2Prod") << ss.str(); + for (auto& ch_muonpaths : muonpaths) { + for (unsigned int i = 0; i < ch_muonpaths.second.size(); i++) { + stringstream ss; + ss << iEvent.id().event() << " mpath " << i << ": "; + for (int lay = 0; lay < ch_muonpaths.second.at(i)->nprimitives(); lay++) + ss << ch_muonpaths.second.at(i)->primitive(lay)->channelId() << " "; + for (int lay = 0; lay < ch_muonpaths.second.at(i)->nprimitives(); lay++) + ss << ch_muonpaths.second.at(i)->primitive(lay)->tdcTimeStamp() << " "; + for (int lay = 0; lay < ch_muonpaths.second.at(i)->nprimitives(); lay++) + ss << ch_muonpaths.second.at(i)->primitive(lay)->laterality() << " "; + LogInfo("DTTrigPhase2Prod") << ss.str(); + } + } + } + + std::map> lateralities; + if (!output_mixer_) { + for (auto& ch_muonpaths : muonpaths) { + if (algo_ == Standard) { + latprovider_->run(iEvent, iEventSetup, ch_muonpaths.second, lateralities[ch_muonpaths.first]); + } } } // FILTER GROUPING - MuonPathPtrs filteredmuonpaths; - if (algo_ == Standard) { - mpathredundantfilter_->run(iEvent, 
iEventSetup, muonpaths, filteredmuonpaths); - } else { - mpathhitsfilter_->run(iEvent, iEventSetup, muonpaths, filteredmuonpaths); + std::map filteredmuonpaths; + for (auto& ch_muonpaths : muonpaths) { + if (algo_ == Standard) { + mpathredundantfilter_->run(iEvent, iEventSetup, ch_muonpaths.second, filteredmuonpaths[ch_muonpaths.first]); + } else { + mpathhitsfilter_->run(iEvent, iEventSetup, ch_muonpaths.second, filteredmuonpaths[ch_muonpaths.first]); + } } if (dump_) { - for (unsigned int i = 0; i < filteredmuonpaths.size(); i++) { - stringstream ss; - ss << iEvent.id().event() << " filt. mpath " << i << ": "; - for (int lay = 0; lay < filteredmuonpaths.at(i)->nprimitives(); lay++) - ss << filteredmuonpaths.at(i)->primitive(lay)->channelId() << " "; - for (int lay = 0; lay < filteredmuonpaths.at(i)->nprimitives(); lay++) - ss << filteredmuonpaths.at(i)->primitive(lay)->tdcTimeStamp() << " "; - LogInfo("DTTrigPhase2Prod") << ss.str(); + for (auto& ch_filteredmuonpaths : filteredmuonpaths) { + for (unsigned int i = 0; i < ch_filteredmuonpaths.second.size(); i++) { + stringstream ss; + ss << iEvent.id().event() << " filt. 
mpath " << i << ": "; + for (int lay = 0; lay < ch_filteredmuonpaths.second.at(i)->nprimitives(); lay++) + ss << ch_filteredmuonpaths.second.at(i)->primitive(lay)->channelId() << " "; + for (int lay = 0; lay < ch_filteredmuonpaths.second.at(i)->nprimitives(); lay++) + ss << ch_filteredmuonpaths.second.at(i)->primitive(lay)->tdcTimeStamp() << " "; + LogInfo("DTTrigPhase2Prod") << ss.str(); + } } } + skip_processing_ = output_mixer_ || output_latpredictor_; + /////////////////////////////////////////// /// Fitting SECTION; /////////////////////////////////////////// - if (debug_) - LogDebug("DTTrigPhase2Prod") << "MUON PATHS found: " << muonpaths.size() << " (" << filteredmuonpaths.size() - << ") in event " << iEvent.id().event(); + if (debug_) { + for (auto& ch_muonpaths : muonpaths) { + LogDebug("DTTrigPhase2Prod") << "MUON PATHS found: " << ch_muonpaths.second.size() << " (" + << filteredmuonpaths[ch_muonpaths.first].size() << ") in event " + << iEvent.id().event(); + } + } if (debug_) LogDebug("DTTrigPhase2Prod") << "filling NmetaPrimtives" << std::endl; - std::vector metaPrimitives; - MuonPathPtrs outmpaths; + std::map> metaPrimitives; + std::map outmpaths; if (algo_ == Standard) { if (debug_) LogDebug("DTTrigPhase2Prod") << "Fitting 1SL "; - mpathanalyzer_->run(iEvent, iEventSetup, filteredmuonpaths, metaPrimitives); + for (auto& ch_muonpaths : muonpaths) { // FIXME, do we need filtered muonpaths? 
+ if (!output_mixer_ && !output_latpredictor_) + mpathanalyzer_->run(iEvent, + iEventSetup, + ch_muonpaths.second, + lateralities[ch_muonpaths.first], + metaPrimitives[ch_muonpaths.first]); + else if (output_mixer_) { + for (auto& inMPath : ch_muonpaths.second) { + auto sl = inMPath->primitive(0)->superLayerId(); // 0, 1, 2 + int selected_lay = 1; + if (inMPath->primitive(0)->tdcTimeStamp() != -1) + selected_lay = 0; + int dumLayId = inMPath->primitive(selected_lay)->cameraId(); + auto dtDumlayerId = DTLayerId(dumLayId); + DTSuperLayerId MuonPathSLId(dtDumlayerId.wheel(), dtDumlayerId.station(), dtDumlayerId.sector(), sl + 1); + if (sl == 0) + metaPrimitives[ch_muonpaths.first].emplace_back(metaPrimitive({MuonPathSLId.rawId(), + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + -1, + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + -1, + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + -1, + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1})); + else + metaPrimitives[ch_muonpaths.first].emplace_back(metaPrimitive({MuonPathSLId.rawId(), + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + -1, + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + -1, + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + -1, + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + -1, + -1})); + } + } else if (output_latpredictor_) { + int imp = -1; + for (auto& inMPath : ch_muonpaths.second) { + imp++; + auto sl = inMPath->primitive(0)->superLayerId(); // 0, 1, 2 + int selected_lay = 1; + if 
(inMPath->primitive(0)->tdcTimeStamp() != -1) + selected_lay = 0; + int dumLayId = inMPath->primitive(selected_lay)->cameraId(); + auto dtDumlayerId = DTLayerId(dumLayId); + DTSuperLayerId MuonPathSLId(dtDumlayerId.wheel(), dtDumlayerId.station(), dtDumlayerId.sector(), sl + 1); + for (auto& latcomb : lateralities[ch_muonpaths.first][imp]) { + if (sl == 0) + metaPrimitives[ch_muonpaths.first].emplace_back(metaPrimitive({MuonPathSLId.rawId(), + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + latcomb[0], + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + latcomb[1], + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + latcomb[2], + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + latcomb[3], + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1})); + else + metaPrimitives[ch_muonpaths.first].emplace_back(metaPrimitive({MuonPathSLId.rawId(), + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + latcomb[0], + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + latcomb[1], + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + latcomb[2], + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + latcomb[3], + -1})); + } + } + } + } } else { // implementation for advanced (2SL) grouping, no filter required.. 
if (debug_) LogDebug("DTTrigPhase2Prod") << "Fitting 2SL at once "; - mpathanalyzer_->run(iEvent, iEventSetup, muonpaths, outmpaths); + for (auto& ch_muonpaths : muonpaths) { + mpathanalyzer_->run(iEvent, iEventSetup, ch_muonpaths.second, outmpaths[ch_muonpaths.first]); + } } + skip_processing_ = skip_processing_ || output_slfitter_; + if (dump_) { - for (unsigned int i = 0; i < outmpaths.size(); i++) { - LogInfo("DTTrigPhase2Prod") << iEvent.id().event() << " mp " << i << ": " << outmpaths.at(i)->bxTimeValue() << " " - << outmpaths.at(i)->horizPos() << " " << outmpaths.at(i)->tanPhi() << " " - << outmpaths.at(i)->phi() << " " << outmpaths.at(i)->phiB() << " " - << outmpaths.at(i)->quality() << " " << outmpaths.at(i)->chiSquare(); + for (auto& ch_outmpaths : outmpaths) { + for (unsigned int i = 0; i < ch_outmpaths.second.size(); i++) { + LogInfo("DTTrigPhase2Prod") << iEvent.id().event() << " mp " << i << ": " + << ch_outmpaths.second.at(i)->bxTimeValue() << " " + << ch_outmpaths.second.at(i)->horizPos() << " " + << ch_outmpaths.second.at(i)->tanPhi() << " " << ch_outmpaths.second.at(i)->phi() + << " " << ch_outmpaths.second.at(i)->phiB() << " " + << ch_outmpaths.second.at(i)->quality() << " " + << ch_outmpaths.second.at(i)->chiSquare(); + } } - for (unsigned int i = 0; i < metaPrimitives.size(); i++) { - stringstream ss; - ss << iEvent.id().event() << " mp " << i << ": "; - printmP(ss.str(), metaPrimitives.at(i)); + for (auto& ch_metaPrimitives : metaPrimitives) { + for (unsigned int i = 0; i < ch_metaPrimitives.second.size(); i++) { + stringstream ss; + ss << iEvent.id().event() << " mp " << i << ": "; + printmP(ss.str(), ch_metaPrimitives.second.at(i)); + } } } muonpaths.clear(); filteredmuonpaths.clear(); + ///////////////////////////////////// + //// CONFIRMATION: + ///////////////////////////////////// + + std::map> confirmedMetaPrimitives; + for (auto& ch_metaPrimitives : metaPrimitives) { + if (!skip_processing_ && allow_confirmation_) + 
mpathconfirmator_->run( + iEvent, iEventSetup, ch_metaPrimitives.second, dtdigis, confirmedMetaPrimitives[ch_metaPrimitives.first]); + else + for (auto& mp : ch_metaPrimitives.second) { + confirmedMetaPrimitives[ch_metaPrimitives.first].push_back(mp); + } + } + + metaPrimitives.clear(); + skip_processing_ = skip_processing_ || output_confirmed_; + ///////////////////////////////////// // FILTER SECTIONS: //////////////////////////////////// @@ -417,25 +679,38 @@ void DTTrigPhase2Prod::produce(Event& iEvent, const EventSetup& iEventSetup) { if (debug_) LogDebug("DTTrigPhase2Prod") << "declaring new vector for filtered" << std::endl; - std::vector filteredMetaPrimitives; + std::map> filteredMetaPrimitives; if (algo_ == Standard) - mpathqualityenhancer_->run(iEvent, iEventSetup, metaPrimitives, filteredMetaPrimitives); - + for (auto& ch_confirmedMetaPrimitives : confirmedMetaPrimitives) { + if (!skip_processing_) + mpathqualityenhancer_->run(iEvent, + iEventSetup, + ch_confirmedMetaPrimitives.second, + filteredMetaPrimitives[ch_confirmedMetaPrimitives.first]); + else + for (auto& mp : ch_confirmedMetaPrimitives.second) { + filteredMetaPrimitives[ch_confirmedMetaPrimitives.first].push_back(mp); + } + } if (dump_) { - for (unsigned int i = 0; i < filteredMetaPrimitives.size(); i++) { - stringstream ss; - ss << iEvent.id().event() << " filtered mp " << i << ": "; - printmP(ss.str(), filteredMetaPrimitives.at(i)); + for (auto& ch_filteredMetaPrimitives : filteredMetaPrimitives) { + for (unsigned int i = 0; i < ch_filteredMetaPrimitives.second.size(); i++) { + stringstream ss; + ss << iEvent.id().event() << " filtered mp " << i << ": "; + printmP(ss.str(), ch_filteredMetaPrimitives.second.at(i)); + } } } - metaPrimitives.clear(); - metaPrimitives.erase(metaPrimitives.begin(), metaPrimitives.end()); + skip_processing_ = skip_processing_ || output_slfilter_; + confirmedMetaPrimitives.clear(); if (debug_) - LogDebug("DTTrigPhase2Prod") << "DTp2 in event:" << 
iEvent.id().event() << " we found " - << filteredMetaPrimitives.size() << " filteredMetaPrimitives (superlayer)" - << std::endl; + for (auto& ch_filteredMetaPrimitives : filteredMetaPrimitives) { + LogDebug("DTTrigPhase2Prod") << "DTp2 in event:" << iEvent.id().event() << " we found " + << ch_filteredMetaPrimitives.second.size() << " filteredMetaPrimitives (superlayer)" + << std::endl; + } if (debug_) LogDebug("DTTrigPhase2Prod") << "filteredMetaPrimitives: starting correlations" << std::endl; @@ -443,64 +718,106 @@ void DTTrigPhase2Prod::produce(Event& iEvent, const EventSetup& iEventSetup) { //// CORRELATION: ///////////////////////////////////// - std::vector correlatedMetaPrimitives; - if (algo_ == Standard) - mpathassociator_->run(iEvent, iEventSetup, dtdigis, filteredMetaPrimitives, correlatedMetaPrimitives); - else { - for (const auto& muonpath : outmpaths) { - correlatedMetaPrimitives.emplace_back(muonpath->rawId(), - (double)muonpath->bxTimeValue(), - muonpath->horizPos(), - muonpath->tanPhi(), - muonpath->phi(), - muonpath->phiB(), - muonpath->phi_cmssw(), - muonpath->phiB_cmssw(), - muonpath->chiSquare(), - (int)muonpath->quality(), - muonpath->primitive(0)->channelId(), - muonpath->primitive(0)->tdcTimeStamp(), - muonpath->primitive(0)->laterality(), - muonpath->primitive(1)->channelId(), - muonpath->primitive(1)->tdcTimeStamp(), - muonpath->primitive(1)->laterality(), - muonpath->primitive(2)->channelId(), - muonpath->primitive(2)->tdcTimeStamp(), - muonpath->primitive(2)->laterality(), - muonpath->primitive(3)->channelId(), - muonpath->primitive(3)->tdcTimeStamp(), - muonpath->primitive(3)->laterality(), - muonpath->primitive(4)->channelId(), - muonpath->primitive(4)->tdcTimeStamp(), - muonpath->primitive(4)->laterality(), - muonpath->primitive(5)->channelId(), - muonpath->primitive(5)->tdcTimeStamp(), - muonpath->primitive(5)->laterality(), - muonpath->primitive(6)->channelId(), - muonpath->primitive(6)->tdcTimeStamp(), - 
muonpath->primitive(6)->laterality(), - muonpath->primitive(7)->channelId(), - muonpath->primitive(7)->tdcTimeStamp(), - muonpath->primitive(7)->laterality()); + std::map> correlatedMetaPrimitives; + if (algo_ == Standard) { + for (auto& ch_filteredMetaPrimitives : filteredMetaPrimitives) { + if (!skip_processing_) + mpathassociator_->run(iEvent, + iEventSetup, + ch_filteredMetaPrimitives.second, + correlatedMetaPrimitives[ch_filteredMetaPrimitives.first]); + else + for (auto& mp : ch_filteredMetaPrimitives.second) { + correlatedMetaPrimitives[ch_filteredMetaPrimitives.first].push_back(mp); + } + } + } else { + for (auto& ch_outmpaths : outmpaths) { + for (const auto& muonpath : ch_outmpaths.second) { + correlatedMetaPrimitives[ch_outmpaths.first].emplace_back(muonpath->rawId(), + (double)muonpath->bxTimeValue(), + muonpath->horizPos(), + muonpath->tanPhi(), + muonpath->phi(), + muonpath->phiB(), + muonpath->phi_cmssw(), + muonpath->phiB_cmssw(), + muonpath->chiSquare(), + (int)muonpath->quality(), + muonpath->primitive(0)->channelId(), + muonpath->primitive(0)->tdcTimeStamp(), + muonpath->primitive(0)->laterality(), + muonpath->primitive(1)->channelId(), + muonpath->primitive(1)->tdcTimeStamp(), + muonpath->primitive(1)->laterality(), + muonpath->primitive(2)->channelId(), + muonpath->primitive(2)->tdcTimeStamp(), + muonpath->primitive(2)->laterality(), + muonpath->primitive(3)->channelId(), + muonpath->primitive(3)->tdcTimeStamp(), + muonpath->primitive(3)->laterality(), + muonpath->primitive(4)->channelId(), + muonpath->primitive(4)->tdcTimeStamp(), + muonpath->primitive(4)->laterality(), + muonpath->primitive(5)->channelId(), + muonpath->primitive(5)->tdcTimeStamp(), + muonpath->primitive(5)->laterality(), + muonpath->primitive(6)->channelId(), + muonpath->primitive(6)->tdcTimeStamp(), + muonpath->primitive(6)->laterality(), + muonpath->primitive(7)->channelId(), + muonpath->primitive(7)->tdcTimeStamp(), + muonpath->primitive(7)->laterality()); + } } } - 
filteredMetaPrimitives.clear(); - if (debug_) - LogDebug("DTTrigPhase2Prod") << "DTp2 in event:" << iEvent.id().event() << " we found " - << correlatedMetaPrimitives.size() << " correlatedMetPrimitives (chamber)"; + skip_processing_ = skip_processing_ || output_matcher_; + if (debug_) + for (auto& ch_correlatedMetaPrimitives : correlatedMetaPrimitives) { + LogDebug("DTTrigPhase2Prod") << "DTp2 in event:" << iEvent.id().event() << " we found " + << ch_correlatedMetaPrimitives.second.size() << " correlatedMetPrimitives (chamber)"; + } if (dump_) { - LogInfo("DTTrigPhase2Prod") << "DTp2 in event:" << iEvent.id().event() << " we found " - << correlatedMetaPrimitives.size() << " correlatedMetPrimitives (chamber)"; + for (auto& ch_correlatedMetaPrimitives : correlatedMetaPrimitives) { + LogDebug("DTTrigPhase2Prod") << "DTp2 in event:" << iEvent.id().event() << " we found " + << ch_correlatedMetaPrimitives.second.size() << " correlatedMetPrimitives (chamber)"; + } + for (auto& ch_correlatedMetaPrimitives : correlatedMetaPrimitives) { + for (unsigned int i = 0; i < ch_correlatedMetaPrimitives.second.size(); i++) { + stringstream ss; + ss << iEvent.id().event() << " correlated mp " << i << ": "; + printmPC(ss.str(), ch_correlatedMetaPrimitives.second.at(i)); + } + } + } - for (unsigned int i = 0; i < correlatedMetaPrimitives.size(); i++) { - stringstream ss; - ss << iEvent.id().event() << " correlated mp " << i << ": "; - printmPC(ss.str(), correlatedMetaPrimitives.at(i)); + // Correlated Filtering + std::map> filtCorrelatedMetaPrimitives; + if (algo_ == Standard) { + for (auto& ch_filteredMetaPrimitives : filteredMetaPrimitives) { + if (!skip_processing_) + mpathcorfilter_->run(iEvent, + iEventSetup, + ch_filteredMetaPrimitives.second, + correlatedMetaPrimitives[ch_filteredMetaPrimitives.first], + filtCorrelatedMetaPrimitives[ch_filteredMetaPrimitives.first]); + else { + for (auto& mp : ch_filteredMetaPrimitives.second) { + 
filtCorrelatedMetaPrimitives[ch_filteredMetaPrimitives.first].push_back(mp); + } + if (output_matcher_) + for (auto& mp : correlatedMetaPrimitives[ch_filteredMetaPrimitives.first]) { + filtCorrelatedMetaPrimitives[ch_filteredMetaPrimitives.first].push_back(mp); + } + } } } + correlatedMetaPrimitives.clear(); + filteredMetaPrimitives.clear(); + double shift_back = 0; if (scenario_ == MC) //scope for MC shift_back = 400; @@ -513,7 +830,9 @@ void DTTrigPhase2Prod::produce(Event& iEvent, const EventSetup& iEventSetup) { if (useRPC_) { rpc_integrator_->initialise(iEventSetup, shift_back); rpc_integrator_->prepareMetaPrimitives(rpcRecHits); - rpc_integrator_->matchWithDTAndUseRPCTime(correlatedMetaPrimitives); + for (auto& ch_correlatedMetaPrimitives : filtCorrelatedMetaPrimitives) { + rpc_integrator_->matchWithDTAndUseRPCTime(ch_correlatedMetaPrimitives.second); // Probably this is a FIXME + } rpc_integrator_->makeRPCOnlySegments(); rpc_integrator_->storeRPCSingleHits(); rpc_integrator_->removeRPCHitsUsed(); @@ -526,148 +845,157 @@ void DTTrigPhase2Prod::produce(Event& iEvent, const EventSetup& iEventSetup) { vector outExtP2Th; // Assigning index value - assignIndex(correlatedMetaPrimitives); - for (const auto& metaPrimitiveIt : correlatedMetaPrimitives) { - DTChamberId chId(metaPrimitiveIt.rawId); - DTSuperLayerId slId(metaPrimitiveIt.rawId); - if (debug_) - LogDebug("DTTrigPhase2Prod") << "looping in final vector: SuperLayerId" << chId << " x=" << metaPrimitiveIt.x - << " quality=" << metaPrimitiveIt.quality - << " BX=" << round(metaPrimitiveIt.t0 / 25.) << " index=" << metaPrimitiveIt.index; - - int sectorTP = chId.sector(); - //sectors 13 and 14 exist only for the outermost stations for sectors 4 and 10 respectively - //due to the larger MB4 that are divided into two. 
- if (sectorTP == 13) - sectorTP = 4; - if (sectorTP == 14) - sectorTP = 10; - sectorTP = sectorTP - 1; - int sl = 0; - if (metaPrimitiveIt.quality < LOWLOWQ || metaPrimitiveIt.quality == CHIGHQ) { - if (inner(metaPrimitiveIt)) - sl = 1; - else - sl = 3; + if (!skip_processing_) + for (auto& ch_correlatedMetaPrimitives : filtCorrelatedMetaPrimitives) { + assignIndex(ch_correlatedMetaPrimitives.second); } - if (debug_) - LogDebug("DTTrigPhase2Prod") << "pushing back phase-2 dataformat carlo-federica dataformat"; - - if (slId.superLayer() != 2) { - if (df_extended_ == 1 || df_extended_ == 2) { - int pathWireId[8] = {metaPrimitiveIt.wi1, - metaPrimitiveIt.wi2, - metaPrimitiveIt.wi3, - metaPrimitiveIt.wi4, - metaPrimitiveIt.wi5, - metaPrimitiveIt.wi6, - metaPrimitiveIt.wi7, - metaPrimitiveIt.wi8}; - - int pathTDC[8] = {max((int)round(metaPrimitiveIt.tdc1 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc2 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc3 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc4 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc5 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc6 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc7 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc8 - shift_back * LHC_CLK_FREQ), -1)}; - - int pathLat[8] = {metaPrimitiveIt.lat1, - metaPrimitiveIt.lat2, - metaPrimitiveIt.lat3, - metaPrimitiveIt.lat4, - metaPrimitiveIt.lat5, - metaPrimitiveIt.lat6, - metaPrimitiveIt.lat7, - metaPrimitiveIt.lat8}; - - // phiTP (extended DF) - outExtP2Ph.emplace_back( - L1Phase2MuDTExtPhDigi((int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, - chId.wheel(), // uwh (m_wheel) - sectorTP, // usc (m_sector) - chId.station(), // ust (m_station) - sl, // ust (m_station) - (int)round(metaPrimitiveIt.phi * PHIRES_CONV), // uphi (m_phiAngle) - (int)round(metaPrimitiveIt.phiB * 
PHIBRES_CONV), // uphib (m_phiBending) - metaPrimitiveIt.quality, // uqua (m_qualityCode) - metaPrimitiveIt.index, // uind (m_segmentIndex) - (int)round(metaPrimitiveIt.t0) - shift_back * LHC_CLK_FREQ, // ut0 (m_t0Segment) - (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) - (int)round(metaPrimitiveIt.x * 1000), // ux (m_xLocal) - (int)round(metaPrimitiveIt.tanPhi * 1000), // utan (m_tanPsi) - (int)round(metaPrimitiveIt.phi_cmssw * PHIRES_CONV), // uphi (m_phiAngleCMSSW) - (int)round(metaPrimitiveIt.phiB_cmssw * PHIBRES_CONV), // uphib (m_phiBendingCMSSW) - metaPrimitiveIt.rpcFlag, // urpc (m_rpcFlag) - pathWireId, - pathTDC, - pathLat)); - } - if (df_extended_ == 0 || df_extended_ == 2) { - // phiTP (standard DF) - outP2Ph.push_back(L1Phase2MuDTPhDigi( - (int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, - chId.wheel(), // uwh (m_wheel) - sectorTP, // usc (m_sector) - chId.station(), // ust (m_station) - sl, // ust (m_station) - (int)round(metaPrimitiveIt.phi * PHIRES_CONV), // uphi (_phiAngle) - (int)round(metaPrimitiveIt.phiB * PHIBRES_CONV), // uphib (m_phiBending) - metaPrimitiveIt.quality, // uqua (m_qualityCode) - metaPrimitiveIt.index, // uind (m_segmentIndex) - (int)round(metaPrimitiveIt.t0) - shift_back * LHC_CLK_FREQ, // ut0 (m_t0Segment) - (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) - metaPrimitiveIt.rpcFlag // urpc (m_rpcFlag) - )); + for (auto& ch_correlatedMetaPrimitives : filtCorrelatedMetaPrimitives) { + for (const auto& metaPrimitiveIt : ch_correlatedMetaPrimitives.second) { + DTChamberId chId(metaPrimitiveIt.rawId); + DTSuperLayerId slId(metaPrimitiveIt.rawId); + if (debug_) + LogDebug("DTTrigPhase2Prod") << "looping in final vector: SuperLayerId" << chId << " x=" << metaPrimitiveIt.x + << " quality=" << metaPrimitiveIt.quality + << " BX=" << round(metaPrimitiveIt.t0 / 25.) 
<< " index=" << metaPrimitiveIt.index; + + int sectorTP = chId.sector(); + //sectors 13 and 14 exist only for the outermost stations for sectors 4 and 10 respectively + //due to the larger MB4 that are divided into two. + if (sectorTP == 13) + sectorTP = 4; + if (sectorTP == 14) + sectorTP = 10; + sectorTP = sectorTP - 1; + int sl = 0; + if (metaPrimitiveIt.quality < LOWLOWQ || metaPrimitiveIt.quality == CHIGHQ) { + if (inner(metaPrimitiveIt)) + sl = 1; + else + sl = 3; } - } else { - if (df_extended_ == 1 || df_extended_ == 2) { - int pathWireId[4] = {metaPrimitiveIt.wi1, metaPrimitiveIt.wi2, metaPrimitiveIt.wi3, metaPrimitiveIt.wi4}; - - int pathTDC[4] = {max((int)round(metaPrimitiveIt.tdc1 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc2 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc3 - shift_back * LHC_CLK_FREQ), -1), - max((int)round(metaPrimitiveIt.tdc4 - shift_back * LHC_CLK_FREQ), -1)}; - - int pathLat[4] = {metaPrimitiveIt.lat1, metaPrimitiveIt.lat2, metaPrimitiveIt.lat3, metaPrimitiveIt.lat4}; - - // thTP (extended DF) - outExtP2Th.emplace_back( - L1Phase2MuDTExtThDigi((int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, - chId.wheel(), // uwh (m_wheel) - sectorTP, // usc (m_sector) - chId.station(), // ust (m_station) - (int)round(metaPrimitiveIt.phi * ZRES_CONV), // uz (m_zGlobal) - (int)round(metaPrimitiveIt.phiB * KRES_CONV), // uk (m_kSlope) - metaPrimitiveIt.quality, // uqua (m_qualityCode) - metaPrimitiveIt.index, // uind (m_segmentIndex) - (int)round(metaPrimitiveIt.t0) - shift_back * LHC_CLK_FREQ, // ut0 (m_t0Segment) - (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) - (int)round(metaPrimitiveIt.x * 1000), // ux (m_yLocal) - (int)round(metaPrimitiveIt.phi_cmssw * PHIRES_CONV), // uphi (m_zCMSSW) - (int)round(metaPrimitiveIt.phiB_cmssw * PHIBRES_CONV), // uphib (m_kCMSSW) - metaPrimitiveIt.rpcFlag, // urpc (m_rpcFlag) - pathWireId, - pathTDC, - pathLat)); - } 
- if (df_extended_ == 0 || df_extended_ == 2) { - // thTP (standard DF) - outP2Th.push_back(L1Phase2MuDTThDigi( - (int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, - chId.wheel(), // uwh (m_wheel) - sectorTP, // usc (m_sector) - chId.station(), // ust (m_station) - (int)round(metaPrimitiveIt.phi * ZRES_CONV), // uz (m_zGlobal) - (int)round(metaPrimitiveIt.phiB * KRES_CONV), // uk (m_kSlope) - metaPrimitiveIt.quality, // uqua (m_qualityCode) - metaPrimitiveIt.index, // uind (m_segmentIndex) - (int)round(metaPrimitiveIt.t0) - shift_back * LHC_CLK_FREQ, // ut0 (m_t0Segment) - (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) - metaPrimitiveIt.rpcFlag // urpc (m_rpcFlag) - )); + + float tp_t0 = + (metaPrimitiveIt.t0 - shift_back * LHC_CLK_FREQ) * ((float)TIME_TO_TDC_COUNTS / (float)LHC_CLK_FREQ); + + if (debug_) + LogDebug("DTTrigPhase2Prod") << "pushing back phase-2 dataformat carlo-federica dataformat"; + + if (slId.superLayer() != 2) { + if (df_extended_ == 1 || df_extended_ == 2) { + int pathWireId[8] = {metaPrimitiveIt.wi1, + metaPrimitiveIt.wi2, + metaPrimitiveIt.wi3, + metaPrimitiveIt.wi4, + metaPrimitiveIt.wi5, + metaPrimitiveIt.wi6, + metaPrimitiveIt.wi7, + metaPrimitiveIt.wi8}; + + int pathTDC[8] = {max((int)round(metaPrimitiveIt.tdc1 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc2 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc3 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc4 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc5 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc6 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc7 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc8 - shift_back * LHC_CLK_FREQ), -1)}; + + int pathLat[8] = {metaPrimitiveIt.lat1, + metaPrimitiveIt.lat2, + metaPrimitiveIt.lat3, + metaPrimitiveIt.lat4, + metaPrimitiveIt.lat5, + 
metaPrimitiveIt.lat6, + metaPrimitiveIt.lat7, + metaPrimitiveIt.lat8}; + + // phiTP (extended DF) + outExtP2Ph.emplace_back( + L1Phase2MuDTExtPhDigi((int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, + chId.wheel(), // uwh (m_wheel) + sectorTP, // usc (m_sector) + chId.station(), // ust (m_station) + sl, // ust (m_station) + (int)round(metaPrimitiveIt.phi * PHIRES_CONV), // uphi (m_phiAngle) + (int)round(metaPrimitiveIt.phiB * PHIBRES_CONV), // uphib (m_phiBending) + metaPrimitiveIt.quality, // uqua (m_qualityCode) + metaPrimitiveIt.index, // uind (m_segmentIndex) + tp_t0, // ut0 (m_t0Segment) + (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) + (int)round(metaPrimitiveIt.x * 1000), // ux (m_xLocal) + (int)round(metaPrimitiveIt.tanPhi * 1000), // utan (m_tanPsi) + (int)round(metaPrimitiveIt.phi_cmssw * PHIRES_CONV), // uphi (m_phiAngleCMSSW) + (int)round(metaPrimitiveIt.phiB_cmssw * PHIBRES_CONV), // uphib (m_phiBendingCMSSW) + metaPrimitiveIt.rpcFlag, // urpc (m_rpcFlag) + pathWireId, + pathTDC, + pathLat)); + } + if (df_extended_ == 0 || df_extended_ == 2) { + // phiTP (standard DF) + outP2Ph.push_back(L1Phase2MuDTPhDigi( + (int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, + chId.wheel(), // uwh (m_wheel) + sectorTP, // usc (m_sector) + chId.station(), // ust (m_station) + sl, // ust (m_station) + (int)round(metaPrimitiveIt.phi * PHIRES_CONV), // uphi (_phiAngle) + (int)round(metaPrimitiveIt.phiB * PHIBRES_CONV), // uphib (m_phiBending) + metaPrimitiveIt.quality, // uqua (m_qualityCode) + metaPrimitiveIt.index, // uind (m_segmentIndex) + tp_t0, // ut0 (m_t0Segment) + (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) + metaPrimitiveIt.rpcFlag // urpc (m_rpcFlag) + )); + } + } else { + if (df_extended_ == 1 || df_extended_ == 2) { + int pathWireId[4] = {metaPrimitiveIt.wi1, metaPrimitiveIt.wi2, metaPrimitiveIt.wi3, metaPrimitiveIt.wi4}; + + int pathTDC[4] = 
{max((int)round(metaPrimitiveIt.tdc1 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc2 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc3 - shift_back * LHC_CLK_FREQ), -1), + max((int)round(metaPrimitiveIt.tdc4 - shift_back * LHC_CLK_FREQ), -1)}; + + int pathLat[4] = {metaPrimitiveIt.lat1, metaPrimitiveIt.lat2, metaPrimitiveIt.lat3, metaPrimitiveIt.lat4}; + + // thTP (extended DF) + outExtP2Th.emplace_back( + L1Phase2MuDTExtThDigi((int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, + chId.wheel(), // uwh (m_wheel) + sectorTP, // usc (m_sector) + chId.station(), // ust (m_station) + (int)round(metaPrimitiveIt.phi * ZRES_CONV), // uz (m_zGlobal) + (int)round(metaPrimitiveIt.phiB * KRES_CONV), // uk (m_kSlope) + metaPrimitiveIt.quality, // uqua (m_qualityCode) + metaPrimitiveIt.index, // uind (m_segmentIndex) + tp_t0, // ut0 (m_t0Segment) + (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) + (int)round(metaPrimitiveIt.x * 1000), // ux (m_yLocal) + (int)round(metaPrimitiveIt.phi_cmssw * ZRES_CONV), // uphi (m_zCMSSW) + (int)round(metaPrimitiveIt.phiB_cmssw * KRES_CONV), // uphib (m_kCMSSW) + metaPrimitiveIt.rpcFlag, // urpc (m_rpcFlag) + pathWireId, + pathTDC, + pathLat)); + } + if (df_extended_ == 0 || df_extended_ == 2) { + // thTP (standard DF) + outP2Th.push_back(L1Phase2MuDTThDigi( + (int)round(metaPrimitiveIt.t0 / (float)LHC_CLK_FREQ) - shift_back, + chId.wheel(), // uwh (m_wheel) + sectorTP, // usc (m_sector) + chId.station(), // ust (m_station) + (int)round(metaPrimitiveIt.phi * ZRES_CONV), // uz (m_zGlobal) + (int)round(metaPrimitiveIt.phiB * KRES_CONV), // uk (m_kSlope) + metaPrimitiveIt.quality, // uqua (m_qualityCode) + metaPrimitiveIt.index, // uind (m_segmentIndex) + tp_t0, // ut0 (m_t0Segment) + (int)round(metaPrimitiveIt.chi2 * CHI2RES_CONV), // uchi2 (m_chi2Segment) + metaPrimitiveIt.rpcFlag // urpc (m_rpcFlag) + )); + } } } } @@ -747,6 +1075,16 @@ void 
DTTrigPhase2Prod::printmP(const string& ss, const metaPrimitive& mP) const << setw(13) << left << mP.chi2 << " r:" << rango(mP); } +void DTTrigPhase2Prod::printmP(const metaPrimitive& mP) const { + DTSuperLayerId slId(mP.rawId); + LogInfo("DTTrigPhase2Prod") << (int)slId << "\t " << setw(2) << left << mP.wi1 << " " << setw(2) << left << mP.wi2 + << " " << setw(2) << left << mP.wi3 << " " << setw(2) << left << mP.wi4 << " " << setw(5) + << left << mP.tdc1 << " " << setw(5) << left << mP.tdc2 << " " << setw(5) << left + << mP.tdc3 << " " << setw(5) << left << mP.tdc4 << " " << setw(10) << right << mP.x << " " + << setw(9) << left << mP.tanPhi << " " << setw(5) << left << mP.t0 << " " << setw(13) + << left << mP.chi2 << " r:" << rango(mP) << std::endl; +} + void DTTrigPhase2Prod::printmPC(const string& ss, const metaPrimitive& mP) const { DTChamberId ChId(mP.rawId); LogInfo("DTTrigPhase2Prod") << ss << (int)ChId << "\t " << setw(2) << left << mP.wi1 << " " << setw(2) << left @@ -764,6 +1102,23 @@ void DTTrigPhase2Prod::printmPC(const string& ss, const metaPrimitive& mP) const << left << mP.chi2 << " r:" << rango(mP); } +void DTTrigPhase2Prod::printmPC(const metaPrimitive& mP) const { + DTChamberId ChId(mP.rawId); + LogInfo("DTTrigPhase2Prod") << (int)ChId << "\t " << setw(2) << left << mP.wi1 << " " << setw(2) << left << mP.wi2 + << " " << setw(2) << left << mP.wi3 << " " << setw(2) << left << mP.wi4 << " " << setw(2) + << left << mP.wi5 << " " << setw(2) << left << mP.wi6 << " " << setw(2) << left << mP.wi7 + << " " << setw(2) << left << mP.wi8 << " " << setw(5) << left << mP.tdc1 << " " << setw(5) + << left << mP.tdc2 << " " << setw(5) << left << mP.tdc3 << " " << setw(5) << left + << mP.tdc4 << " " << setw(5) << left << mP.tdc5 << " " << setw(5) << left << mP.tdc6 + << " " << setw(5) << left << mP.tdc7 << " " << setw(5) << left << mP.tdc8 << " " + << setw(2) << left << mP.lat1 << " " << setw(2) << left << mP.lat2 << " " << setw(2) + << left << mP.lat3 << " " << 
setw(2) << left << mP.lat4 << " " << setw(2) << left + << mP.lat5 << " " << setw(2) << left << mP.lat6 << " " << setw(2) << left << mP.lat7 + << " " << setw(2) << left << mP.lat8 << " " << setw(10) << right << mP.x << " " << setw(9) + << left << mP.tanPhi << " " << setw(5) << left << mP.t0 << " " << setw(13) << left + << mP.chi2 << " r:" << rango(mP) << std::endl; +} + int DTTrigPhase2Prod::rango(const metaPrimitive& mp) const { if (mp.quality == 1 or mp.quality == 2) return 3; @@ -880,7 +1235,6 @@ void DTTrigPhase2Prod::fillDescriptions(edm::ConfigurationDescriptions& descript // dtTriggerPhase2PrimitiveDigis edm::ParameterSetDescription desc; desc.add("digiTag", edm::InputTag("CalibratedDigis")); - desc.add("trigger_with_sl", 4); desc.add("timeTolerance", 999999); desc.add("tanPhiTh", 1.0); desc.add("tanPhiThw2max", 1.3); @@ -900,12 +1254,26 @@ void DTTrigPhase2Prod::fillDescriptions(edm::ConfigurationDescriptions& descript desc.add("scenario", 0); desc.add("df_extended", 0); desc.add("max_primitives", 999); + desc.add("output_mixer", false); + desc.add("output_latpredictor", false); + desc.add("output_slfitter", false); + desc.add("output_slfilter", false); + desc.add("output_confirmed", false); + desc.add("output_matcher", false); desc.add("ttrig_filename", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/wire_rawId_ttrig.txt")); desc.add("z_filename", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/wire_rawId_z.txt")); + desc.add("lut_sl1", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/fitterlut_sl1.dat")); + desc.add("lut_sl2", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/fitterlut_slx.dat")); + desc.add("lut_sl3", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/fitterlut_sl3.dat")); + desc.add("lut_2sl", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/fitterlut_2sl.dat")); desc.add("shift_filename", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/wire_rawId_x.txt")); + desc.add("maxdrift_filename", + 
edm::FileInPath("L1Trigger/DTTriggerPhase2/data/drift_time_per_chamber.txt")); desc.add("shift_theta_filename", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/theta_shift.txt")); desc.add("global_coords_filename", edm::FileInPath("L1Trigger/DTTriggerPhase2/data/global_coord_perp_x_phi0.txt")); + desc.add("laterality_filename", + edm::FileInPath("L1Trigger/DTTriggerPhase2/data/lat_predictions.dat")); desc.add("algo", 0); desc.add("minHits4Fit", 3); desc.add("splitPathPerSL", true); diff --git a/L1Trigger/DTTriggerPhase2/python/dtTriggerPhase2PrimitiveDigis_cfi.py b/L1Trigger/DTTriggerPhase2/python/dtTriggerPhase2PrimitiveDigis_cfi.py index 5e3f920736e03..d044ecf89923b 100644 --- a/L1Trigger/DTTriggerPhase2/python/dtTriggerPhase2PrimitiveDigis_cfi.py +++ b/L1Trigger/DTTriggerPhase2/python/dtTriggerPhase2PrimitiveDigis_cfi.py @@ -6,7 +6,6 @@ dtTriggerPhase2PrimitiveDigis = cms.EDProducer("DTTrigPhase2Prod", digiTag = cms.InputTag("CalibratedDigis"), - trigger_with_sl = cms.int32(4), tanPhiTh = cms.double(1.), tanPhiThw2max = cms.double(1.3), tanPhiThw2min = cms.double(0.5), @@ -26,11 +25,24 @@ df_extended = cms.int32(0), # DF: 0 for standard, 1 for extended, 2 for both max_primitives = cms.int32(999), + output_mixer = cms.bool(False), + output_latpredictor = cms.bool(False), + output_slfitter = cms.bool(False), + output_slfilter = cms.bool(False), + output_confirmed = cms.bool(False), + output_matcher = cms.bool(False), + ttrig_filename = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/wire_rawId_ttrig.txt'), z_filename = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/wire_rawId_z.txt'), + lut_sl1 = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/fitterlut_sl1.dat'), + lut_sl2 = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/fitterlut_slx.dat'), + lut_sl3 = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/fitterlut_sl3.dat'), + lut_2sl = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/fitterlut_2sl.dat'), shift_filename = 
cms.FileInPath('L1Trigger/DTTriggerPhase2/data/wire_rawId_x.txt'), shift_theta_filename = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/theta_shift.txt'), + maxdrift_filename = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/drift_time_per_chamber.txt'), global_coords_filename = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/global_coord_perp_x_phi0.txt'), + laterality_filename = cms.FileInPath('L1Trigger/DTTriggerPhase2/data/lat_predictions.dat'), algo = cms.int32(0), # 0 = STD gr., 2 = Hough transform, 1 = PseudoBayes Approach minHits4Fit = cms.int32(3), diff --git a/L1Trigger/DTTriggerPhase2/src/GlobalCoordsObtainer.cc b/L1Trigger/DTTriggerPhase2/src/GlobalCoordsObtainer.cc index 1376d4c6e40c0..2446d50745a07 100644 --- a/L1Trigger/DTTriggerPhase2/src/GlobalCoordsObtainer.cc +++ b/L1Trigger/DTTriggerPhase2/src/GlobalCoordsObtainer.cc @@ -224,12 +224,6 @@ std::vector GlobalCoordsObtainer::get_global_coordinates(uint32_t chid, int tanpsi_msb = tanpsi >> (TANPSI_SIZE - PHIB_LUT_ADDR_WIDTH); tanpsi_msb = from_two_comp(tanpsi_msb, PHIB_LUT_ADDR_WIDTH); - x_msb = x >> (X_SIZE - PHI_LUT_ADDR_WIDTH); - x_msb = from_two_comp(x_msb, PHI_LUT_ADDR_WIDTH); - - tanpsi_msb = tanpsi >> (TANPSI_SIZE - PHIB_LUT_ADDR_WIDTH); - tanpsi_msb = from_two_comp(tanpsi_msb, PHIB_LUT_ADDR_WIDTH); - // The LSB part can be sliced right away because it must yield a positive integer int x_lsb = x & (int)(std::pow(2, (X_SIZE - PHI_LUT_ADDR_WIDTH)) - 1); int tanpsi_lsb = tanpsi & (int)(std::pow(2, (TANPSI_SIZE - PHIB_LUT_ADDR_WIDTH)) - 1); @@ -261,4 +255,4 @@ std::vector GlobalCoordsObtainer::get_global_coordinates(uint32_t chid, double phib_f = (double)phib / pow(2, PHIB_SIZE); return std::vector({phi_f, phib_f}); -} \ No newline at end of file +} diff --git a/L1Trigger/DTTriggerPhase2/src/LateralityBasicProvider.cc b/L1Trigger/DTTriggerPhase2/src/LateralityBasicProvider.cc new file mode 100644 index 0000000000000..a46a2e82b5e87 --- /dev/null +++ 
b/L1Trigger/DTTriggerPhase2/src/LateralityBasicProvider.cc @@ -0,0 +1,100 @@ +#include "L1Trigger/DTTriggerPhase2/interface/LateralityBasicProvider.h" +#include +#include + +using namespace edm; +using namespace std; +using namespace cmsdt; +// ============================================================================ +// Constructors and destructor +// ============================================================================ +LateralityBasicProvider::LateralityBasicProvider(const ParameterSet &pset, edm::ConsumesCollector &iC) + : LateralityProvider(pset, iC), debug_(pset.getUntrackedParameter("debug")) { + if (debug_) + LogDebug("LateralityBasicProvider") << "LateralityBasicProvider: constructor"; + + fill_lat_combinations(); +} + +LateralityBasicProvider::~LateralityBasicProvider() { + if (debug_) + LogDebug("LateralityBasicProvider") << "LateralityBasicProvider: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void LateralityBasicProvider::initialise(const edm::EventSetup &iEventSetup) { + if (debug_) + LogDebug("LateralityBasicProvider") << "LateralityBasicProvider::initialiase"; +} + +void LateralityBasicProvider::run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &muonpaths, + std::vector &lateralities) { + if (debug_) + LogDebug("LateralityBasicProvider") << "LateralityBasicProvider: run"; + + // fit per SL (need to allow for multiple outputs for a single mpath) + for (auto &muonpath : muonpaths) { + analyze(muonpath, lateralities); + } +} + +void LateralityBasicProvider::finish() { + if (debug_) + LogDebug("LateralityBasicProvider") << "LateralityBasicProvider: finish"; +}; + +//------------------------------------------------------------------ +//--- Metodos privados +//------------------------------------------------------------------ + +void 
LateralityBasicProvider::analyze(MuonPathPtr &inMPath, std::vector &lateralities) { + if (debug_) + LogDebug("LateralityBasicProvider") << "DTp2:analyze \t\t\t\t starts"; + for (auto &lat_combination : lat_combinations) { + if (inMPath->missingLayer() == lat_combination.missing_layer && + inMPath->cellLayout()[0] == lat_combination.cellLayout[0] && + inMPath->cellLayout()[1] == lat_combination.cellLayout[1] && + inMPath->cellLayout()[2] == lat_combination.cellLayout[2] && + inMPath->cellLayout()[3] == lat_combination.cellLayout[3]) { + lateralities.push_back(lat_combination.latcombs); + return; + } + } + lateralities.push_back(LAT_VECTOR_NULL); + return; +} + +void LateralityBasicProvider::fill_lat_combinations() { + lat_combinations.push_back({-1, {0, 0, 0, -1}, {{0, 0, 0, 1}, {0, 0, 1, 1}, {0, 1, 1, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({-1, {0, 0, 1, -1}, {{0, 0, 1, 0}, {0, 1, 1, 0}, {1, 1, 1, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({-1, {0, 1, 0, -1}, {{0, 1, 0, 0}, {0, 1, 0, 1}, {1, 1, 0, 0}, {1, 1, 0, 1}}}); + lat_combinations.push_back({-1, {0, 1, 1, -1}, {{0, 1, 0, 0}, {0, 1, 1, 0}, {0, 1, 1, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({-1, {1, 0, 0, -1}, {{1, 0, 0, 0}, {1, 0, 0, 1}, {1, 0, 1, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({-1, {1, 0, 1, -1}, {{0, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 1, 0}, {1, 0, 1, 1}}}); + lat_combinations.push_back({-1, {1, 1, 0, -1}, {{0, 0, 0, 1}, {1, 0, 0, 1}, {1, 1, 0, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({-1, {1, 1, 1, -1}, {{1, 0, 0, 0}, {1, 1, 0, 0}, {1, 1, 1, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({0, {0, 0, 0, -1}, {{0, 0, 0, 1}, {0, 0, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({0, {0, 0, 1, -1}, {{0, 0, 1, 0}, {0, 0, 1, 1}, {0, 1, 1, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({0, {0, 1, 0, -1}, {{0, 0, 0, 1}, {0, 1, 0, 0}, {0, 1, 0, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({0, {0, 1, 1, -1}, {{0, 1, 0, 0}, {0, 1, 1, 0}, 
{0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({1, {0, 0, 0, -1}, {{0, 0, 0, 1}, {0, 0, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({1, {0, 0, 1, -1}, {{0, 0, 1, 0}, {1, 0, 1, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({1, {0, 1, 0, -1}, {{0, 0, 0, 1}, {1, 0, 0, 0}, {1, 0, 0, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({1, {0, 1, 1, -1}, {{0, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 1, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({1, {1, 1, 0, -1}, {{0, 0, 0, 1}, {1, 0, 0, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({1, {1, 1, 1, -1}, {{1, 0, 0, 0}, {1, 0, 1, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({2, {0, 0, 0, -1}, {{0, 0, 0, 1}, {0, 1, 0, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({2, {0, 0, 1, -1}, {{0, 1, 0, 0}, {0, 1, 0, 1}, {1, 1, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({2, {0, 1, 1, -1}, {{0, 1, 0, 0}, {0, 1, 0, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({2, {1, 0, 0, -1}, {{1, 0, 0, 0}, {1, 0, 0, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({2, {1, 0, 1, -1}, {{0, 0, 0, 1}, {1, 0, 0, 0}, {1, 0, 0, 1}, {0, 0, 0, 0}}}); + lat_combinations.push_back({2, {1, 1, 1, -1}, {{1, 0, 0, 0}, {1, 1, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({3, {0, 0, 0, -1}, {{0, 0, 1, 0}, {0, 1, 1, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({3, {0, 1, 0, -1}, {{0, 1, 0, 0}, {0, 1, 1, 0}, {1, 1, 0, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({3, {1, 0, 0, -1}, {{0, 0, 1, 0}, {1, 0, 0, 0}, {1, 0, 1, 0}, {0, 0, 0, 0}}}); + lat_combinations.push_back({3, {1, 1, 0, -1}, {{1, 0, 0, 0}, {1, 1, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}}); +}; diff --git a/L1Trigger/DTTriggerPhase2/src/LateralityCoarsedProvider.cc b/L1Trigger/DTTriggerPhase2/src/LateralityCoarsedProvider.cc new file mode 100644 index 0000000000000..e37281d999e24 --- /dev/null +++ 
b/L1Trigger/DTTriggerPhase2/src/LateralityCoarsedProvider.cc @@ -0,0 +1,259 @@ +#include "L1Trigger/DTTriggerPhase2/interface/LateralityCoarsedProvider.h" +#include +#include + +using namespace edm; +using namespace std; +using namespace cmsdt; +// ============================================================================ +// Constructors and destructor +// ============================================================================ +LateralityCoarsedProvider::LateralityCoarsedProvider(const ParameterSet &pset, edm::ConsumesCollector &iC) + : LateralityProvider(pset, iC), + debug_(pset.getUntrackedParameter("debug")), + laterality_filename_(pset.getParameter("laterality_filename")) { + if (debug_) + LogDebug("LateralityCoarsedProvider") << "LateralityCoarsedProvider: constructor"; + + fill_lat_combinations(); +} + +LateralityCoarsedProvider::~LateralityCoarsedProvider() { + if (debug_) + LogDebug("LateralityCoarsedProvider") << "LateralityCoarsedProvider: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void LateralityCoarsedProvider::initialise(const edm::EventSetup &iEventSetup) { + if (debug_) + LogDebug("LateralityCoarsedProvider") << "LateralityCoarsedProvider::initialiase"; +} + +void LateralityCoarsedProvider::run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &muonpaths, + std::vector &lateralities) { + if (debug_) + LogDebug("LateralityCoarsedProvider") << "LateralityCoarsedProvider: run"; + + // fit per SL (need to allow for multiple outputs for a single mpath) + for (auto &muonpath : muonpaths) { + analyze(muonpath, lateralities); + } +} + +void LateralityCoarsedProvider::finish() { + if (debug_) + LogDebug("LateralityCoarsedProvider") << "LateralityCoarsedProvider: finish"; +}; + +//------------------------------------------------------------------ 
+//--- Metodos privados +//------------------------------------------------------------------ + +void LateralityCoarsedProvider::analyze(MuonPathPtr &inMPath, std::vector &lateralities) { + if (debug_) + LogDebug("LateralityCoarsedProvider") << "DTp2:analyze \t\t\t\t starts"; + + auto coarsified_times = coarsify_times(inMPath); + + for (auto &lat_combination : lat_combinations) { + if (inMPath->missingLayer() == lat_combination.missing_layer && + inMPath->cellLayout()[0] == lat_combination.cellLayout[0] && + inMPath->cellLayout()[1] == lat_combination.cellLayout[1] && + inMPath->cellLayout()[2] == lat_combination.cellLayout[2] && + inMPath->cellLayout()[3] == lat_combination.cellLayout[3] && + coarsified_times[0] == lat_combination.coarsed_times[0] && + coarsified_times[1] == lat_combination.coarsed_times[1] && + coarsified_times[2] == lat_combination.coarsed_times[2] && + coarsified_times[3] == lat_combination.coarsed_times[3]) { + lateralities.push_back(lat_combination.latcombs); + return; + } + } + lateralities.push_back(LAT_VECTOR_NULL); + return; +} + +std::vector LateralityCoarsedProvider::coarsify_times(MuonPathPtr &inMPath) { + int max_time = -999; + // obtain the maximum time to do the coarsification + for (int layer = 0; layer < cmsdt::NUM_LAYERS; layer++) { + if (inMPath->missingLayer() == layer) + continue; + if (inMPath->primitive(layer)->tdcTimeStamp() > max_time) + max_time = inMPath->primitive(layer)->tdcTimeStamp(); + } + + // do the coarsification + std::vector coarsified_times; + for (int layer = 0; layer < cmsdt::NUM_LAYERS; layer++) { + if (inMPath->missingLayer() == layer) { + coarsified_times.push_back(-1); + continue; + } + auto coarsified_time = max_time - inMPath->primitive(layer)->tdcTimeStamp(); + // transform into tdc counts + coarsified_time = (int)round(((float)TIME_TO_TDC_COUNTS / (float)LHC_CLK_FREQ) * coarsified_time); + // keep the LAT_MSB_BITS + coarsified_time = coarsified_time >> (LAT_TOTAL_BITS - LAT_MSB_BITS); + + if 
(inMPath->missingLayer() == -1) { // 4-hit candidates + if (coarsified_time <= LAT_P0_4H) + coarsified_times.push_back(0); + else if (coarsified_time <= LAT_P1_4H) + coarsified_times.push_back(1); + else if (coarsified_time <= LAT_P2_4H) + coarsified_times.push_back(2); + else + coarsified_times.push_back(3); + } else { // 3-hit candidates + if (coarsified_time <= LAT_P0_3H) + coarsified_times.push_back(0); + else if (coarsified_time <= LAT_P1_3H) + coarsified_times.push_back(1); + else if (coarsified_time <= LAT_P2_3H) + coarsified_times.push_back(2); + else + coarsified_times.push_back(3); + } + } + return coarsified_times; +} + +void LateralityCoarsedProvider::fill_lat_combinations() { + std::ifstream latFile(laterality_filename_.fullPath()); // Open file + if (latFile.fail()) { + throw cms::Exception("Missing Input File") + << "LateralityCoarsedProvider::fill_lat_combinations() - Cannot find " << laterality_filename_.fullPath(); + return; + } + + std::string line; + + short line_counter = 0; // Line counter + + // Bit masks for every parameter + int _12bitMask = 0xFFF; // 12 bits + int _layoutMask = 0xE00; // 3 bits + int _is4HitMask = 0x100; // 1 bit + int _coarsedMask = 0xFF; // 8 bits + int _layerMask = 0xC0; // 2 bits + + while (std::getline(latFile, line)) { + if (line == "000000000000") { + line_counter++; + continue; + } //skip zeros + + if (line.size() == 12) { + std::vector> transformedVector = convertString(line); + latcomb lat0 = { + transformedVector[0][0], transformedVector[0][1], transformedVector[0][2], transformedVector[0][3]}; + latcomb lat1 = { + transformedVector[1][0], transformedVector[1][1], transformedVector[1][2], transformedVector[1][3]}; + latcomb lat2 = { + transformedVector[2][0], transformedVector[2][1], transformedVector[2][2], transformedVector[2][3]}; + + //Transforming line number to binary + short address = line_counter & _12bitMask; // 12 bits + + short layout = + (address & _layoutMask) >> 9; //Doing AND and displacing 9 bits 
to the right to obtain 3 bits of layout + short is4Hit = (address & _is4HitMask) >> 8; + short coarsed = address & _coarsedMask; + + short bit1Layout = (layout & (1)); + short bit2Layout = (layout & (1 << 1)) >> 1; + short bit3Layout = (layout & (1 << 2)) >> 2; + + //Logic implementation + short missingLayer = -1; + short layout_comb[NUM_LAYERS] = {bit3Layout, bit2Layout, bit1Layout, -1}; + short coarsedTimes[NUM_LAYERS] = {0, 0, 0, 0}; + + if (is4Hit != 1) { //3 hit case + missingLayer = + (coarsed & _layerMask) >> 6; //Missing layer is given by the two most significative bits of coarsed vector + coarsedTimes[missingLayer] = -1; //Missing layer set to -1 + } + + // Filling coarsedTimes vector without the missing layer + if (missingLayer != -1) { + switch (missingLayer) { + case 0: + coarsedTimes[1] = (coarsed & 0x30) >> 4; + coarsedTimes[2] = (coarsed & 0x0C) >> 2; + coarsedTimes[3] = coarsed & 0x03; + lat0 = {-1, transformedVector[0][1], transformedVector[0][2], transformedVector[0][3]}; + lat1 = {-1, transformedVector[1][1], transformedVector[1][2], transformedVector[1][3]}; + lat2 = {-1, transformedVector[2][1], transformedVector[2][2], transformedVector[2][3]}; + break; + case 1: + coarsedTimes[0] = (coarsed & 0x30) >> 4; + coarsedTimes[2] = (coarsed & 0x0C) >> 2; + coarsedTimes[3] = coarsed & 0x03; + lat0 = {transformedVector[0][0], -1, transformedVector[0][2], transformedVector[0][3]}; + lat1 = {transformedVector[1][0], -1, transformedVector[1][2], transformedVector[1][3]}; + lat2 = {transformedVector[2][0], -1, transformedVector[2][2], transformedVector[2][3]}; + break; + case 2: + coarsedTimes[0] = (coarsed & 0x30) >> 4; + coarsedTimes[1] = (coarsed & 0x0C) >> 2; + coarsedTimes[3] = coarsed & 0x03; + lat0 = {transformedVector[0][0], transformedVector[0][1], -1, transformedVector[0][3]}; + lat1 = {transformedVector[1][0], transformedVector[1][1], -1, transformedVector[1][3]}; + lat2 = {transformedVector[2][0], transformedVector[2][1], -1, 
transformedVector[2][3]}; + break; + case 3: + coarsedTimes[0] = (coarsed & 0x30) >> 4; + coarsedTimes[1] = (coarsed & 0x0C) >> 2; + coarsedTimes[2] = coarsed & 0x03; + lat0 = {transformedVector[0][0], transformedVector[0][1], transformedVector[0][2], -1}; + lat1 = {transformedVector[1][0], transformedVector[1][1], transformedVector[1][2], -1}; + lat2 = {transformedVector[2][0], transformedVector[2][1], transformedVector[2][2], -1}; + break; + + default: + break; + } + + } else { //4 hit case + coarsedTimes[0] = (coarsed & 0xC0) >> 6; + coarsedTimes[1] = (coarsed & 0x30) >> 4; + coarsedTimes[2] = (coarsed & 0x0C) >> 2; + coarsedTimes[3] = coarsed & 0x03; + } + + lat_coarsed_combination lat_temp = {missingLayer, + {layout_comb[0], layout_comb[1], layout_comb[2], layout_comb[3]}, + {coarsedTimes[0], coarsedTimes[1], coarsedTimes[2], coarsedTimes[3]}, + {lat0, lat1, lat2}}; + lat_combinations.push_back(lat_temp); + + } else { //size different from 12 + std::cerr << "Error: line " << line_counter << " does not contain 12 bits." 
<< std::endl; + } + line_counter++; + }; + + //closing lateralities file + latFile.close(); +}; + +// Function to convert a 12 bit string in a a vector of 4 bit vectors +std::vector> LateralityCoarsedProvider::convertString(std::string chain) { + std::vector> result; + + for (size_t i = 0; i < chain.size(); i += 4) { + std::vector group; + for (size_t j = 0; j < 4; j++) { + group.push_back(chain[i + j] - '0'); // Convert the character to integer + } + result.push_back(group); + } + + return result; +} diff --git a/L1Trigger/DTTriggerPhase2/src/LateralityProvider.cc b/L1Trigger/DTTriggerPhase2/src/LateralityProvider.cc new file mode 100644 index 0000000000000..ae5e8486c9242 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/LateralityProvider.cc @@ -0,0 +1,24 @@ +#include "L1Trigger/DTTriggerPhase2/interface/LateralityProvider.h" + +using namespace edm; +using namespace std; + +// ============================================================================ +// Constructors and destructor +// ============================================================================ +LateralityProvider::LateralityProvider(const ParameterSet& pset, edm::ConsumesCollector& iC) + : debug_(pset.getUntrackedParameter("debug")) {} + +LateralityProvider::~LateralityProvider() {} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void LateralityProvider::initialise(const edm::EventSetup& iEventSetup) {} + +void LateralityProvider::finish(){}; + +void LateralityProvider::run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + MuonPathPtrs& inMpath, + std::vector& lateralities){}; diff --git a/L1Trigger/DTTriggerPhase2/src/MPCorFilter.cc b/L1Trigger/DTTriggerPhase2/src/MPCorFilter.cc new file mode 100644 index 0000000000000..0feec5b478522 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/MPCorFilter.cc @@ -0,0 +1,242 @@ +#include 
"L1Trigger/DTTriggerPhase2/interface/MPCorFilter.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +using namespace edm; +using namespace std; +using namespace cmsdt; + +// ============================================================================ +// Constructors and destructor +// ============================================================================ +MPCorFilter::MPCorFilter(const ParameterSet &pset) + : MPFilter(pset), debug_(pset.getUntrackedParameter("debug")) {} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void MPCorFilter::initialise(const edm::EventSetup &iEventSetup) {} + +void MPCorFilter::run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inSLMPaths, + std::vector &inCorMPaths, + std::vector &outMPaths) { + if (debug_) + LogDebug("MPCorFilter") << "MPCorFilter: run"; + + std::vector SL1metaPrimitives; + std::vector SL2metaPrimitives; + std::vector SL3metaPrimitives; + std::vector CormetaPrimitives; + uint32_t sl1Id_rawid = -1, sl2Id_rawid = -1, sl3Id_rawid = -1; + if (!inSLMPaths.empty()) { + int dum_sl_rawid = inSLMPaths[0].rawId; + DTSuperLayerId dumSlId(dum_sl_rawid); + + max_drift_tdc = maxdriftinfo_[dumSlId.wheel() + 2][dumSlId.station() - 1][dumSlId.sector() - 1]; + DTChamberId ChId(dumSlId.wheel(), dumSlId.station(), dumSlId.sector()); + DTSuperLayerId sl1Id(ChId.rawId(), 1); + sl1Id_rawid = sl1Id.rawId(); + DTSuperLayerId sl2Id(ChId.rawId(), 2); + sl2Id_rawid = sl2Id.rawId(); + DTSuperLayerId sl3Id(ChId.rawId(), 3); + sl3Id_rawid = sl3Id.rawId(); + + for (const auto &metaprimitiveIt : inSLMPaths) { + if (metaprimitiveIt.rawId == sl1Id_rawid) { + SL1metaPrimitives.push_back(metaprimitiveIt); + } else if (metaprimitiveIt.rawId == sl3Id_rawid) + SL3metaPrimitives.push_back(metaprimitiveIt); + else if (metaprimitiveIt.rawId == sl2Id_rawid) + 
SL2metaPrimitives.push_back(metaprimitiveIt); + } + } + auto filteredMPs = filter(SL1metaPrimitives, SL2metaPrimitives, SL3metaPrimitives, inCorMPaths); + for (auto &mp : filteredMPs) + outMPaths.push_back(mp); +} + +void MPCorFilter::finish(){}; + +/////////////////////////// +/// OTHER METHODS + +std::vector MPCorFilter::filter(std::vector SL1mps, + std::vector SL2mps, + std::vector SL3mps, + std::vector Cormps) { + std::map mp_valid_per_bx; + std::map imp_per_bx_sl1; + for (auto &mp : SL1mps) { + int BX = mp.t0 / 25; + if (mp_valid_per_bx.find(BX) == mp_valid_per_bx.end()) { + mp_valid_per_bx[BX] = valid_cor_tp_arr_t(12); + } + + if (imp_per_bx_sl1.find(BX) == imp_per_bx_sl1.end()) { + imp_per_bx_sl1[BX] = 0; + } + + auto coarsed = coarsify(mp, 1); + mp_valid_per_bx[BX][imp_per_bx_sl1[BX]] = valid_cor_tp_t({true, mp, coarsed[3], coarsed[4], coarsed[5]}); + imp_per_bx_sl1[BX] += 2; + } + std::map imp_per_bx_sl3; + for (auto &mp : SL3mps) { + int BX = mp.t0 / 25; + if (mp_valid_per_bx.find(BX) == mp_valid_per_bx.end()) { + mp_valid_per_bx[BX] = valid_cor_tp_arr_t(12); + } + + if (imp_per_bx_sl3.find(BX) == imp_per_bx_sl3.end()) { + imp_per_bx_sl3[BX] = 1; + } + + auto coarsed = coarsify(mp, 3); + mp_valid_per_bx[BX][imp_per_bx_sl3[BX]] = valid_cor_tp_t({true, mp, coarsed[3], coarsed[4], coarsed[5]}); + imp_per_bx_sl3[BX] += 2; + } + + for (auto &mp : Cormps) { + int BX = mp.t0 / 25; + if (mp_valid_per_bx.find(BX) == mp_valid_per_bx.end()) { + mp_valid_per_bx[BX] = valid_cor_tp_arr_t(12); + } + auto coarsed = coarsify(mp, 0); + if (isDead(mp, coarsed, mp_valid_per_bx)) + continue; + auto index = killTps(mp, coarsed, BX, mp_valid_per_bx); + mp_valid_per_bx[BX][index] = valid_cor_tp_t({true, mp, coarsed[3], coarsed[4], coarsed[5]}); + } + + std::vector outTPs; + for (auto &elem : mp_valid_per_bx) { + for (auto &mp_valid : elem.second) { + if (mp_valid.valid) { + outTPs.push_back(mp_valid.mp); + } + } + } + + for (auto &mp : SL2mps) + outTPs.push_back(mp); + return 
outTPs; +} + +std::vector MPCorFilter::coarsify(cmsdt::metaPrimitive mp, int sl) { + float pos_ch_f = mp.x; + + // translating into tdc counts + int pos_ch = int(round(pos_ch_f)); + int slope = (int)(mp.tanPhi); + + std::vector t0_slv, t0_coarse, pos_slv, pos_coarse, slope_slv, slope_coarse; + vhdl_int_to_unsigned(mp.t0, t0_slv); + vhdl_int_to_signed(pos_ch, pos_slv); + vhdl_int_to_signed(slope, slope_slv); + + vhdl_resize_unsigned(t0_slv, WIDTH_FULL_TIME); + vhdl_resize_signed(pos_slv, WIDTH_FULL_POS); + vhdl_resize_signed(slope_slv, WIDTH_FULL_SLOPE); + + t0_coarse = vhdl_slice(t0_slv, FSEG_T0_BX_LSB + 4, FSEG_T0_DISCARD_LSB); + pos_coarse = vhdl_slice(pos_slv, WIDTH_FULL_POS - 1, FSEG_POS_DISCARD_LSB); + slope_coarse = vhdl_slice(slope_slv, WIDTH_FULL_SLOPE - 1, FSEG_SLOPE_DISCARD_LSB); + + std::vector results; + int t0_coarse_int = vhdl_unsigned_to_int(t0_coarse); + int pos_coarse_int = vhdl_signed_to_int(pos_coarse); + int slope_coarse_int = vhdl_signed_to_int(slope_coarse); + + for (int index = 0; index <= 2; index++) { + auto aux_t0_coarse_int = + (t0_coarse_int + (index - 1)) % (int)std::pow(2, FSEG_T0_BX_LSB + 4 - (FSEG_T0_DISCARD_LSB)); + auto aux_pos_coarse_int = pos_coarse_int + (index - 1); + auto aux_slope_coarse_int = slope_coarse_int + (index - 1); + results.push_back(aux_t0_coarse_int); + results.push_back(aux_pos_coarse_int); + results.push_back(aux_slope_coarse_int); + } + return results; +} + +int MPCorFilter::match(cmsdt::metaPrimitive mp, std::vector coarsed, valid_cor_tp_t valid_cor_tp2) { + bool matched = ((coarsed[0] == valid_cor_tp2.coarsed_t0 || coarsed[3] == valid_cor_tp2.coarsed_t0 || + coarsed[6] == valid_cor_tp2.coarsed_t0) && + (coarsed[1] == valid_cor_tp2.coarsed_pos || coarsed[4] == valid_cor_tp2.coarsed_pos || + coarsed[7] == valid_cor_tp2.coarsed_pos) && + (coarsed[2] == valid_cor_tp2.coarsed_slope || coarsed[5] == valid_cor_tp2.coarsed_slope || + coarsed[8] == valid_cor_tp2.coarsed_slope)) && + (abs(mp.t0 / 25 - 
valid_cor_tp2.mp.t0 / 25) <= 1); + return ((int)matched) * 2 + (int)(mp.quality > valid_cor_tp2.mp.quality) + + (int)(mp.quality == valid_cor_tp2.mp.quality) * (int)(get_chi2(mp) < get_chi2(valid_cor_tp2.mp)); +} + +bool MPCorFilter::isDead(cmsdt::metaPrimitive mp, + std::vector coarsed, + std::map tps_per_bx) { + for (auto &elem : tps_per_bx) { + for (auto &mp_valid : elem.second) { + if (!mp_valid.valid) + continue; + int isMatched = match(mp, coarsed, mp_valid); + if (isMatched == 2) + return true; // matched and quality <= stored tp + } + } + return false; +} + +int MPCorFilter::killTps(cmsdt::metaPrimitive mp, + std::vector coarsed, + int bx, + std::map &tps_per_bx) { + int index_to_occupy = -1; + int index_to_kill = -1; + for (auto &elem : tps_per_bx) { + if (abs(bx - elem.first) > 2) + continue; + for (size_t i = 0; i < elem.second.size(); i++) { + if (elem.second[i].valid == 1) { + int isMatched = match(mp, coarsed, elem.second[i]); + if (isMatched == 3) { + elem.second[i].valid = false; + if (elem.first == bx && index_to_kill == -1) + index_to_kill = i; + } + } else if (elem.first == bx && index_to_occupy == -1) + index_to_occupy = i; + } + } + // My first option is to replace the one from my BX that I killed first + if (index_to_kill != -1) + return index_to_kill; + // If I wasn't able to kill anyone from my BX, I fill the first empty space + return index_to_occupy; +} + +int MPCorFilter::get_chi2(cmsdt::metaPrimitive mp) { + // chi2 is coarsified to the index of the chi2's highest bit set to 1 + + int chi2 = (int)round(mp.chi2 / (std::pow(((float)CELL_SEMILENGTH / (float)max_drift_tdc), 2) / 100)); + + std::vector chi2_unsigned, chi2_unsigned_msb; + vhdl_int_to_unsigned(chi2, chi2_unsigned); + + for (int i = (int)chi2_unsigned.size() - 1; i >= 0; i--) { + if (chi2_unsigned[i] == 1) { + return i; + } + } + return -1; +} + +void MPCorFilter::printmP(metaPrimitive mP) { + DTSuperLayerId slId(mP.rawId); + LogDebug("MPCorFilter") << slId << "\t" + << " " << 
setw(2) << left << mP.wi1 << " " << setw(2) << left << mP.wi2 << " " << setw(2) + << left << mP.wi3 << " " << setw(2) << left << mP.wi4 << " " << setw(5) << left << mP.tdc1 + << " " << setw(5) << left << mP.tdc2 << " " << setw(5) << left << mP.tdc3 << " " << setw(5) + << left << mP.tdc4 << " " << setw(10) << right << mP.x << " " << setw(9) << left << mP.tanPhi + << " " << setw(5) << left << mP.t0 << " " << setw(13) << left << mP.chi2; +} diff --git a/L1Trigger/DTTriggerPhase2/src/MPFilter.cc b/L1Trigger/DTTriggerPhase2/src/MPFilter.cc index c098972be4e89..fb45a0803f21c 100644 --- a/L1Trigger/DTTriggerPhase2/src/MPFilter.cc +++ b/L1Trigger/DTTriggerPhase2/src/MPFilter.cc @@ -8,6 +8,17 @@ using namespace std; // ============================================================================ MPFilter::MPFilter(const ParameterSet& pset) : debug_(pset.getUntrackedParameter("debug")) { // Obtention of parameters + int wh, st, se, maxdrift; + maxdrift_filename_ = pset.getParameter("maxdrift_filename"); + std::ifstream ifind(maxdrift_filename_.fullPath()); + if (ifind.fail()) { + throw cms::Exception("Missing Input File") + << "MPSLFilter::MPSLFilter() - Cannot find " << maxdrift_filename_.fullPath(); + } + while (ifind.good()) { + ifind >> wh >> st >> se >> maxdrift; + maxdriftinfo_[wh][st][se] = maxdrift; + } } MPFilter::~MPFilter() {} diff --git a/L1Trigger/DTTriggerPhase2/src/MPQualityEnhancerFilter.cc b/L1Trigger/DTTriggerPhase2/src/MPQualityEnhancerFilter.cc index 94b72556b9dc3..0a76fa7cffa6f 100644 --- a/L1Trigger/DTTriggerPhase2/src/MPQualityEnhancerFilter.cc +++ b/L1Trigger/DTTriggerPhase2/src/MPQualityEnhancerFilter.cc @@ -120,11 +120,7 @@ void MPQualityEnhancerFilter::filterCousins(std::vector &inMPaths bestI = i; } } - bool add_paths = (i == (int)(inMPaths.size() - 1)); - if (!add_paths) { - add_paths = areCousins(inMPaths[i], inMPaths[i + 1]) == 0; - } - if (!add_paths) { + if (areCousins(inMPaths[i], inMPaths[i + 1]) != 0) { primo_index++; } else { 
//areCousing==0 if (oneof4) { diff --git a/L1Trigger/DTTriggerPhase2/src/MPSLFilter.cc b/L1Trigger/DTTriggerPhase2/src/MPSLFilter.cc new file mode 100644 index 0000000000000..808eb38126d3f --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/MPSLFilter.cc @@ -0,0 +1,278 @@ +#include "L1Trigger/DTTriggerPhase2/interface/MPSLFilter.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +using namespace edm; +using namespace std; +using namespace cmsdt; + +// ============================================================================ +// Constructors and destructor +// ============================================================================ +MPSLFilter::MPSLFilter(const ParameterSet &pset) : MPFilter(pset), debug_(pset.getUntrackedParameter("debug")) {} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void MPSLFilter::initialise(const edm::EventSetup &iEventSetup) {} + +void MPSLFilter::run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector &inMPaths, + std::vector &outMPaths) { + if (debug_) + LogDebug("MPSLFilter") << "MPSLFilter: run"; + if (!inMPaths.empty()) { + int dum_sl_rawid = inMPaths[0].rawId; + DTSuperLayerId dumSlId(dum_sl_rawid); + DTChamberId ChId(dumSlId.wheel(), dumSlId.station(), dumSlId.sector()); + max_drift_tdc = maxdriftinfo_[dumSlId.wheel() + 2][dumSlId.station() - 1][dumSlId.sector() - 1]; + DTSuperLayerId sl1Id(ChId.rawId(), 1); + DTSuperLayerId sl2Id(ChId.rawId(), 2); + DTSuperLayerId sl3Id(ChId.rawId(), 3); + + std::vector SL1metaPrimitives; + std::vector SL2metaPrimitives; + std::vector SL3metaPrimitives; + for (const auto &metaprimitiveIt : inMPaths) { + // int BX = metaprimitiveIt.t0 / 25; + if (metaprimitiveIt.rawId == sl1Id.rawId()) + SL1metaPrimitives.push_back(metaprimitiveIt); + else if (metaprimitiveIt.rawId == sl3Id.rawId()) + 
SL3metaPrimitives.push_back(metaprimitiveIt); + else if (metaprimitiveIt.rawId == sl2Id.rawId()) + SL2metaPrimitives.push_back(metaprimitiveIt); + } + + auto filteredSL1MPs = filter(SL1metaPrimitives); + auto filteredSL2MPs = filter(SL2metaPrimitives); + auto filteredSL3MPs = filter(SL3metaPrimitives); + + for (auto &mp : filteredSL1MPs) + outMPaths.push_back(mp); + for (auto &mp : filteredSL2MPs) + outMPaths.push_back(mp); + for (auto &mp : filteredSL3MPs) + outMPaths.push_back(mp); + } +} + +void MPSLFilter::finish(){}; + +/////////////////////////// +/// OTHER METHODS + +std::vector MPSLFilter::filter(std::vector mps) { + std::map mp_valid_per_bx; + for (auto &mp : mps) { + int BX = mp.t0 / 25; + if (mp_valid_per_bx.find(BX) == mp_valid_per_bx.end()) + mp_valid_per_bx[BX] = valid_tp_arr_t(6); + + // is this mp getting killed? + if (isDead(mp, mp_valid_per_bx)) + continue; + // if not, let's kill other mps + auto index = killTps(mp, BX, mp_valid_per_bx); + if (index == -1) + continue; + mp_valid_per_bx[BX][index] = valid_tp_t({true, mp}); + } + + std::vector outTPs; + for (auto &elem : mp_valid_per_bx) { + for (auto &mp_valid : elem.second) { + if (mp_valid.valid) + outTPs.push_back(mp_valid.mp); + } + } + + return outTPs; +} + +int MPSLFilter::match(cmsdt::metaPrimitive mp, cmsdt::metaPrimitive mp2) { + if ((mp.quality == mp2.quality) && (mp.quality == LOWQ || mp2.quality == CLOWQ)) + return 1; + + // CONFIRMATION, FIXME /////////////////////////// + // if (mp.quality == CLOWQ && mp2.quality == HIGHQ) { + // if (share_hit(mp, mp2)) return 2; + // return 3; + // } + // if (mp.quality == HIGHQ && mp2.quality == CLOWQ) { + // if (share_hit(mp, mp2)) return 4; + // return 5; + // } + ////////////////////////////////////////////////// + + if (mp.quality > mp2.quality) { + if (share_hit(mp, mp2)) + return 2; + return 3; + } + if (mp.quality < mp2.quality) { + if (share_hit(mp, mp2)) + return 4; + return 5; + } + if (share_hit(mp, mp2)) { + if (smaller_chi2(mp, mp2) == 
0) + return 6; + return 7; + } + if (smaller_chi2(mp, mp2) == 0) + return 8; + return 9; +} + +bool MPSLFilter::isDead(cmsdt::metaPrimitive mp, std::map tps_per_bx) { + for (auto &elem : tps_per_bx) { + for (auto &mp_valid : elem.second) { + if (!mp_valid.valid) + continue; + int isMatched = match(mp, mp_valid.mp); + if (isMatched == 4 || isMatched == 7) + return true; + } + } + return false; +} + +int MPSLFilter::smaller_chi2(cmsdt::metaPrimitive mp, cmsdt::metaPrimitive mp2) { + auto chi2_1 = get_chi2(mp); + auto chi2_2 = get_chi2(mp2); + if (chi2_1 < chi2_2) + return 0; + return 1; +} + +int MPSLFilter::get_chi2(cmsdt::metaPrimitive mp) { + // CHI2 is converted to an unsigned in which 4 msb are the exponent + // of a float-like value and the rest of the bits are the mantissa + // (without the first 1). So comparing these reduced-width unsigned + // values is equivalent to comparing rounded versions of the chi2 + + int chi2 = (int)round(mp.chi2 / (std::pow(((float)CELL_SEMILENGTH / (float)max_drift_tdc), 2) / 100)); + + std::vector chi2_unsigned, chi2_unsigned_msb; + vhdl_int_to_unsigned(chi2, chi2_unsigned); + + if (chi2_unsigned.size() > 2) { + for (int i = (int)chi2_unsigned.size() - 1; i >= 2; i--) { + if (chi2_unsigned[i] == 1) { + vhdl_int_to_unsigned(i - 1, chi2_unsigned_msb); + + for (int j = i - 1; j > i - 3; j--) { + chi2_unsigned_msb.insert(chi2_unsigned_msb.begin(), chi2_unsigned[j]); + } + return vhdl_unsigned_to_int(chi2_unsigned_msb); + } + } + } + vhdl_resize_unsigned(chi2_unsigned, 2); + return vhdl_unsigned_to_int(vhdl_slice(chi2_unsigned, 1, 0)); +} + +int MPSLFilter::killTps(cmsdt::metaPrimitive mp, int bx, std::map &tps_per_bx) { + int index_to_occupy = -1; + int index_to_kill = -1; + for (auto &elem : tps_per_bx) { + if (abs(bx - elem.first) > 16) + continue; + for (size_t i = 0; i < elem.second.size(); i++) { + if (elem.second[i].valid == 1) { + int isMatched = match(mp, elem.second[i].mp); + if (isMatched == 2 || isMatched == 6) { + 
elem.second[i].valid = false; + if (elem.first == bx && index_to_kill == -1) + index_to_kill = i; + } + } else if (elem.first == bx && index_to_occupy == -1) + index_to_occupy = i; + } + } + // My first option is to replace the one from my BX that I killed first + if (index_to_kill != -1) + return index_to_kill; + // If I wasn't able to kill anyone from my BX, I fill the first empty space + if (index_to_occupy != -1) + return index_to_occupy; + // If I'm a 3h and there were no empty spaces, I don't replace any tp + if (mp.quality == LOWQ) + return -1; + // If I'm a 4h, I replace the first 3h or the 4h with the biggest chi2. + // Let's try to find both + int biggest_chi2 = 0; + int clowq_index = -1; + for (size_t i = 0; i < tps_per_bx[bx].size(); i++) { + if (tps_per_bx[bx][i].mp.quality == LOWQ) + return i; + if (tps_per_bx[bx][i].mp.quality == CLOWQ && clowq_index == -1) { + clowq_index = i; + continue; + } + auto chi2 = get_chi2(tps_per_bx[bx][i].mp); + if (chi2 > biggest_chi2) { + index_to_kill = i; + biggest_chi2 = chi2; + } + } + // If I found a confirmed 3h, I replace that one + if (clowq_index != -1) + return clowq_index; + // If all stored tps are 4h and their chi2 is smaller than mine, I don't replace any + if (biggest_chi2 < get_chi2(mp)) + return -1; + // If at least one chi2 is bigger than mine, I replace the corresponding tp + return index_to_kill; +} + +int MPSLFilter::share_hit(cmsdt::metaPrimitive mp, cmsdt::metaPrimitive mp2) { + // This function returns the layer % 4 (1 to 4) of the hit that is shared between TPs + // If they don't share any hits or the last hit of the latest one differs in more than + // SLFILT_MAX_SEG1T0_TO_SEG2ARRIVAL w.r.t. 
the t0 of the other, returns 0 + + // checking that they are from the same SL + if (mp.rawId != mp2.rawId) + return 0; + + bool isSL1 = ((int)(mp2.wi1 != -1) + (int)(mp2.wi2 != -1) + (int)(mp2.wi3 != -1) + (int)(mp2.wi4 != -1)) >= 3; + + int tdc_mp[NUM_LAYERS_2SL] = {mp.tdc1, mp.tdc2, mp.tdc3, mp.tdc4, mp.tdc5, mp.tdc6, mp.tdc7, mp.tdc8}; + int tdc_mp2[NUM_LAYERS_2SL] = {mp2.tdc1, mp2.tdc2, mp2.tdc3, mp2.tdc4, mp2.tdc5, mp2.tdc6, mp2.tdc7, mp2.tdc8}; + int max_tdc_mp = -999, max_tdc_mp2 = -999; + + for (size_t i = 0; i < NUM_LAYERS_2SL; i++) { + if (tdc_mp[i] > max_tdc_mp) + max_tdc_mp = tdc_mp[i]; + if (tdc_mp2[i] > max_tdc_mp2) + max_tdc_mp2 = tdc_mp2[i]; + } + + if (mp.t0 / LHC_CLK_FREQ + SLFILT_MAX_SEG1T0_TO_SEG2ARRIVAL < max_tdc_mp2 / LHC_CLK_FREQ || + mp2.t0 / LHC_CLK_FREQ + SLFILT_MAX_SEG1T0_TO_SEG2ARRIVAL < max_tdc_mp / LHC_CLK_FREQ) + return 0; + + if ((isSL1 && (mp.wi1 == mp2.wi1 and mp.tdc1 == mp2.tdc1 and mp.wi1 != -1 and mp.tdc1 != -1)) || + (!isSL1 && (mp.wi5 == mp2.wi5 and mp.tdc5 == mp2.tdc5 and mp.wi5 != -1 and mp.tdc5 != -1))) + return 1; + if ((isSL1 && (mp.wi2 == mp2.wi2 and mp.tdc2 == mp2.tdc2 and mp.wi2 != -1 and mp.tdc2 != -1)) || + (!isSL1 && (mp.wi6 == mp2.wi6 and mp.tdc6 == mp2.tdc6 and mp.wi6 != -1 and mp.tdc6 != -1))) + return 2; + if ((isSL1 && (mp.wi3 == mp2.wi3 and mp.tdc3 == mp2.tdc3 and mp.wi3 != -1 and mp.tdc3 != -1)) || + (!isSL1 && (mp.wi7 == mp2.wi7 and mp.tdc7 == mp2.tdc7 and mp.wi7 != -1 and mp.tdc7 != -1))) + return 3; + if ((isSL1 && (mp.wi4 == mp2.wi4 and mp.tdc4 == mp2.tdc4 and mp.wi4 != -1 and mp.tdc4 != -1)) || + (!isSL1 && (mp.wi8 == mp2.wi8 and mp.tdc8 == mp2.tdc8 and mp.wi8 != -1 and mp.tdc8 != -1))) + return 4; + return 0; +} + +void MPSLFilter::printmP(metaPrimitive mP) { + DTSuperLayerId slId(mP.rawId); + LogDebug("MPSLFilter") << slId << "\t" + << " " << setw(2) << left << mP.wi1 << " " << setw(2) << left << mP.wi2 << " " << setw(2) + << left << mP.wi3 << " " << setw(2) << left << mP.wi4 << " " << setw(5) << left 
<< mP.tdc1 + << " " << setw(5) << left << mP.tdc2 << " " << setw(5) << left << mP.tdc3 << " " << setw(5) + << left << mP.tdc4 << " " << setw(10) << right << mP.x << " " << setw(9) << left << mP.tanPhi + << " " << setw(5) << left << mP.t0 << " " << setw(13) << left << mP.chi2; +} diff --git a/L1Trigger/DTTriggerPhase2/src/MuonPathAnalyzerInChamber.cc b/L1Trigger/DTTriggerPhase2/src/MuonPathAnalyzerInChamber.cc index abae4a2af3108..3df9e90d545d8 100644 --- a/L1Trigger/DTTriggerPhase2/src/MuonPathAnalyzerInChamber.cc +++ b/L1Trigger/DTTriggerPhase2/src/MuonPathAnalyzerInChamber.cc @@ -1,4 +1,5 @@ #include "L1Trigger/DTTriggerPhase2/interface/MuonPathAnalyzerInChamber.h" +#include "FWCore/Utilities/interface/isFinite.h" #include #include @@ -242,7 +243,7 @@ void MuonPathAnalyzerInChamber::analyze(MuonPathPtr &inMPath, MuonPathPtrs &outM } // Protection against non-converged fits - if (isnan(jm_x)) + if (edm::isNotFinite(jm_x)) continue; // Updating muon-path horizontal position diff --git a/L1Trigger/DTTriggerPhase2/src/MuonPathAssociator.cc b/L1Trigger/DTTriggerPhase2/src/MuonPathAssociator.cc index e96f1634e2980..0f406b32c47bf 100644 --- a/L1Trigger/DTTriggerPhase2/src/MuonPathAssociator.cc +++ b/L1Trigger/DTTriggerPhase2/src/MuonPathAssociator.cc @@ -108,7 +108,7 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, SL3metaPrimitives.push_back(metaprimitiveIt); } - if (SL1metaPrimitives.empty() or SL3metaPrimitives.empty()) + if (SL1metaPrimitives.empty() and SL3metaPrimitives.empty()) continue; if (debug_) @@ -119,12 +119,12 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, bool at_least_one_SL1_confirmation = false; bool at_least_one_SL3_confirmation = false; - vector useFitSL1; + bool useFitSL1[SL1metaPrimitives.size()]; for (unsigned int i = 0; i < SL1metaPrimitives.size(); i++) - useFitSL1.push_back(false); - vector useFitSL3; + useFitSL1[i] = false; + bool useFitSL3[SL3metaPrimitives.size()]; for (unsigned int i = 0; i < 
SL3metaPrimitives.size(); i++) - useFitSL3.push_back(false); + useFitSL3[i] = false; //SL1-SL3 vector chamberMetaPrimitives; @@ -133,7 +133,7 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, int sl1 = 0; int sl3 = 0; for (auto SL1metaPrimitive = SL1metaPrimitives.begin(); SL1metaPrimitive != SL1metaPrimitives.end(); - ++SL1metaPrimitive, sl1++, sl3 = 0) { + ++SL1metaPrimitive, sl1++, sl3 = -1) { if (clean_chi2_correlation_) at_least_one_correlation = false; for (auto SL3metaPrimitive = SL3metaPrimitives.begin(); SL3metaPrimitive != SL3metaPrimitives.end(); @@ -412,13 +412,19 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, next_tdc = best_tdc; next_layer = best_layer; next_lat = best_lat; + + best_wire = (*digiIt).wire(); + best_tdc = (*digiIt).time(); + best_layer = dtLId.layer(); + best_lat = lat; matched_digis++; + } else if (dtLId.layer() == + best_layer) { // same layer than stored, just substituting the hit, no matched_digis++; + best_wire = (*digiIt).wire(); + best_tdc = (*digiIt).time(); + best_layer = dtLId.layer(); + best_lat = lat; } - best_wire = (*digiIt).wire(); - best_tdc = (*digiIt).time(); - best_layer = dtLId.layer(); - best_lat = lat; - } else if ((std::abs(x_inSL3 - x_wire) >= minx) && (std::abs(x_inSL3 - x_wire) < min2x)) { // same layer than the stored in best, no hit added if (dtLId.layer() == best_layer) @@ -427,7 +433,8 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, // buggy, as we could have stored as next LayerX -> LayerY -> LayerX, and this should // count only as 2 hits. 
However, as we confirm with at least 2 hits, having 2 or more // makes no difference - matched_digis++; + else if (dtLId.layer() != next_layer) + matched_digis++; // whether the layer is the same for this hit and the stored in next, we substitute // the one stored and modify the min distance min2x = std::abs(x_inSL3 - x_wire); @@ -640,12 +647,19 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, next_tdc = best_tdc; next_layer = best_layer; next_lat = best_lat; + + best_wire = (*digiIt).wire(); + best_tdc = (*digiIt).time(); + best_layer = dtLId.layer(); + best_lat = lat; matched_digis++; + } else if (dtLId.layer() == + best_layer) { // same layer than stored, just substituting the hit, no matched_digis++; + best_wire = (*digiIt).wire(); + best_tdc = (*digiIt).time(); + best_layer = dtLId.layer(); + best_lat = lat; } - best_wire = (*digiIt).wire(); - best_tdc = (*digiIt).time(); - best_layer = dtLId.layer(); - best_lat = lat; } else if ((std::abs(x_inSL1 - x_wire) >= minx) && (std::abs(x_inSL1 - x_wire) < min2x)) { // same layer than the stored in best, no hit added if (dtLId.layer() == best_layer) @@ -654,7 +668,8 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, // buggy, as we could have stored as next LayerX -> LayerY -> LayerX, and this should // count only as 2 hits. 
However, as we confirm with at least 2 hits, having 2 or more // makes no difference - matched_digis++; + else if (dtLId.layer() != next_layer) + matched_digis++; // whether the layer is the same for this hit and the stored in next, we substitute // the one stored and modify the min distance min2x = std::abs(x_inSL1 - x_wire); @@ -986,9 +1001,9 @@ void MuonPathAssociator::correlateMPaths(edm::Handle dtdigis, } void MuonPathAssociator::removeSharingFits(vector &chamberMPaths, vector &allMPaths) { - vector useFit; + bool useFit[chamberMPaths.size()]; for (unsigned int i = 0; i < chamberMPaths.size(); i++) { - useFit.push_back(true); + useFit[i] = true; } for (unsigned int i = 0; i < chamberMPaths.size(); i++) { if (debug_) diff --git a/L1Trigger/DTTriggerPhase2/src/MuonPathConfirmator.cc b/L1Trigger/DTTriggerPhase2/src/MuonPathConfirmator.cc new file mode 100644 index 0000000000000..8390b62fe8632 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/MuonPathConfirmator.cc @@ -0,0 +1,237 @@ +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathConfirmator.h" +#include +#include + +using namespace edm; +using namespace std; +using namespace cmsdt; + +// ============================================================================ +// Constructors and destructor +// ============================================================================ +MuonPathConfirmator::MuonPathConfirmator(const ParameterSet &pset, edm::ConsumesCollector &iC) + : debug_(pset.getUntrackedParameter("debug")), + minx_match_2digis_(pset.getParameter("minx_match_2digis")) { + if (debug_) + LogDebug("MuonPathConfirmator") << "MuonPathConfirmator: constructor"; + + //shift phi + int rawId; + shift_filename_ = pset.getParameter("shift_filename"); + std::ifstream ifin3(shift_filename_.fullPath()); + double shift; + if (ifin3.fail()) { + throw cms::Exception("Missing Input File") + << "MuonPathConfirmator::MuonPathConfirmator() - Cannot find " << shift_filename_.fullPath(); + } + while (ifin3.good()) { + 
ifin3 >> rawId >> shift; + shiftinfo_[rawId] = shift; + } + + int wh, st, se, maxdrift; + maxdrift_filename_ = pset.getParameter("maxdrift_filename"); + std::ifstream ifind(maxdrift_filename_.fullPath()); + if (ifind.fail()) { + throw cms::Exception("Missing Input File") + << "MPSLFilter::MPSLFilter() - Cannot find " << maxdrift_filename_.fullPath(); + } + while (ifind.good()) { + ifind >> wh >> st >> se >> maxdrift; + maxdriftinfo_[wh][st][se] = maxdrift; + } +} + +MuonPathConfirmator::~MuonPathConfirmator() { + if (debug_) + LogDebug("MuonPathConfirmator") << "MuonPathAnalyzer: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ + +void MuonPathConfirmator::run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + std::vector inMetaPrimitives, + edm::Handle dtdigis, + std::vector &outMetaPrimitives) { + if (debug_) + LogDebug("MuonPathConfirmator") << "MuonPathConfirmator: run"; + + // fit per SL (need to allow for multiple outputs for a single mpath) + if (!inMetaPrimitives.empty()) { + int dum_sl_rawid = inMetaPrimitives[0].rawId; + DTSuperLayerId dumSlId(dum_sl_rawid); + DTChamberId ChId(dumSlId.wheel(), dumSlId.station(), dumSlId.sector()); + max_drift_tdc = maxdriftinfo_[dumSlId.wheel() + 2][dumSlId.station() - 1][dumSlId.sector() - 1]; + } + + for (auto &mp : inMetaPrimitives) { + analyze(mp, dtdigis, outMetaPrimitives); + } +} + +void MuonPathConfirmator::initialise(const edm::EventSetup &iEventSetup) { + if (debug_) + LogDebug("MuonPathConfirmator") << "MuonPathConfirmator::initialiase"; +} + +void MuonPathConfirmator::finish() { + if (debug_) + LogDebug("MuonPathConfirmator") << "MuonPathConfirmator: finish"; +}; + +//------------------------------------------------------------------ +//--- Metodos privados +//------------------------------------------------------------------ + 
+void MuonPathConfirmator::analyze(cmsdt::metaPrimitive mp, + edm::Handle dtdigis, + std::vector &outMetaPrimitives) { + int dum_sl_rawid = mp.rawId; + DTSuperLayerId dumSlId(dum_sl_rawid); + DTChamberId ChId(dumSlId.wheel(), dumSlId.station(), dumSlId.sector()); + DTSuperLayerId sl1Id(ChId.rawId(), 1); + DTSuperLayerId sl3Id(ChId.rawId(), 3); + + DTWireId wireIdSL1(sl1Id, 2, 1); + DTWireId wireIdSL3(sl3Id, 2, 1); + auto sl_shift_cm = shiftinfo_[wireIdSL1.rawId()] - shiftinfo_[wireIdSL3.rawId()]; + bool isSL1 = (mp.rawId == sl1Id.rawId()); + bool isSL3 = (mp.rawId == sl3Id.rawId()); + if (!isSL1 && !isSL3) + outMetaPrimitives.emplace_back(mp); + else { + int best_tdc = -1; + int next_tdc = -1; + int best_wire = -1; + int next_wire = -1; + int best_layer = -1; + int next_layer = -1; + int best_lat = -1; + int next_lat = -1; + int lat = -1; + int matched_digis = 0; + + int position_prec = ((int)(mp.x)) << PARTIALS_PRECISSION; + int slope_prec = ((int)(mp.tanPhi)) << PARTIALS_PRECISSION; + + int slope_x_halfchamb = (((long int)slope_prec) * SEMICHAMBER_H) >> SEMICHAMBER_RES_SHR; + int slope_x_3semicells = (slope_prec * 3) >> LYRANDAHALF_RES_SHR; + int slope_x_1semicell = (slope_prec * 1) >> LYRANDAHALF_RES_SHR; + + for (const auto &dtLayerId_It : *dtdigis) { + const DTLayerId dtLId = dtLayerId_It.first; + // creating a new DTSuperLayerId object to compare with the required SL id + const DTSuperLayerId dtSLId(dtLId.wheel(), dtLId.station(), dtLId.sector(), dtLId.superLayer()); + bool hitFromSL1 = (dtSLId.rawId() == sl1Id.rawId()); + bool hitFromSL3 = (dtSLId.rawId() == sl3Id.rawId()); + if (!(hitFromSL1 || hitFromSL3)) // checking hits are from one of the other SL of the same chamber + continue; + double minx = 10 * minx_match_2digis_ * ((double)max_drift_tdc / (double)CELL_SEMILENGTH); + double min2x = 10 * minx_match_2digis_ * ((double)max_drift_tdc / (double)CELL_SEMILENGTH); + if (isSL1 != hitFromSL1) { // checking hits have the opposite SL than the TP + for (auto 
digiIt = (dtLayerId_It.second).first; digiIt != (dtLayerId_It.second).second; ++digiIt) { + if ((*digiIt).time() < mp.t0) + continue; + int wp_semicells = ((*digiIt).wire() - 1 - SL1_CELLS_OFFSET) * 2 + 1; + int ly = dtLId.layer() - 1; + if (ly % 2 == 1) + wp_semicells -= 1; + if (hitFromSL3) + wp_semicells -= (int)round((sl_shift_cm * 10) / CELL_SEMILENGTH); + double hit_position = wp_semicells * max_drift_tdc + + ((*digiIt).time() - mp.t0) * (double)TIME_TO_TDC_COUNTS / (double)LHC_CLK_FREQ; + double hit_position_left = wp_semicells * max_drift_tdc - + ((*digiIt).time() - mp.t0) * (double)TIME_TO_TDC_COUNTS / (double)LHC_CLK_FREQ; + // extrapolating position to the layer of the hit + // mp.position is referred to the center between SLs, so one has to add half the distance between SLs + // + half a cell height to get to the first wire + ly * cell height to reach the desired ly + // 10 * VERT_PHI1_PHI3 / 2 + (CELL_HEIGHT / 2) + ly * CELL_HEIGHT = (10 * VERT_PHI1_PHI3 + (2 * ly + 1) * CELL_HEIGHT) / 2 + + int position_in_layer = position_prec + (1 - 2 * (int)hitFromSL1) * slope_x_halfchamb; + if (ly == 0) + position_in_layer -= slope_x_3semicells; + if (ly == 1) + position_in_layer -= slope_x_1semicell; + if (ly == 2) + position_in_layer += slope_x_1semicell; + if (ly == 3) + position_in_layer += slope_x_3semicells; + position_in_layer = position_in_layer >> PARTIALS_PRECISSION; + + if (std::abs(position_in_layer - hit_position_left) < std::abs(position_in_layer - hit_position)) { + lat = 0; + hit_position = hit_position_left; + } + if (std::abs(position_in_layer - hit_position) < minx) { + // different layer than the stored in best, hit added, matched_digis++;. This approach in somewhat + // buggy, as we could have stored as best LayerX -> LayerY -> LayerX, and this should + // count only as 2 hits. 
However, as we confirm with at least 2 hits, having 2 or more + // makes no difference + if (dtLId.layer() != best_layer) { + minx = std::abs(position_in_layer - hit_position); + next_wire = best_wire; + next_tdc = best_tdc; + next_layer = best_layer; + next_lat = best_lat; + matched_digis++; + } + best_wire = (*digiIt).wire() - 1; + best_tdc = (*digiIt).time(); + best_layer = dtLId.layer(); + best_lat = lat; + + } else if ((std::abs(position_in_layer - hit_position) >= minx) && + (std::abs(position_in_layer - hit_position) < min2x)) { + // same layer than the stored in best, no hit added + if (dtLId.layer() == best_layer) + continue; + // different layer than the stored in next, hit added. This approach in somewhat + // buggy, as we could have stored as next LayerX -> LayerY -> LayerX, and this should + // count only as 2 hits. However, as we confirm with at least 2 hits, having 2 or more + // makes no difference + matched_digis++; + // whether the layer is the same for this hit and the stored in next, we substitute + // the one stored and modify the min distance + min2x = std::abs(position_in_layer - hit_position); + next_wire = (*digiIt).wire() - 1; + next_tdc = (*digiIt).time(); + next_layer = dtLId.layer(); + next_lat = lat; + } + } + } + } + int new_quality = mp.quality; + std::vector wi_c(4, -1), tdc_c(4, -1), lat_c(4, -1); + if (matched_digis >= 2 and best_layer != -1 and next_layer != -1) { // actually confirm + new_quality = CHIGHQ; + if (mp.quality == LOWQ) + new_quality = CLOWQ; + + wi_c[next_layer - 1] = next_wire; + tdc_c[next_layer - 1] = next_tdc; + lat_c[next_layer - 1] = next_lat; + + wi_c[best_layer - 1] = best_wire; + tdc_c[best_layer - 1] = best_tdc; + lat_c[best_layer - 1] = best_lat; + } + if (isSL1) { + outMetaPrimitives.emplace_back(metaPrimitive( + {mp.rawId, mp.t0, mp.x, mp.tanPhi, mp.phi, mp.phiB, mp.phi_cmssw, mp.phiB_cmssw, mp.chi2, new_quality, + mp.wi1, mp.tdc1, mp.lat1, mp.wi2, mp.tdc2, mp.lat2, mp.wi3, mp.tdc3, mp.lat3, mp.wi4, + 
mp.tdc4, mp.lat4, wi_c[0], tdc_c[0], lat_c[0], wi_c[1], tdc_c[1], lat_c[1], wi_c[2], tdc_c[2], + lat_c[2], wi_c[3], tdc_c[3], lat_c[3], -1})); + } else { + outMetaPrimitives.emplace_back( + metaPrimitive({mp.rawId, mp.t0, mp.x, mp.tanPhi, mp.phi, mp.phiB, mp.phi_cmssw, + mp.phiB_cmssw, mp.chi2, new_quality, wi_c[0], tdc_c[0], lat_c[0], wi_c[1], + tdc_c[1], lat_c[1], wi_c[2], tdc_c[2], lat_c[2], wi_c[3], tdc_c[3], + lat_c[3], mp.wi5, mp.tdc5, mp.lat5, mp.wi6, mp.tdc6, mp.lat6, + mp.wi7, mp.tdc7, mp.lat7, mp.wi8, mp.tdc8, mp.lat8, -1})); + } + } //SL2 +} diff --git a/L1Trigger/DTTriggerPhase2/src/MuonPathCorFitter.cc b/L1Trigger/DTTriggerPhase2/src/MuonPathCorFitter.cc new file mode 100644 index 0000000000000..4ef798f843119 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/MuonPathCorFitter.cc @@ -0,0 +1,490 @@ +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathCorFitter.h" +#include +#include + +using namespace edm; +using namespace std; +using namespace cmsdt; +// ============================================================================ +// Constructors and destructor +// ============================================================================ +MuonPathCorFitter::MuonPathCorFitter(const ParameterSet& pset, + edm::ConsumesCollector& iC, + std::shared_ptr& globalcoordsobtainer) + : MuonPathFitter(pset, iC, globalcoordsobtainer), dT0_correlate_TP_(pset.getParameter("dT0_correlate_TP")) { + if (debug_) + LogDebug("MuonPathCorFitter") << "MuonPathCorFitter: constructor"; + + // LUTs + both_sl_filename_ = pset.getParameter("lut_2sl"); + + fillLuts(); + + setChi2Th(pset.getParameter("chi2corTh")); + setTanPhiTh(pset.getParameter("dTanPsi_correlate_TP")); +} + +MuonPathCorFitter::~MuonPathCorFitter() { + if (debug_) + LogDebug("MuonPathCorFitter") << "MuonPathCorFitter: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// 
============================================================================ +void MuonPathCorFitter::initialise(const edm::EventSetup& iEventSetup) { + if (debug_) + LogDebug("MuonPathCorFitter") << "MuonPathCorFitter::initialiase"; + + auto geom = iEventSetup.getHandle(dtGeomH); + dtGeo_ = &(*geom); +} + +void MuonPathCorFitter::run(edm::Event& iEvent, + const edm::EventSetup& iEventSetup, + std::vector& inMPaths, + std::vector& outMPaths) { + if (debug_) + LogDebug("MuonPathCorFitter") << "MuonPathCorFitter: run"; + if (!inMPaths.empty()) { + int dum_sl_rawid = inMPaths[0].rawId; + DTSuperLayerId dumSlId(dum_sl_rawid); + DTChamberId ChId(dumSlId.wheel(), dumSlId.station(), dumSlId.sector()); + max_drift_tdc = maxdriftinfo_[dumSlId.wheel() + 2][dumSlId.station() - 1][dumSlId.sector() - 1]; + DTSuperLayerId sl1Id(ChId.rawId(), 1); + DTSuperLayerId sl3Id(ChId.rawId(), 3); + + std::map> SL1metaPrimitivesPerBX; + std::map> SL3metaPrimitivesPerBX; + for (const auto& metaprimitiveIt : inMPaths) { + int BX = metaprimitiveIt.t0 / 25; + if (metaprimitiveIt.rawId == sl1Id.rawId()) + SL1metaPrimitivesPerBX[BX].push_back(metaprimitiveIt); + else if (metaprimitiveIt.rawId == sl3Id.rawId()) + SL3metaPrimitivesPerBX[BX].push_back(metaprimitiveIt); + } + + std::vector bxs_to_consider; + bxs_to_consider.reserve(SL1metaPrimitivesPerBX.size()); + for (auto& prims_sl1 : SL1metaPrimitivesPerBX) + bxs_to_consider.push_back(bx_sl_vector({prims_sl1.first, prims_sl1.second, 1})); + + for (auto& prims_sl3 : SL3metaPrimitivesPerBX) + bxs_to_consider.push_back(bx_sl_vector({prims_sl3.first, prims_sl3.second, 3})); + + std::stable_sort(bxs_to_consider.begin(), bxs_to_consider.end(), bxSort); + + std::vector mps_q8; + std::vector mps_q7; + std::vector mps_q6; + + for (size_t ibx = 1; ibx < bxs_to_consider.size(); ibx++) { + for (size_t ibx2 = 0; ibx2 < ibx; ibx2++) { + if (bxs_to_consider[ibx].sl != bxs_to_consider[ibx2].sl && + (abs(bxs_to_consider[ibx].bx - bxs_to_consider[ibx2].bx)) <= 
MAX_BX_FOR_COR) { + int isl1 = 0; + for (auto& prim1 : bxs_to_consider[ibx].mps) { + if (isl1 >= MAX_PRIM_PER_BX_FOR_COR) + break; + int isl2 = 0; + for (auto& prim2 : bxs_to_consider[ibx2].mps) { + if (isl2 >= MAX_PRIM_PER_BX_FOR_COR) + break; + if (bxs_to_consider[ibx].sl == 1) { + if (!canCorrelate(prim1, prim2)) { + continue; + } + if (prim1.quality >= 3 && prim2.quality >= 3) + mps_q8.push_back(mp_group({prim1, prim2})); + else if ((prim1.quality >= 3 && prim2.quality < 3) || (prim1.quality < 3 && prim2.quality >= 3)) + mps_q7.push_back(mp_group({prim1, prim2})); + else + mps_q6.push_back(mp_group({prim1, prim2})); + } else { + if (!canCorrelate(prim2, prim1)) { + continue; + } + if (prim2.quality >= 3 && prim1.quality >= 3) + mps_q8.push_back(mp_group({prim2, prim1})); + else if ((prim2.quality >= 3 && prim1.quality < 3) || (prim2.quality < 3 && prim1.quality >= 3)) + mps_q7.push_back(mp_group({prim2, prim1})); + else + mps_q6.push_back(mp_group({prim2, prim1})); + } + isl2++; + } + isl1++; + } + } + } // looping over the 0 -> N-1 BX groups + } // looping over the 1 -> N BX groups + int iq = 0; + for (size_t i = 0; i < mps_q8.size(); i++) { + if (iq >= MAX_PRIM_FOR_COR) + break; + analyze(mps_q8[i], outMPaths); + iq += 1; + } + for (size_t i = 0; i < mps_q7.size(); i++) { + if (iq >= MAX_PRIM_FOR_COR) + break; + analyze(mps_q7[i], outMPaths); + iq += 1; + } + for (size_t i = 0; i < mps_q6.size(); i++) { + if (iq >= MAX_PRIM_FOR_COR) + break; + analyze(mps_q6[i], outMPaths); + iq += 1; + } + } +} + +bool MuonPathCorFitter::canCorrelate(cmsdt::metaPrimitive mp_sl1, cmsdt::metaPrimitive mp_sl3) { + // moving position from SL RF to chamber RF + float pos_ch_sl1_f = mp_sl1.x; + float pos_ch_sl3_f = mp_sl3.x; + + // translating into tdc counts + int pos_ch_sl1 = int(pos_ch_sl1_f); + int pos_ch_sl3 = int(pos_ch_sl3_f); + + int slope_sl1 = (int)mp_sl1.tanPhi; + int slope_sl3 = (int)mp_sl3.tanPhi; + + if (abs((slope_sl1 >> WIDTH_POS_SLOPE_CORR) - (slope_sl3 >> 
WIDTH_POS_SLOPE_CORR)) > 1) + return false; + + if (abs((pos_ch_sl1 >> WIDTH_POS_SLOPE_CORR) - (pos_ch_sl3 >> WIDTH_POS_SLOPE_CORR)) > 1) + return false; + + if (abs(mp_sl1.t0 - mp_sl3.t0) > dT0_correlate_TP_) + return false; + + return true; +} + +void MuonPathCorFitter::finish() { + if (debug_) + LogDebug("MuonPathCorFitter") << "MuonPathCorFitter: finish"; +}; + +//------------------------------------------------------------------ +//--- Metodos privados +//------------------------------------------------------------------ + +void MuonPathCorFitter::analyze(mp_group mp, std::vector& metaPrimitives) { + //FIXME + DTSuperLayerId MuonPathSLId(mp[0].rawId); // SL1 + + DTChamberId ChId(MuonPathSLId.wheel(), MuonPathSLId.station(), MuonPathSLId.sector()); + + DTSuperLayerId MuonPathSL1Id(ChId.wheel(), ChId.station(), ChId.sector(), 1); + DTSuperLayerId MuonPathSL3Id(ChId.wheel(), ChId.station(), ChId.sector(), 3); + DTWireId wireIdSL1(MuonPathSL1Id, 2, 1); + DTWireId wireIdSL3(MuonPathSL3Id, 2, 1); + auto sl_shift_cm = shiftinfo_[wireIdSL1.rawId()] - shiftinfo_[wireIdSL3.rawId()]; + + fit_common_in_t fit_common_in; + + // 8-element vectors, for the 8 layers. 
As here we are fitting one SL only, we leave the other SL values as dummy ones + fit_common_in.hits = {}; + fit_common_in.hits_valid = {}; + short quality = 0; + if (mp[0].quality >= 3 && mp[1].quality >= 3) + quality = 8; + else if ((mp[0].quality >= 3 && mp[1].quality < 3) || (mp[0].quality < 3 && mp[1].quality >= 3)) + quality = 7; + else + quality = 6; + + std::vector missing_layers; + + for (int isl = 0; isl < 2; isl++) { + int wire[4], tdc[4]; + if (isl != 1) { + wire[0] = mp[isl].wi1; + tdc[0] = mp[isl].tdc1; + wire[1] = mp[isl].wi2; + tdc[1] = mp[isl].tdc2; + wire[2] = mp[isl].wi3; + tdc[2] = mp[isl].tdc3; + wire[3] = mp[isl].wi4; + tdc[3] = mp[isl].tdc4; + } else { + wire[0] = mp[isl].wi5; + tdc[0] = mp[isl].tdc5; + wire[1] = mp[isl].wi6; + tdc[1] = mp[isl].tdc6; + wire[2] = mp[isl].wi7; + tdc[2] = mp[isl].tdc7; + wire[3] = mp[isl].wi8; + tdc[3] = mp[isl].tdc8; + } + + for (int i = 0; i < NUM_LAYERS; i++) { + if (wire[i] != -1) { + // Include both valid and non-valid hits. Non-valid values can be whatever, leaving all as -1 to make debugging easier. 
+ auto ti = tdc[i]; + if (ti != -1) + ti = (int)round(((float)TIME_TO_TDC_COUNTS / (float)LHC_CLK_FREQ) * ti); + auto wi = wire[i]; + auto ly = i; + + int wp_semicells = (wi - SL1_CELLS_OFFSET) * 2 + 1; + if (ly % 2 == 1) + wp_semicells -= 1; + if (isl == 1) // SL3 + wp_semicells -= (int)round((sl_shift_cm * 10) / CELL_SEMILENGTH); + float wp_tdc = wp_semicells * max_drift_tdc; + int wp = (int)((long int)(round(wp_tdc * std::pow(2, WIREPOS_WIDTH))) / (int)std::pow(2, WIREPOS_WIDTH)); + + // wp in tdc counts (still in floating point) + fit_common_in.hits.push_back({ti, wi, ly, wp}); + // fill valids as well + fit_common_in.hits_valid.push_back(1); + } else { + missing_layers.push_back(isl * NUM_LAYERS + i); + fit_common_in.hits.push_back({-1, -1, -1, -1}); + fit_common_in.hits_valid.push_back(0); + } + } + } + + int smallest_time = 999999, tmp_coarse_wirepos_1 = -1, tmp_coarse_wirepos_3 = -1; + // coarse_bctr is the 12 MSB of the smallest tdc + for (int isl = 0; isl < 2; isl++) { + for (size_t i = 0; i < NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[NUM_LAYERS * isl + i] == 0) + continue; + else if (fit_common_in.hits[NUM_LAYERS * isl + i].ti < smallest_time) + smallest_time = fit_common_in.hits[NUM_LAYERS * isl + i].ti; + } + } + if (fit_common_in.hits_valid[NUM_LAYERS * 0 + 0] == 1) + tmp_coarse_wirepos_1 = fit_common_in.hits[NUM_LAYERS * 0 + 0].wp; + else + tmp_coarse_wirepos_1 = fit_common_in.hits[NUM_LAYERS * 0 + 1].wp; + if (fit_common_in.hits_valid[NUM_LAYERS * 1 + 3] == 1) + tmp_coarse_wirepos_3 = fit_common_in.hits[NUM_LAYERS * 1 + 3].wp; + else + tmp_coarse_wirepos_3 = fit_common_in.hits[NUM_LAYERS * 1 + 2].wp; + + tmp_coarse_wirepos_1 = tmp_coarse_wirepos_1 >> WIREPOS_NORM_LSB_IGNORED; + tmp_coarse_wirepos_3 = tmp_coarse_wirepos_3 >> WIREPOS_NORM_LSB_IGNORED; + + fit_common_in.coarse_bctr = smallest_time >> (WIDTH_FULL_TIME - WIDTH_COARSED_TIME); + fit_common_in.coarse_wirepos = (tmp_coarse_wirepos_1 + tmp_coarse_wirepos_3) >> 1; + + 
fit_common_in.lateralities.clear(); + + auto rom_addr = get_rom_addr(mp, missing_layers); + + coeffs_t coeffs = + RomDataConvert(lut_2sl[rom_addr], COEFF_WIDTH_COR_T0, COEFF_WIDTH_COR_POSITION, COEFF_WIDTH_COR_SLOPE, 0, 7); + + // Filling lateralities + for (int isl = 0; isl < 2; isl++) { + int lat[4]; + if (isl != 1) { + lat[0] = mp[isl].lat1; + lat[1] = mp[isl].lat2; + lat[2] = mp[isl].lat3; + lat[3] = mp[isl].lat4; + } else { + lat[0] = mp[isl].lat5; + lat[1] = mp[isl].lat6; + lat[2] = mp[isl].lat7; + lat[3] = mp[isl].lat8; + } + + for (size_t i = 0; i < NUM_LAYERS; i++) { + fit_common_in.lateralities.push_back(lat[i]); + } + } + + fit_common_in.coeffs = coeffs; + + auto fit_common_out = fit(fit_common_in, + XI_COR_WIDTH, + COEFF_WIDTH_COR_T0, + COEFF_WIDTH_COR_POSITION, + COEFF_WIDTH_COR_SLOPE, + PRECISSION_COR_T0, + PRECISSION_COR_POSITION, + PRECISSION_COR_SLOPE, + PROD_RESIZE_COR_T0, + PROD_RESIZE_COR_POSITION, + PROD_RESIZE_COR_SLOPE, + max_drift_tdc, + 0); + + if (fit_common_out.valid_fit == 1) { + float t0_f = ((float)fit_common_out.t0) * (float)LHC_CLK_FREQ / (float)TIME_TO_TDC_COUNTS; + float slope_f = -fit_common_out.slope * ((float)CELL_SEMILENGTH / max_drift_tdc) * (1) / (CELL_SEMIHEIGHT * 16.); + if (std::abs(slope_f) > tanPhiTh_) + return; + + DTWireId wireId(MuonPathSLId, 2, 1); + float pos_ch_f = (float)(fit_common_out.position) * ((float)CELL_SEMILENGTH / (float)max_drift_tdc) / 10; + pos_ch_f += (SL1_CELLS_OFFSET * CELL_LENGTH) / 10.; + pos_ch_f += shiftinfo_[wireId.rawId()]; + + float chi2_f = fit_common_out.chi2 * std::pow(((float)CELL_SEMILENGTH / (float)max_drift_tdc), 2) / 100; + + // obtention of global coordinates using luts + int pos = (int)(10 * (pos_ch_f - shiftinfo_[wireId.rawId()]) * INCREASED_RES_POS_POW); + int slope = (int)(-slope_f * INCREASED_RES_SLOPE_POW); + auto global_coords = globalcoordsobtainer_->get_global_coordinates(ChId.rawId(), 0, pos, slope); + float phi = global_coords[0]; + float phiB = global_coords[1]; + + // 
obtention of global coordinates using cmssw geometry + double z = 0; + if (ChId.station() == 3 or ChId.station() == 4) { + z += Z_SHIFT_MB4; + } + GlobalPoint jm_x_cmssw_global = dtGeo_->chamber(ChId)->toGlobal(LocalPoint(pos_ch_f, 0., z)); + int thisec = ChId.sector(); + if (thisec == 13) + thisec = 4; + if (thisec == 14) + thisec = 10; + float phi_cmssw = jm_x_cmssw_global.phi() - PHI_CONV * (thisec - 1); + float psi = atan(slope_f); + float phiB_cmssw = hasPosRF(ChId.wheel(), ChId.sector()) ? psi - phi_cmssw : -psi - phi_cmssw; + metaPrimitives.emplace_back(metaPrimitive({MuonPathSLId.rawId(), + t0_f, + (double)fit_common_out.position, + (double)fit_common_out.slope, + phi, + phiB, + phi_cmssw, + phiB_cmssw, + chi2_f, + quality, + mp[0].wi1, + mp[0].tdc1, + mp[0].lat1, + mp[0].wi2, + mp[0].tdc2, + mp[0].lat2, + mp[0].wi3, + mp[0].tdc3, + mp[0].lat3, + mp[0].wi4, + mp[0].tdc4, + mp[0].lat4, + mp[1].wi5, + mp[1].tdc5, + mp[1].lat5, + mp[1].wi6, + mp[1].tdc6, + mp[1].lat6, + mp[1].wi7, + mp[1].tdc7, + mp[1].lat7, + mp[1].wi8, + mp[1].tdc8, + mp[1].lat8, + -1})); + } + return; +} + +void MuonPathCorFitter::fillLuts() { + std::ifstream ifin2sl(both_sl_filename_.fullPath()); + std::string line; + while (ifin2sl.good()) { + ifin2sl >> line; + + std::vector myNumbers; + for (size_t i = 0; i < line.size(); i++) { + // This converts the char into an int and pushes it into vec + myNumbers.push_back(line[i] - '0'); // The digits will be in the same order as before + } + std::reverse(myNumbers.begin(), myNumbers.end()); + lut_2sl.push_back(myNumbers); + } + + return; +} + +int MuonPathCorFitter::get_rom_addr(mp_group mps, std::vector missing_hits) { + std::vector lats = { + mps[0].lat1, mps[0].lat2, mps[0].lat3, mps[0].lat4, mps[1].lat5, mps[1].lat6, mps[1].lat7, mps[1].lat8}; + + std::vector rom_addr; + if (missing_hits.size() == 1) + rom_addr.push_back(1); + else + rom_addr.push_back(0); + + if (missing_hits.size() == 1) { // 7 layers fit + if (missing_hits[0] < 4) + 
rom_addr.push_back(0); // First SL has 4 hits (1) or 3 (0) + else + rom_addr.push_back(1); + if (missing_hits[0] % 4 == 0) { + rom_addr.push_back(0); + rom_addr.push_back(0); + } else if (missing_hits[0] % 4 == 1) { + rom_addr.push_back(0); + rom_addr.push_back(1); + } else if (missing_hits[0] % 4 == 2) { + rom_addr.push_back(1); + rom_addr.push_back(0); + } else { // missing_hits[0] == 3 + rom_addr.push_back(1); + rom_addr.push_back(1); + } + for (size_t ilat = 0; ilat < lats.size(); ilat++) { + if ((int)ilat == missing_hits[0]) // only applies to 3-hit, as in 4-hit missL=-1 + continue; + auto lat = lats[ilat]; + if (lat == -1) + lat = 0; + rom_addr.push_back(lat); + } + + } else if (missing_hits.empty()) { // 8 layers fit + for (size_t ilat = 0; ilat < lats.size(); ilat++) { + auto lat = lats[ilat]; + if (lat == -1) + lat = 0; + rom_addr.push_back(lat); + } + auto lat = lats[NUM_LAYERS + 3]; + if (lat == -1) + lat = 0; + rom_addr.push_back(lat); + rom_addr.push_back(lat); + + } else { // 6 layers fit + for (int i = missing_hits.size() - 1; i >= 0; i--) { + if (missing_hits[i] % 4 == 0) { + rom_addr.push_back(0); + rom_addr.push_back(0); + } else if (missing_hits[i] % 4 == 1) { + rom_addr.push_back(0); + rom_addr.push_back(1); + } else if (missing_hits[i] % 4 == 2) { + rom_addr.push_back(1); + rom_addr.push_back(0); + } else { // missing_hits[i] % 4 == 3 + rom_addr.push_back(1); + rom_addr.push_back(1); + } + } + for (size_t ilat = 0; ilat < lats.size(); ilat++) { + if ((int)ilat == missing_hits[0] || (int)ilat == missing_hits[1]) // only applies to 3-hit, as in 4-hit missL=-1 + continue; + auto lat = lats[ilat]; + if (lat == -1) + lat = 0; + rom_addr.push_back(lat); + } + } + std::reverse(rom_addr.begin(), rom_addr.end()); + return vhdl_unsigned_to_int(rom_addr); +} diff --git a/L1Trigger/DTTriggerPhase2/src/MuonPathFitter.cc b/L1Trigger/DTTriggerPhase2/src/MuonPathFitter.cc new file mode 100644 index 0000000000000..57dc963b79995 --- /dev/null +++ 
b/L1Trigger/DTTriggerPhase2/src/MuonPathFitter.cc @@ -0,0 +1,399 @@ +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathFitter.h" +#include +#include + +using namespace edm; +using namespace std; +using namespace cmsdt; + +// ============================================================================ +// Constructors and destructor +// ============================================================================ +MuonPathFitter::MuonPathFitter(const ParameterSet &pset, + edm::ConsumesCollector &iC, + std::shared_ptr &globalcoordsobtainer) + : MuonPathAnalyzer(pset, iC), debug_(pset.getUntrackedParameter("debug")) { + if (debug_) + LogDebug("MuonPathFitter") << "MuonPathAnalyzer: constructor"; + + //shift phi + int rawId; + shift_filename_ = pset.getParameter("shift_filename"); + std::ifstream ifin3(shift_filename_.fullPath()); + double shift; + if (ifin3.fail()) { + throw cms::Exception("Missing Input File") + << "MuonPathFitter::MuonPathFitter() - Cannot find " << shift_filename_.fullPath(); + } + while (ifin3.good()) { + ifin3 >> rawId >> shift; + shiftinfo_[rawId] = shift; + } + + int wh, st, se, maxdrift; + maxdrift_filename_ = pset.getParameter("maxdrift_filename"); + std::ifstream ifind(maxdrift_filename_.fullPath()); + if (ifind.fail()) { + throw cms::Exception("Missing Input File") + << "MPSLFilter::MPSLFilter() - Cannot find " << maxdrift_filename_.fullPath(); + } + while (ifind.good()) { + ifind >> wh >> st >> se >> maxdrift; + maxdriftinfo_[wh][st][se] = maxdrift; + } + + dtGeomH = iC.esConsumes(); + globalcoordsobtainer_ = globalcoordsobtainer; +} + +MuonPathFitter::~MuonPathFitter() { + if (debug_) + LogDebug("MuonPathFitter") << "MuonPathAnalyzer: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ + +//------------------------------------------------------------------ +//--- 
Metodos privados +//------------------------------------------------------------------ + +fit_common_out_t MuonPathFitter::fit(fit_common_in_t fit_common_in, + int XI_WIDTH, + int COEFF_WIDTH_T0, + int COEFF_WIDTH_POSITION, + int COEFF_WIDTH_SLOPE, + int PRECISSION_T0, + int PRECISSION_POSITION, + int PRECISSION_SLOPE, + int PROD_RESIZE_T0, + int PROD_RESIZE_POSITION, + int PROD_RESIZE_SLOPE, + int MAX_DRIFT_TDC, + int sl) { + const int PARTIALS_PRECISSION = 4; + const int PARTIALS_SHR_T0 = PRECISSION_T0 - PARTIALS_PRECISSION; + const int PARTIALS_SHR_POSITION = PRECISSION_POSITION - PARTIALS_PRECISSION; + const int PARTIALS_SHR_SLOPE = PRECISSION_SLOPE - PARTIALS_PRECISSION; + const int PARTIALS_WIDTH_T0 = PROD_RESIZE_T0 - PARTIALS_SHR_T0; + const int PARTIALS_WIDTH_POSITION = PROD_RESIZE_POSITION - PARTIALS_SHR_POSITION; + const int PARTIALS_WIDTH_SLOPE = PROD_RESIZE_SLOPE - PARTIALS_SHR_SLOPE; + + const int WIDTH_TO_PREC = 11 + PARTIALS_PRECISSION; + const int WIDTH_SLOPE_PREC = 14 + PARTIALS_PRECISSION; + const int WIDTH_POSITION_PREC = WIDTH_SLOPE_PREC + 1; + + const int SEMICHAMBER_H_PRECISSION = 13 + PARTIALS_PRECISSION; + const float SEMICHAMBER_H_REAL = ((235. / 2.) / (16. 
* 6.5)) * std::pow(2, SEMICHAMBER_H_PRECISSION); + const int SEMICHAMBER_H = (int)SEMICHAMBER_H_REAL; // signed(SEMICHAMBER_H_WIDTH-1 downto 0) + + const int SEMICHAMBER_RES_SHR = SEMICHAMBER_H_PRECISSION; + + const int LYRANDAHALF_RES_SHR = 4; + + const int CHI2_CALC_RES_BITS = 7; + + /******************************* + clock cycle 1 + *******************************/ + std::vector normalized_times; + std::vector normalized_wirepos; + + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + // normalized times + // this should be resized to an unsigned of 10 bits (max drift time ~508 TDC counts, using 9+1 to include tolerance) + // leaving it as an integer for now + // we are obtaining the difference as the difference in BX + the LS bits from the hit time + + if (fit_common_in.hits_valid[i] == 1) { + int dif_bx = (fit_common_in.hits[i].ti >> (WIDTH_FULL_TIME - WIDTH_COARSED_TIME)) - fit_common_in.coarse_bctr; + + int tmp_norm_time = (dif_bx << (WIDTH_FULL_TIME - WIDTH_COARSED_TIME)) + + (fit_common_in.hits[i].ti % (int)std::pow(2, WIDTH_FULL_TIME - WIDTH_COARSED_TIME)); + // resize test + // this has implications in the FW (reducing number of bits). 
+ // we keep here the int as it is, but we do the same check done in the fw + std::vector tmp_dif_bx_vector; + vhdl_int_to_unsigned(dif_bx, tmp_dif_bx_vector); + vhdl_resize_unsigned(tmp_dif_bx_vector, 12); + if (!vhdl_resize_unsigned_ok(tmp_dif_bx_vector, WIDTH_DIFBX)) + return fit_common_out_t(); + + normalized_times.push_back(tmp_norm_time); + int tmp_wirepos = fit_common_in.hits[i].wp - (fit_common_in.coarse_wirepos << WIREPOS_NORM_LSB_IGNORED); + // resize test + std::vector tmp_wirepos_vector; + vhdl_int_to_signed(tmp_wirepos, tmp_wirepos_vector); + vhdl_resize_signed(tmp_wirepos_vector, WIREPOS_WIDTH); + if (!vhdl_resize_signed_ok(tmp_wirepos_vector, XI_WIDTH)) + return fit_common_out_t(); + + normalized_wirepos.push_back(tmp_wirepos); + } else { // dummy hit + normalized_times.push_back(-1); + normalized_wirepos.push_back(-1); + } + } + + /******************************* + clock cycle 2 + *******************************/ + + std::vector xi_arr; + // min and max times are computed throught several clk cycles in the fw, + // here we compute it at once + int min_hit_time = 999999, max_hit_time = 0; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 1) { + // calculate xi array + auto tmp_xi_incr = normalized_wirepos[i]; + tmp_xi_incr += (-1 + 2 * fit_common_in.lateralities[i]) * normalized_times[i]; + + // resize test + std::vector tmp_xi_incr_vector; + vhdl_int_to_signed(tmp_xi_incr, tmp_xi_incr_vector); + vhdl_resize_signed(tmp_xi_incr_vector, XI_WIDTH + 1); + if (!vhdl_resize_signed_ok(tmp_xi_incr_vector, XI_WIDTH)) + return fit_common_out_t(); + xi_arr.push_back(tmp_xi_incr); + + // calculate min and max times + if (normalized_times[i] < min_hit_time) { + min_hit_time = normalized_times[i]; + } + if (normalized_times[i] > max_hit_time) { + max_hit_time = normalized_times[i]; + } + } else { + xi_arr.push_back(-1); + } + } + + /******************************* + clock cycle 3 + *******************************/ + + std::vector 
products_t0; + std::vector products_position; + std::vector products_slope; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 0) { + products_t0.push_back(-1); + products_position.push_back(-1); + products_slope.push_back(-1); + } else { + products_t0.push_back(xi_arr[i] * vhdl_signed_to_int(fit_common_in.coeffs.t0[i])); + products_position.push_back(xi_arr[i] * vhdl_signed_to_int(fit_common_in.coeffs.position[i])); + products_slope.push_back(xi_arr[i] * vhdl_signed_to_int(fit_common_in.coeffs.slope[i])); + } + } + + /******************************* + clock cycle 4 + *******************************/ + // Do the 8 element sums + int t0_prec = 0, position_prec = 0, slope_prec = 0; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 0) { + continue; + } else { + t0_prec += products_t0[i] >> PARTIALS_SHR_T0; + position_prec += products_position[i] >> PARTIALS_SHR_POSITION; + slope_prec += products_slope[i] >> PARTIALS_SHR_SLOPE; + } + } + + /******************************* + clock cycle 5 + *******************************/ + // Do resize tests for the computed sums with full precision + std::vector t0_prec_vector, position_prec_vector, slope_prec_vector; + vhdl_int_to_signed(t0_prec, t0_prec_vector); + + vhdl_resize_signed(t0_prec_vector, PARTIALS_WIDTH_T0); + if (!vhdl_resize_signed_ok(t0_prec_vector, WIDTH_TO_PREC)) + return fit_common_out_t(); + + vhdl_int_to_signed(position_prec, position_prec_vector); + vhdl_resize_signed(position_prec_vector, PARTIALS_WIDTH_POSITION); + if (!vhdl_resize_signed_ok(position_prec_vector, WIDTH_POSITION_PREC)) + return fit_common_out_t(); + + vhdl_int_to_signed(slope_prec, slope_prec_vector); + vhdl_resize_signed(slope_prec_vector, PARTIALS_WIDTH_SLOPE); + if (!vhdl_resize_signed_ok(slope_prec_vector, WIDTH_SLOPE_PREC)) + return fit_common_out_t(); + + /******************************* + clock cycle 6 + *******************************/ + // Round the fitting parameters to 
the final resolution; + // in vhdl something more sofisticated is done, here we do a float division, round + // and cast again to integer + + int norm_t0 = ((t0_prec >> (PARTIALS_PRECISSION - 1)) + 1) >> 1; + int norm_position = ((position_prec >> (PARTIALS_PRECISSION - 1)) + 1) >> 1; + int norm_slope = ((slope_prec >> (PARTIALS_PRECISSION - 1)) + 1) >> 1; + + // Calculate the (-xi) + pos (+/-) t0, which only is lacking the slope term to become the residuals + std::vector res_partials_arr; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 0) { + res_partials_arr.push_back(-1); + } else { + int tmp_position_prec = position_prec - (xi_arr[i] << PARTIALS_PRECISSION); + // rounding + tmp_position_prec += std::pow(2, PARTIALS_PRECISSION - 1); + + tmp_position_prec += (-1 + 2 * fit_common_in.lateralities[i]) * t0_prec; + res_partials_arr.push_back(tmp_position_prec); + } + } + + // calculate the { slope x semichamber, slope x 1.5 layers, slope x 0.5 layers } + // these 3 values are later combined with different signs to get the slope part + // of the residual for each of the layers. 
+ int slope_x_halfchamb = (((long int)slope_prec * (long int)SEMICHAMBER_H)) >> SEMICHAMBER_RES_SHR; + if (sl == 2) + slope_x_halfchamb = 0; + int slope_x_3semicells = (slope_prec * 3) >> LYRANDAHALF_RES_SHR; + int slope_x_1semicell = (slope_prec * 1) >> LYRANDAHALF_RES_SHR; + + /******************************* + clock cycle 7 + *******************************/ + // Complete the residuals calculation by constructing the slope term (1/2) + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 1) { + if (i % 4 == 0) + res_partials_arr[i] -= slope_x_3semicells; + else if (i % 4 == 1) + res_partials_arr[i] -= slope_x_1semicell; + else if (i % 4 == 2) + res_partials_arr[i] += slope_x_1semicell; + else + res_partials_arr[i] += slope_x_3semicells; + } + } + + /******************************* + clock cycle 8 + *******************************/ + // Complete the residuals calculation by constructing the slope term (2/2) + std::vector residuals, position_prec_arr; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 0) { + residuals.push_back(-1); + position_prec_arr.push_back(-1); + } else { + int tmp_position_prec = res_partials_arr[i]; + tmp_position_prec += (-1 + 2 * (int)(i >= NUM_LAYERS)) * slope_x_halfchamb; + position_prec_arr.push_back(tmp_position_prec); + residuals.push_back(abs(tmp_position_prec >> PARTIALS_PRECISSION)); + } + } + + // minimum and maximum fit t0 + int min_t0 = max_hit_time - MAX_DRIFT_TDC - T0_CUT_TOLERANCE; + int max_t0 = min_hit_time + T0_CUT_TOLERANCE; + + /******************************* + clock cycle 9 + *******************************/ + // Prepare addition of coarse_offset to T0 (T0 de-normalization) + int t0_fine = norm_t0 & (int)(std::pow(2, 5) - 1); + int t0_bx_sign = ((int)(norm_t0 < 0)) * 1; + int t0_bx_abs = abs(norm_t0 >> 5); + + // De-normalize Position and slope + int position = (fit_common_in.coarse_wirepos << WIREPOS_NORM_LSB_IGNORED) + norm_position; + int slope = 
norm_slope; + + // Apply T0 cuts + if (norm_t0 < min_t0) + return fit_common_out_t(); + if (norm_t0 > max_t0) + return fit_common_out_t(); + + // square the residuals + std::vector squared_residuals; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 0) { + squared_residuals.push_back(-1); + } else { + squared_residuals.push_back(residuals[i] * residuals[i]); + } + } + + // check for residuals overflow + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 1) { + std::vector tmp_vector; + int tmp_position_prec = (position_prec_arr[i] >> PARTIALS_PRECISSION); + vhdl_int_to_signed(tmp_position_prec, tmp_vector); + vhdl_resize_signed(tmp_vector, WIDTH_POSITION_PREC); + if (!vhdl_resize_signed_ok(tmp_vector, CHI2_CALC_RES_BITS + 1)) + return fit_common_out_t(); + // Commented for now, maybe later we need to do something here + // if ((tmp_position_prec / (int) std::pow(2, CHI2_CALC_RES_BITS)) > 0) + // return fit_common_out_t(); + } + } + + /******************************* + clock cycle 10, 11, 12 + *******************************/ + int t0 = t0_fine; + t0 += (fit_common_in.coarse_bctr - (-1 + 2 * t0_bx_sign) * t0_bx_abs) * (int)std::pow(2, 5); + + int chi2 = 0; + for (int i = 0; i < 2 * NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[i] == 1) { + chi2 += squared_residuals[i]; + } + } + + // Impose the thresholds + + if (chi2 / 16 >= (int)round(chi2Th_ * (std::pow((float)MAX_DRIFT_TDC / ((float)CELL_SEMILENGTH / 10.), 2)) / 16.)) + return fit_common_out_t(); + + fit_common_out_t fit_common_out; + fit_common_out.position = position; + fit_common_out.slope = slope; + fit_common_out.t0 = t0; + fit_common_out.chi2 = chi2; + fit_common_out.valid_fit = 1; + + return fit_common_out; +} + +coeffs_t MuonPathFitter::RomDataConvert(std::vector slv, + short COEFF_WIDTH_T0, + short COEFF_WIDTH_POSITION, + short COEFF_WIDTH_SLOPE, + short LOLY, + short HILY) { + coeffs_t res; + int ctr = 0; + for (int i = LOLY; i <= HILY; 
i++) { + res.t0[i] = vhdl_slice(slv, COEFF_WIDTH_T0 + ctr - 1, ctr); + vhdl_resize_unsigned(res.t0[i], GENERIC_COEFF_WIDTH); + res.t0[i] = vhdl_slice(res.t0[i], COEFF_WIDTH_T0 - 1, 0); + ctr += COEFF_WIDTH_T0; + } + for (int i = LOLY; i <= HILY; i++) { + res.position[i] = vhdl_slice(slv, COEFF_WIDTH_POSITION + ctr - 1, ctr); + vhdl_resize_unsigned(res.position[i], GENERIC_COEFF_WIDTH); + res.position[i] = vhdl_slice(res.position[i], COEFF_WIDTH_POSITION - 1, 0); + ctr += COEFF_WIDTH_POSITION; + } + for (int i = LOLY; i <= HILY; i++) { + res.slope[i] = vhdl_slice(slv, COEFF_WIDTH_SLOPE + ctr - 1, ctr); + vhdl_resize_unsigned(res.slope[i], GENERIC_COEFF_WIDTH); + res.slope[i] = vhdl_slice(res.slope[i], COEFF_WIDTH_SLOPE - 1, 0); + ctr += COEFF_WIDTH_SLOPE; + } + return res; +} diff --git a/L1Trigger/DTTriggerPhase2/src/MuonPathSLFitter.cc b/L1Trigger/DTTriggerPhase2/src/MuonPathSLFitter.cc new file mode 100644 index 0000000000000..fda1c83d9cdaa --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/MuonPathSLFitter.cc @@ -0,0 +1,509 @@ +#include "L1Trigger/DTTriggerPhase2/interface/MuonPathSLFitter.h" +#include +#include + +using namespace edm; +using namespace std; +using namespace cmsdt; +// ============================================================================ +// Constructors and destructor +// ============================================================================ +MuonPathSLFitter::MuonPathSLFitter(const ParameterSet &pset, + edm::ConsumesCollector &iC, + std::shared_ptr &globalcoordsobtainer) + : MuonPathFitter(pset, iC, globalcoordsobtainer) { + if (debug_) + LogDebug("MuonPathSLFitter") << "MuonPathSLFitter: constructor"; + + //shift theta + int rawId; + double shift; + shift_theta_filename_ = pset.getParameter("shift_theta_filename"); + std::ifstream ifin4(shift_theta_filename_.fullPath()); + if (ifin4.fail()) { + throw cms::Exception("Missing Input File") + << "MuonPathSLFitter::MuonPathSLFitter() - Cannot find " << shift_theta_filename_.fullPath(); 
+ } + + while (ifin4.good()) { + ifin4 >> rawId >> shift; + shiftthetainfo_[rawId] = shift; + } + + // LUTs + sl1_filename_ = pset.getParameter("lut_sl1"); + sl2_filename_ = pset.getParameter("lut_sl2"); + sl3_filename_ = pset.getParameter("lut_sl3"); + + fillLuts(); + + setChi2Th(pset.getParameter("chi2Th")); + setTanPhiTh(pset.getParameter("tanPhiTh")); +} + +MuonPathSLFitter::~MuonPathSLFitter() { + if (debug_) + LogDebug("MuonPathSLFitter") << "MuonPathSLFitter: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void MuonPathSLFitter::initialise(const edm::EventSetup &iEventSetup) { + if (debug_) + LogDebug("MuonPathSLFitter") << "MuonPathSLFitter::initialiase"; + + auto geom = iEventSetup.getHandle(dtGeomH); + dtGeo_ = &(*geom); +} + +void MuonPathSLFitter::run(edm::Event &iEvent, + const edm::EventSetup &iEventSetup, + MuonPathPtrs &muonpaths, + std::vector &lateralities, + std::vector &metaPrimitives) { + if (debug_) + LogDebug("MuonPathSLFitter") << "MuonPathSLFitter: run"; + + // fit per SL (need to allow for multiple outputs for a single mpath) + // for (auto &muonpath : muonpaths) { + if (!muonpaths.empty()) { + auto muonpath = muonpaths[0]; + int rawId = muonpath->primitive(0)->cameraId(); + if (muonpath->primitive(0)->cameraId() == -1) { + rawId = muonpath->primitive(1)->cameraId(); + } + const DTLayerId dtLId(rawId); + max_drift_tdc = maxdriftinfo_[dtLId.wheel() + 2][dtLId.station() - 1][dtLId.sector() - 1]; + } + + for (size_t i = 0; i < muonpaths.size(); i++) { + auto muonpath = muonpaths[i]; + auto lats = lateralities[i]; + analyze(muonpath, lats, metaPrimitives); + } +} + +void MuonPathSLFitter::finish() { + if (debug_) + LogDebug("MuonPathSLFitter") << "MuonPathSLFitter: finish"; +}; + +//------------------------------------------------------------------ +//--- Metodos 
privados +//------------------------------------------------------------------ + +void MuonPathSLFitter::analyze(MuonPathPtr &inMPath, + lat_vector lat_combs, + std::vector &metaPrimitives) { + auto sl = inMPath->primitive(0)->superLayerId(); // 0, 1, 2 + + int selected_lay = 1; + if (inMPath->primitive(0)->tdcTimeStamp() != -1) + selected_lay = 0; + + int dumLayId = inMPath->primitive(selected_lay)->cameraId(); + auto dtDumlayerId = DTLayerId(dumLayId); + DTSuperLayerId MuonPathSLId(dtDumlayerId.wheel(), dtDumlayerId.station(), dtDumlayerId.sector(), sl + 1); + + DTChamberId ChId(MuonPathSLId.wheel(), MuonPathSLId.station(), MuonPathSLId.sector()); + + DTSuperLayerId MuonPathSL1Id(dtDumlayerId.wheel(), dtDumlayerId.station(), dtDumlayerId.sector(), 1); + DTSuperLayerId MuonPathSL2Id(dtDumlayerId.wheel(), dtDumlayerId.station(), dtDumlayerId.sector(), 2); + DTSuperLayerId MuonPathSL3Id(dtDumlayerId.wheel(), dtDumlayerId.station(), dtDumlayerId.sector(), 3); + DTWireId wireIdSL1(MuonPathSL1Id, 2, 1); + DTWireId wireIdSL2(MuonPathSL2Id, 2, 1); + DTWireId wireIdSL3(MuonPathSL3Id, 2, 1); + auto sl_shift_cm = shiftinfo_[wireIdSL1.rawId()] - shiftinfo_[wireIdSL3.rawId()]; + + fit_common_in_t fit_common_in; + + // 8-element vectors, for the 8 layers. As here we are fitting one SL only, we leave the other SL values as dummy ones + fit_common_in.hits = {}; + fit_common_in.hits_valid = {}; + + int quality = 3; + if (inMPath->missingLayer() != -1) + quality = 1; + + int minISL = 1; + int maxISL = 3; + if (sl < 1) { + minISL = 0; + maxISL = 2; + } + + for (int isl = minISL; isl < maxISL; isl++) { + for (int i = 0; i < NUM_LAYERS; i++) { + if (isl == sl && inMPath->missingLayer() != i) { + // Include both valid and non-valid hits. Non-valid values can be whatever, leaving all as -1 to make debugging easier. 
+ auto ti = inMPath->primitive(i)->tdcTimeStamp(); + if (ti != -1) + ti = (int)round(((float)TIME_TO_TDC_COUNTS / (float)LHC_CLK_FREQ) * ti); + auto wi = inMPath->primitive(i)->channelId(); + auto ly = inMPath->primitive(i)->layerId(); + // int layId = inMPath->primitive(i)->cameraId(); + // auto dtlayerId = DTLayerId(layId); + // auto wireId = DTWireId(dtlayerId, wi + 1); // wire start from 1, mixer groups them starting from 0 + // int rawId = wireId.rawId(); + // wp in tdc counts (still in floating point) + int wp_semicells = (wi - SL1_CELLS_OFFSET) * 2 + 1; + if (ly % 2 == 1) + wp_semicells -= 1; + if (isl == 2) + wp_semicells -= (int)round((sl_shift_cm * 10) / CELL_SEMILENGTH); + + float wp_tdc = wp_semicells * max_drift_tdc; + int wp = (int)((long int)(round(wp_tdc * std::pow(2, WIREPOS_WIDTH))) / (int)std::pow(2, WIREPOS_WIDTH)); + fit_common_in.hits.push_back({ti, wi, ly, wp}); + // fill valids as well + if (inMPath->missingLayer() == i) + fit_common_in.hits_valid.push_back(0); + else + fit_common_in.hits_valid.push_back(1); + } else { + fit_common_in.hits.push_back({-1, -1, -1, -1}); + fit_common_in.hits_valid.push_back(0); + } + } + } + + int smallest_time = 999999, tmp_coarse_wirepos_1 = -1, tmp_coarse_wirepos_3 = -1; + // coarse_bctr is the 12 MSB of the smallest tdc + for (int isl = 0; isl < 3; isl++) { + if (isl != sl) + continue; + int myisl = isl < 2 ? 
0 : 1; + for (size_t i = 0; i < NUM_LAYERS; i++) { + if (fit_common_in.hits_valid[NUM_LAYERS * myisl + i] == 0) + continue; + else if (fit_common_in.hits[NUM_LAYERS * myisl + i].ti < smallest_time) + smallest_time = fit_common_in.hits[NUM_LAYERS * myisl + i].ti; + } + if (fit_common_in.hits_valid[NUM_LAYERS * myisl + 0] == 1) + tmp_coarse_wirepos_1 = fit_common_in.hits[NUM_LAYERS * myisl + 0].wp; + else + tmp_coarse_wirepos_1 = fit_common_in.hits[NUM_LAYERS * myisl + 1].wp; + if (fit_common_in.hits_valid[NUM_LAYERS * myisl + 3] == 1) + tmp_coarse_wirepos_3 = fit_common_in.hits[NUM_LAYERS * myisl + 3].wp; + else + tmp_coarse_wirepos_3 = fit_common_in.hits[NUM_LAYERS * myisl + 2].wp; + + tmp_coarse_wirepos_1 = tmp_coarse_wirepos_1 >> WIREPOS_NORM_LSB_IGNORED; + tmp_coarse_wirepos_3 = tmp_coarse_wirepos_3 >> WIREPOS_NORM_LSB_IGNORED; + } + fit_common_in.coarse_bctr = smallest_time >> (WIDTH_FULL_TIME - WIDTH_COARSED_TIME); + fit_common_in.coarse_wirepos = (tmp_coarse_wirepos_1 + tmp_coarse_wirepos_3) >> 1; + + for (auto &lat_comb : lat_combs) { + if (lat_comb[0] == 0 && lat_comb[1] == 0 && lat_comb[2] == 0 && lat_comb[3] == 0) + continue; + fit_common_in.lateralities.clear(); + + auto rom_addr = get_rom_addr(inMPath, lat_comb); + coeffs_t coeffs; + if (sl == 0) { + coeffs = + RomDataConvert(lut_sl1[rom_addr], COEFF_WIDTH_SL_T0, COEFF_WIDTH_SL_POSITION, COEFF_WIDTH_SL_SLOPE, 0, 3); + } else if (sl == 1) { + coeffs = + RomDataConvert(lut_sl2[rom_addr], COEFF_WIDTH_SL_T0, COEFF_WIDTH_SL2_POSITION, COEFF_WIDTH_SL_SLOPE, 0, 3); + } else { + coeffs = + RomDataConvert(lut_sl3[rom_addr], COEFF_WIDTH_SL_T0, COEFF_WIDTH_SL_POSITION, COEFF_WIDTH_SL_SLOPE, 4, 7); + } + // Filling lateralities + int minISL = 1; + int maxISL = 3; + if (sl < 1) { + minISL = 0; + maxISL = 2; + } + + for (int isl = minISL; isl < maxISL; isl++) { + for (size_t i = 0; i < NUM_LAYERS; i++) { + if (isl == sl) { + fit_common_in.lateralities.push_back(lat_comb[i]); + } else + 
fit_common_in.lateralities.push_back(-1); + } + } + fit_common_in.coeffs = coeffs; + + auto fit_common_out = fit(fit_common_in, + XI_SL_WIDTH, + COEFF_WIDTH_SL_T0, + sl == 1 ? COEFF_WIDTH_SL2_POSITION : COEFF_WIDTH_SL_POSITION, + COEFF_WIDTH_SL_SLOPE, + PRECISSION_SL_T0, + PRECISSION_SL_POSITION, + PRECISSION_SL_SLOPE, + PROD_RESIZE_SL_T0, + sl == 1 ? PROD_RESIZE_SL2_POSITION : PROD_RESIZE_SL_POSITION, + PROD_RESIZE_SL_SLOPE, + max_drift_tdc, + sl + 1); + + if (fit_common_out.valid_fit == 1) { + float t0_f = ((float)fit_common_out.t0) * (float)LHC_CLK_FREQ / (float)TIME_TO_TDC_COUNTS; + + float slope_f = -fit_common_out.slope * ((float)CELL_SEMILENGTH / max_drift_tdc) * (1) / (CELL_SEMIHEIGHT * 16.); + if (sl != 1 && std::abs(slope_f) > tanPhiTh_) + continue; + + DTWireId wireId(MuonPathSLId, 2, 1); + float pos_ch_f = (float)(fit_common_out.position) * ((float)CELL_SEMILENGTH / (float)max_drift_tdc) / 10; + pos_ch_f += (SL1_CELLS_OFFSET * CELL_LENGTH) / 10.; + if (sl != 1) + pos_ch_f += shiftinfo_[wireIdSL1.rawId()]; + else if (sl == 1) + pos_ch_f += shiftthetainfo_[wireIdSL2.rawId()]; + + float pos_sl_f = pos_ch_f - (sl - 1) * slope_f * VERT_PHI1_PHI3 / 2; + float chi2_f = fit_common_out.chi2 * std::pow(((float)CELL_SEMILENGTH / (float)max_drift_tdc), 2) / 100; + + // obtention of global coordinates using luts + int pos = (int)(10 * (pos_sl_f - shiftinfo_[wireId.rawId()]) * INCREASED_RES_POS_POW); + int slope = (int)(-slope_f * INCREASED_RES_SLOPE_POW); + + auto global_coords = globalcoordsobtainer_->get_global_coordinates(ChId.rawId(), sl + 1, pos, slope); + float phi = global_coords[0]; + float phiB = global_coords[1]; + + // obtention of global coordinates using cmssw geometry + double z = 0; + if (ChId.station() == 3 or ChId.station() == 4) { + z = Z_SHIFT_MB4; + } + GlobalPoint jm_x_cmssw_global = dtGeo_->chamber(ChId)->toGlobal(LocalPoint(pos_sl_f, 0., z)); + int thisec = ChId.sector(); + if (thisec == 13) + thisec = 4; + if (thisec == 14) + thisec = 10; + 
float phi_cmssw = jm_x_cmssw_global.phi() - PHI_CONV * (thisec - 1); + float psi = atan(slope_f); + float phiB_cmssw = hasPosRF(ChId.wheel(), ChId.sector()) ? psi - phi_cmssw : -psi - phi_cmssw; + if (sl == 0) + metaPrimitives.emplace_back(metaPrimitive({MuonPathSLId.rawId(), + t0_f, + (double)(fit_common_out.position), + (double)fit_common_out.slope, + phi, + phiB, + phi_cmssw, + phiB_cmssw, + chi2_f, + quality, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + lat_comb[0], + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + lat_comb[1], + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + lat_comb[2], + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + lat_comb[3], + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1})); + else if (sl == 2) + metaPrimitives.emplace_back(metaPrimitive({MuonPathSLId.rawId(), + t0_f, + (double)(fit_common_out.position), + (double)fit_common_out.slope, + phi, + phiB, + phi_cmssw, + phiB_cmssw, + chi2_f, + quality, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + lat_comb[0], + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + lat_comb[1], + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + lat_comb[2], + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + lat_comb[3], + -1})); + else if (sl == 1) { + // fw-like calculation + DTLayerId SL2_layer2Id(MuonPathSLId, 2); + double z_shift = shiftthetainfo_[SL2_layer2Id.rawId()]; + double pos_cm = + pos / 10 / INCREASED_RES_POS_POW; // fixed to have z_shift and the position in the same units (MC) + double jm_y = hasPosRF(MuonPathSLId.wheel(), MuonPathSLId.sector()) ? 
z_shift - pos_cm : z_shift + pos_cm; + + phi = jm_y; + + // Fixed sign of k (MC) + double k_fromfw = hasPosRF(MuonPathSLId.wheel(), MuonPathSLId.sector()) ? slope_f : -slope_f; + phiB = k_fromfw; + + // cmssw-like calculation + LocalPoint wire1_in_layer(dtGeo_->layer(SL2_layer2Id)->specificTopology().wirePosition(1), 0, -0.65); + GlobalPoint wire1_in_global = dtGeo_->layer(SL2_layer2Id)->toGlobal(wire1_in_layer); + LocalPoint wire1_in_sl = dtGeo_->superLayer(MuonPathSLId)->toLocal(wire1_in_global); + double x_shift = wire1_in_sl.x(); + jm_y = (dtGeo_->superLayer(MuonPathSLId) + ->toGlobal(LocalPoint(double(pos) / (10 * pow(2, INCREASED_RES_POS)) + x_shift, 0., 0))) + .z(); + phi_cmssw = jm_y; + + double x_cmssw = (dtGeo_->superLayer(MuonPathSLId) + ->toGlobal(LocalPoint(double(pos) / (10 * pow(2, INCREASED_RES_POS)) + x_shift, 0., 0))) + .x(); + double y_cmssw = (dtGeo_->superLayer(MuonPathSLId) + ->toGlobal(LocalPoint(double(pos) / (10 * pow(2, INCREASED_RES_POS)) + x_shift, 0., 0))) + .y(); + double r_cmssw = sqrt(x_cmssw * x_cmssw + y_cmssw * y_cmssw); + double k_cmssw = jm_y / r_cmssw; + phiB_cmssw = k_cmssw; + + metaPrimitives.emplace_back(metaPrimitive({MuonPathSLId.rawId(), + t0_f, + (double)(fit_common_out.position), + (double)fit_common_out.slope, + phi, + phiB, + phi_cmssw, + phiB_cmssw, + chi2_f, + quality, + inMPath->primitive(0)->channelId(), + inMPath->primitive(0)->tdcTimeStamp(), + lat_comb[0], + inMPath->primitive(1)->channelId(), + inMPath->primitive(1)->tdcTimeStamp(), + lat_comb[1], + inMPath->primitive(2)->channelId(), + inMPath->primitive(2)->tdcTimeStamp(), + lat_comb[2], + inMPath->primitive(3)->channelId(), + inMPath->primitive(3)->tdcTimeStamp(), + lat_comb[3], + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1})); + } + } // (fit_common_out.valid_fit == 1) + } // loop in lat_combs + return; +} + +void MuonPathSLFitter::fillLuts() { + std::ifstream ifinsl1(sl1_filename_.fullPath()); + std::string line; + while 
(ifinsl1.good()) { + ifinsl1 >> line; + + std::vector myNumbers; + for (size_t i = 0; i < line.size(); i++) { + // This converts the char into an int and pushes it into vec + myNumbers.push_back(line[i] - '0'); // The digits will be in the same order as before + } + std::reverse(myNumbers.begin(), myNumbers.end()); + lut_sl1.push_back(myNumbers); + } + + std::ifstream ifinsl2(sl2_filename_.fullPath()); + while (ifinsl2.good()) { + ifinsl2 >> line; + + std::vector myNumbers; + for (size_t i = 0; i < line.size(); i++) { + // This converts the char into an int and pushes it into vec + myNumbers.push_back(line[i] - '0'); // The digits will be in the same order as before + } + std::reverse(myNumbers.begin(), myNumbers.end()); + lut_sl2.push_back(myNumbers); + } + + std::ifstream ifinsl3(sl3_filename_.fullPath()); + while (ifinsl3.good()) { + ifinsl3 >> line; + + std::vector myNumbers; + for (size_t i = 0; i < line.size(); i++) { + // This converts the char into an int and pushes it into vec + myNumbers.push_back(line[i] - '0'); // The digits will be in the same order as before + } + std::reverse(myNumbers.begin(), myNumbers.end()); + lut_sl3.push_back(myNumbers); + } + + return; +} + +int MuonPathSLFitter::get_rom_addr(MuonPathPtr &inMPath, latcomb lats) { + std::vector rom_addr; + auto missing_layer = inMPath->missingLayer(); + if (missing_layer == -1) { + rom_addr.push_back(1); + rom_addr.push_back(0); + } else { + if (missing_layer == 0) { + rom_addr.push_back(0); + rom_addr.push_back(0); + } else if (missing_layer == 1) { + rom_addr.push_back(0); + rom_addr.push_back(1); + } else if (missing_layer == 2) { + rom_addr.push_back(1); + rom_addr.push_back(0); + } else { // missing_layer == 3 + rom_addr.push_back(1); + rom_addr.push_back(1); + } + } + for (size_t ilat = 0; ilat < lats.size(); ilat++) { + if ((int)ilat == missing_layer) // only applies to 3-hit, as in 4-hit missL=-1 + continue; + auto lat = lats[ilat]; + if (lat == -1) + lat = 0; + rom_addr.push_back(lat); 
+ } + std::reverse(rom_addr.begin(), rom_addr.end()); + return vhdl_unsigned_to_int(rom_addr); +} diff --git a/L1Trigger/DTTriggerPhase2/src/TrapezoidalGrouping.cc b/L1Trigger/DTTriggerPhase2/src/TrapezoidalGrouping.cc new file mode 100644 index 0000000000000..fee10c32095d0 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/TrapezoidalGrouping.cc @@ -0,0 +1,229 @@ +#include + +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +#include "L1Trigger/DTTriggerPhase2/interface/TrapezoidalGrouping.h" + +using namespace edm; +using namespace std; +using namespace cmsdt; +using namespace dtamgrouping; +// ============================================================================ +// Constructors and destructor +// ============================================================================ +TrapezoidalGrouping::TrapezoidalGrouping(const ParameterSet &pset, edm::ConsumesCollector &iC) + : MotherGrouping(pset, iC), debug_(pset.getUntrackedParameter("debug")), currentBaseChannel_(-1) { + // Obtention of parameters + if (debug_) + LogDebug("TrapezoidalGrouping") << "TrapezoidalGrouping: constructor"; + + // Initialisation of channelIn array + chInDummy_.push_back(DTPrimitive()); + for (int lay = 0; lay < NUM_LAYERS; lay++) { + for (int ch = 0; ch < NUM_CH_PER_LAYER; ch++) { + channelIn_[lay][ch] = {chInDummy_}; + channelIn_[lay][ch].clear(); + } + } +} + +TrapezoidalGrouping::~TrapezoidalGrouping() { + if (debug_) + LogDebug("TrapezoidalGrouping") << "TrapezoidalGrouping: destructor"; +} + +// ============================================================================ +// Main methods (initialise, run, finish) +// ============================================================================ +void TrapezoidalGrouping::initialise(const edm::EventSetup &iEventSetup) { + if (debug_) + LogDebug("TrapezoidalGrouping") << "TrapezoidalGrouping::initialiase"; +} + +void TrapezoidalGrouping::run(Event &iEvent, + const EventSetup &iEventSetup, + const DTDigiCollection &digis, 
+ MuonPathPtrs &mpaths) { + // This function returns the analyzable mpath collection back to the the main function + // so it can be fitted. This is in fact doing the so-called grouping. + for (int supLayer = 0; supLayer < NUM_SUPERLAYERS; supLayer++) { // for each SL: + if (debug_) + LogDebug("TrapezoidalGrouping") << "TrapezoidalGrouping::run Reading SL" << supLayer; + setInChannels(&digis, supLayer); + + for (auto &hit : all_hits) { + int layer_to_pivot = hit.layerId(); + int channel_to_pivot = hit.channelId(); + DTPrimitives hits_in_trapezoid; + std::vector hit_mpaths; + std::vector hit_tasks; + for (size_t itask = 0; itask < task_list.size(); itask++) { + // when pivoting over an internal layer, there are two cases + // where the second layer is duplicated + // 12 (0, 5) <-> 14 (0, 7) + // 15 (1, 6) <-> 17 (1, 8) + // we leave it hard-coded here, could be moved somewhere else + if (layer_to_pivot == 1 || layer_to_pivot == 2) { + if (itask == 14 || itask == 17) + continue; + } + + auto task = task_list[itask]; + + std::vector task_mpaths; + std::stack> mpath_cells_per_task; + mpath_cells_per_task.push(std::make_pair(DTPrimitives({hit}), 0)); + + while (!mpath_cells_per_task.empty()) { + auto mpath_cells = std::move(mpath_cells_per_task.top()); + std::vector tmp_mpaths = {mpath_cells.first}; + auto task_index = mpath_cells.second; + auto cell = task[task_index]; + auto vertical_shift = trapezoid_vertical_mapping[layer_to_pivot][cell]; + auto horizontal_shift = trapezoid_horizontal_mapping[layer_to_pivot][cell]; + if (channel_to_pivot + horizontal_shift >= 0 && channel_to_pivot + horizontal_shift < NUM_CH_PER_LAYER) { + tmp_mpaths = group_hits(hit, + tmp_mpaths, + channelIn_[layer_to_pivot + vertical_shift][channel_to_pivot + horizontal_shift], + hits_in_trapezoid); + } + mpath_cells_per_task.pop(); + for (const auto &tmp_mpath : tmp_mpaths) { + mpath_cells_per_task.push(std::make_pair(tmp_mpath, task_index + 1)); + } + while (!mpath_cells_per_task.empty()) { + 
if (mpath_cells_per_task.top().second == (int)task.size()) { + task_mpaths.push_back(mpath_cells_per_task.top().first); + mpath_cells_per_task.pop(); + } else + break; + } + } + for (auto &task_mpath : task_mpaths) { + hit_mpaths.push_back(task_mpath); + hit_tasks.push_back(itask); + } + } + if (hits_in_trapezoid.size() <= PATHFINDER_INPUT_HITS_LIMIT) { + for (size_t ipath = 0; ipath < hit_mpaths.size(); ipath++) { + auto ptrPrimitive = hit_mpaths[ipath]; + auto itask = hit_tasks[ipath]; + + // In any case, if we have less than 3 hits, we don't output the mpath + if (ptrPrimitive.size() < 3) + continue; + + // check if the task has a missing layer associated + // if it does, we add a dummy hit in the missing layer + // if it does not, we check that we actually have 4 present hits; + // if not, we skip the mpath. + if (MISSING_LAYER_LAYOUTS_PER_TASK[layer_to_pivot][itask] != -1) { + auto dtpAux = DTPrimitive(); + dtpAux.setTDCTimeStamp(-1); + dtpAux.setChannelId(-1); + dtpAux.setLayerId(MISSING_LAYER_LAYOUTS_PER_TASK[layer_to_pivot][itask]); // L=0,1,2,3 + dtpAux.setSuperLayerId(hit.superLayerId()); + dtpAux.setCameraId(-1); + ptrPrimitive.push_back(dtpAux); + } else { // we have no missing hits, it must be a 4-hit TP. 
+ if (ptrPrimitive.size() < 4) + continue; + } + + // sort the hits by layer, so they are included ordered in the MuonPath object + std::stable_sort(ptrPrimitive.begin(), ptrPrimitive.end(), hitLayerSort); + + auto ptrMuonPath = std::make_shared(ptrPrimitive); + ptrMuonPath->setCellHorizontalLayout(CELL_HORIZONTAL_LAYOUTS_PER_TASK[layer_to_pivot][itask]); + ptrMuonPath->setMissingLayer(MISSING_LAYER_LAYOUTS_PER_TASK[layer_to_pivot][itask]); + mpaths.push_back(std::move(ptrMuonPath)); + } + } + } + } + if (debug_) + LogDebug("TrapezoidalGrouping") << "[TrapezoidalGrouping::run] end"; +} + +void TrapezoidalGrouping::finish() { return; }; + +// ============================================================================ +// Other methods +// ============================================================================ + +void TrapezoidalGrouping::setInChannels(const DTDigiCollection *digis, int sl) { + // before setting channels we need to clear + for (int lay = 0; lay < NUM_LAYERS; lay++) { + for (int ch = 0; ch < NUM_CH_PER_LAYER; ch++) { + channelIn_[lay][ch].clear(); + } + } + all_hits.clear(); + + // now fill with those primitives that make sense: + for (const auto &dtLayerId_It : *digis) { + const DTLayerId dtLId = dtLayerId_It.first; + + if (dtLId.superlayer() != sl + 1) + continue; //skip digis not in SL... 
+ + for (DTDigiCollection::const_iterator digiIt = (dtLayerId_It.second).first; digiIt != (dtLayerId_It.second).second; + ++digiIt) { + int layer = dtLId.layer() - 1; + int wire = (*digiIt).wire() - 1; + int digiTIME = (*digiIt).time(); + int digiTIMEPhase2 = digiTIME; + + if (debug_) + LogDebug("TrapezoidalGrouping") << "[TrapezoidalGrouping::setInChannels] SL" << sl << " L" << layer << " : " + << wire << " " << digiTIMEPhase2; + auto dtpAux = DTPrimitive(); + dtpAux.setTDCTimeStamp(digiTIMEPhase2); + dtpAux.setChannelId(wire); + dtpAux.setLayerId(layer); // L=0,1,2,3 + dtpAux.setSuperLayerId(sl); // SL=0,1,2 + dtpAux.setCameraId(dtLId.rawId()); + channelIn_[layer][wire].push_back(dtpAux); + all_hits.push_back(dtpAux); + } + } + + // sort everything by the time of the hits, so it has the same behaviour as the fw + for (int lay = 0; lay < NUM_LAYERS; lay++) { + for (int ch = 0; ch < NUM_CH_PER_LAYER; ch++) { + std::stable_sort(channelIn_[lay][ch].begin(), channelIn_[lay][ch].end(), hitTimeSort); + } + } + std::stable_sort(all_hits.begin(), all_hits.end(), hitTimeSort); +} + +std::vector TrapezoidalGrouping::group_hits(DTPrimitive pivot_hit, + std::vector input_paths, + DTPrimitives hits_per_cell, + DTPrimitives &hits_in_trapezoid) { + std::vector output_paths; + for (auto &hit : hits_per_cell) { + int hit_bx = hit.tdcTimeStamp() / LHC_CLK_FREQ; + int pivot_hit_bx = pivot_hit.tdcTimeStamp() / LHC_CLK_FREQ; + if (hitTimeSort(pivot_hit, hit) || (pivot_hit_bx / BX_PER_FRAME) - (hit_bx / BX_PER_FRAME) > MAX_FRAME_DIF) + continue; + // limit the number of hits in the trapezoid to PATHFINDER_INPUT_HITS_LIMIT + if (std::find(hits_in_trapezoid.begin(), hits_in_trapezoid.end(), hit) == hits_in_trapezoid.end()) + hits_in_trapezoid.push_back(hit); + + if (hits_in_trapezoid.size() > PATHFINDER_INPUT_HITS_LIMIT) { + std::vector empty_paths; + return empty_paths; + } + + for (auto &input_path : input_paths) { + auto tmp_path = input_path; + tmp_path.push_back(hit); + 
output_paths.push_back(tmp_path); + } + } + if (output_paths.empty()) + return input_paths; + else + return output_paths; +} diff --git a/L1Trigger/DTTriggerPhase2/src/lat_code_maker.py b/L1Trigger/DTTriggerPhase2/src/lat_code_maker.py new file mode 100644 index 0000000000000..23a825e65786c --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/lat_code_maker.py @@ -0,0 +1,9 @@ +filename = "basic_lats.txt" + +with open(filename) as f: + lines = f.readlines() + +for iline, line in enumerate(lines): + line = line.strip().split("/") + text = "lat_combinations.push_back({ %s, %s, %s });" % (line[0], line[1], line[2]) + print(text) diff --git a/L1Trigger/DTTriggerPhase2/src/vhdl_functions.cc b/L1Trigger/DTTriggerPhase2/src/vhdl_functions.cc new file mode 100644 index 0000000000000..1343ed5786fb1 --- /dev/null +++ b/L1Trigger/DTTriggerPhase2/src/vhdl_functions.cc @@ -0,0 +1,93 @@ +#include "L1Trigger/DTTriggerPhase2/interface/vhdl_functions.h" + +// "a la vhdl" functions +std::vector vhdl_slice(std::vector v, int upper, int lower) { + int final_value = lower; + if (final_value < 0) + final_value = 0; + + std::vector v1; + for (int i = final_value; i <= upper; i++) { + v1.push_back(v[i]); + } + return v1; +} + +int vhdl_unsigned_to_int(std::vector v) { + int res = 0; + + for (size_t i = 0; i < v.size(); i++) { + res = res + v[i] * std::pow(2, i); + } + return res; +} + +int vhdl_signed_to_int(std::vector v) { + if (v[v.size() - 1] == 0) + return vhdl_unsigned_to_int(v); + else + return -(std::pow(2, v.size()) - vhdl_unsigned_to_int(v)); +} + +void vhdl_int_to_unsigned(int value, std::vector &v) { + if (value == 0) { + v.push_back(0); + } else if (value != 1) { + v.push_back(value % 2); + vhdl_int_to_unsigned(value / 2, v); + } else { + v.push_back(1); + } + return; +} + +void vhdl_int_to_signed(int value, std::vector &v) { + if (value < 0) { + int val = 1; + int size = 1; + while (val < -value) { + val *= 2; + size += 1; + } + vhdl_int_to_unsigned(val + value, v); + for (int i 
= v.size(); i < size - 1; i++) { + v.push_back(0); + } + v.push_back(1); + } else { + vhdl_int_to_unsigned(value, v); + v.push_back(0); + } + return; +} + +void vhdl_resize_unsigned(std::vector &v, int new_size) { + for (int i = v.size(); i < new_size; i++) { + v.push_back(0); + } +} + +void vhdl_resize_signed(std::vector &v, int new_size) { + int elem = 0; + if (v[v.size() - 1] == 1) + elem = 1; + for (int i = v.size(); i < new_size; i++) { + v.push_back(elem); + } +} + +bool vhdl_resize_signed_ok(std::vector v, int new_size) { + for (size_t i = v.size() - 1 - 1; i >= v.size() - 1 - (v.size() - new_size); i--) { + if (v[i] != v[v.size() - 1]) + return false; + } + return true; +}; + +bool vhdl_resize_unsigned_ok(std::vector v, int new_size) { + for (size_t i = v.size() - 1; i >= v.size() - 1 + 1 - (v.size() - new_size); i--) { + if (v[i] != 0) + return false; + } + return true; +}; diff --git a/L1Trigger/DemonstratorTools/test/gtt/createFirmwareInputFiles_cfg.py b/L1Trigger/DemonstratorTools/test/gtt/createFirmwareInputFiles_cfg.py index 1a5540c377d94..293efb9de91e4 100644 --- a/L1Trigger/DemonstratorTools/test/gtt/createFirmwareInputFiles_cfg.py +++ b/L1Trigger/DemonstratorTools/test/gtt/createFirmwareInputFiles_cfg.py @@ -89,6 +89,7 @@ nStubsMin = cms.int32(4), # number of stubs must be greater than or equal to this value nPSStubsMin = cms.int32(0), # the number of stubs in the PS Modules must be greater than or equal to this value + promptMVAMin = cms.double(-1.0), # MVA must be greater than this value reducedBendChi2Max = cms.double(2.25), # bend chi2 must be less than this value reducedChi2RZMax = cms.double(5.0), # chi2rz/dof must be less than this value reducedChi2RPhiMax = cms.double(20.0), # chi2rphi/dof must be less than this value @@ -103,15 +104,8 @@ process.l1tTrackerEmuHTMiss.debug = (options.debug > 0) #Disable internal track selection -process.l1tTrackJetsEmulation.MaxDzTrackPV = cms.double(10000.0) -process.l1tTrackJetsEmulation.trk_zMax = 
cms.double(10000.0) # maximum track z -process.l1tTrackJetsEmulation.trk_ptMax = cms.double(10000.0) # maximumum track pT before saturation [GeV] -process.l1tTrackJetsEmulation.trk_ptMin = cms.double(0.0) # minimum track pt [GeV] -process.l1tTrackJetsEmulation.trk_etaMax = cms.double(10000.0) # maximum track eta -process.l1tTrackJetsEmulation.nStubs4PromptChi2=cms.double(10000.0) #Prompt track quality flags for loose/tight -process.l1tTrackJetsEmulation.nStubs4PromptBend=cms.double(10000.0) -process.l1tTrackJetsEmulation.nStubs5PromptChi2=cms.double(10000.0) -process.l1tTrackJetsEmulation.nStubs5PromptBend=cms.double(10000.0) +#There is a problem with setting all of these (especially eta) to high numbers. +process.l1tTrackJetsEmulation.trk_zMax = cms.double(20.46912512) # maximum track z from TrackWord if options.debug: process.MessageLogger.cerr.INFO.limit = cms.untracked.int32(1000000000) diff --git a/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloEGammaUtils.h b/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloEGammaUtils.h index a325850b399e8..6504ae2dbb816 100644 --- a/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloEGammaUtils.h +++ b/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloEGammaUtils.h @@ -5,6 +5,7 @@ #define L1Trigger_L1CaloTrigger_Phase2L1CaloEGammaUtils #include +#include #include #include #include @@ -60,7 +61,7 @@ namespace p2eg { static constexpr float e0_looseTkss = 0.944, e1_looseTkss = 0.65, e2_looseTkss = 0.4; // passes_looseTkss static constexpr float cut_500_MeV = 0.5; - static constexpr float ECAL_LSB = 0.125; // to convert from int to float (GeV) multiply by LSB + static constexpr float ECAL_LSB = 0.5; // to convert from int to float (GeV) multiply by LSB static constexpr float HCAL_LSB = 0.5; static constexpr int N_CLUSTERS_PER_REGION = 4; // number of clusters per ECAL region @@ -389,28 +390,12 @@ namespace p2eg { class region3x4 { private: int idx_ = -1; - linkECAL linksECAL[TOWER_IN_ETA][TOWER_IN_PHI]; // 3x4 in towers + std::array, 
TOWER_IN_ETA> linksECAL; // 3x4 in towers public: // constructor region3x4() { idx_ = -1; } - // copy constructor - region3x4(const region3x4& other) { - idx_ = other.idx_; - for (int i = 0; i < TOWER_IN_ETA; i++) { - for (int j = 0; j < TOWER_IN_PHI; j++) { - linksECAL[i][j] = other.linksECAL[i][j]; - } - } - } - - // overload operator= to use copy constructor - region3x4 operator=(const region3x4& other) { - const region3x4& newRegion(other); - return newRegion; - }; - // set members inline void zeroOut() { for (int i = 0; i < TOWER_IN_ETA; i++) { @@ -444,12 +429,6 @@ namespace p2eg { fb = 0; }; - // copy constructor - towerHCAL(const towerHCAL& other) { - et = other.et; - fb = other.fb; - }; - // set members inline void zeroOut() { et = 0; @@ -470,22 +449,12 @@ namespace p2eg { class towers3x4 { private: int idx_ = -1; - towerHCAL towersHCAL[TOWER_IN_ETA][TOWER_IN_PHI]; // 3x4 in towers + std::array, TOWER_IN_ETA> towersHCAL; // 3x4 in towers public: // constructor towers3x4() { idx_ = -1; }; - // copy constructor - towers3x4(const towers3x4& other) { - idx_ = other.idx_; - for (int i = 0; i < TOWER_IN_ETA; i++) { - for (int j = 0; j < TOWER_IN_PHI; j++) { - towersHCAL[i][j] = other.towersHCAL[i][j]; - } - }; - }; - // set members inline void zeroOut() { for (int i = 0; i < TOWER_IN_ETA; i++) { @@ -514,8 +483,8 @@ namespace p2eg { class card { private: int idx_ = -1; - region3x4 card3x4Regions[N_REGIONS_PER_CARD]; - towers3x4 card3x4Towers[N_REGIONS_PER_CARD]; + std::array card3x4Regions; + std::array card3x4Towers; public: // constructor @@ -529,21 +498,6 @@ namespace p2eg { } }; - // copy constructor - card(const card& other) { - idx_ = other.idx_; - for (int i = 0; i < N_REGIONS_PER_CARD; i++) { - card3x4Regions[i] = other.card3x4Regions[i]; - card3x4Towers[i] = other.card3x4Towers[i]; - } - }; - - // overload operator= to use copy constructor - card operator=(const card& other) { - const card& newCard(other); - return newCard; - }; - // set members inline 
void setIdx(int idx) { idx_ = idx; }; inline void zeroOut() { @@ -606,13 +560,6 @@ namespace p2eg { phiMax = 0; etaMax = 0; } - - crystalMax& operator=(const crystalMax& rhs) { - energy = rhs.energy; - phiMax = rhs.phiMax; - etaMax = rhs.etaMax; - return *this; - } }; class ecaltp_t { @@ -689,10 +636,6 @@ namespace p2eg { ap_uint<16> data; tower_t() { data = 0; } - tower_t& operator=(const tower_t& rhs) { - data = rhs.data; - return *this; - } tower_t(ap_uint<12> et, ap_uint<4> hoe) { data = (et) | (((ap_uint<16>)hoe) << 12); } @@ -709,7 +652,7 @@ namespace p2eg { float newEt = getEt() * factor; // Convert the new pT to an unsigned int (16 bits so we can take the logical OR with the bit mask later) - ap_uint<16> newEt_uint = (ap_uint<16>)(int)(newEt * 8.0); + ap_uint<16> newEt_uint = (ap_uint<16>)(int)(newEt / ECAL_LSB); // Make sure the first four bits are zero newEt_uint = (newEt_uint & 0x0FFF); @@ -721,9 +664,7 @@ namespace p2eg { /* * For towers: Calculate H/E ratio given the ECAL and HCAL energies and modify the hoe() value. 
*/ - void getHoverE(ap_uint<12> ECAL, ap_uint<12> HCAL_inHcalConvention) { - // Convert HCAL ET to ECAL ET convention - ap_uint<12> HCAL = convertHcalETtoEcalET(HCAL_inHcalConvention); + void addHoverEToTower(ap_uint<12> ECAL, ap_uint<12> HCAL) { ap_uint<4> hoeOut; ap_uint<1> hoeLSB = 0; ap_uint<4> hoe = 0; @@ -782,17 +723,6 @@ namespace p2eg { etaMax = 0; brems = 0; } - - clusterInfo& operator=(const clusterInfo& rhs) { - seedEnergy = rhs.seedEnergy; - energy = rhs.energy; - et5x5 = rhs.et5x5; - et2x5 = rhs.et2x5; - phiMax = rhs.phiMax; - etaMax = rhs.etaMax; - brems = rhs.brems; - return *this; - } }; //--------------------------------------------------------// @@ -853,20 +783,6 @@ namespace p2eg { is_looseTkiso = cluster_is_looseTkiso; } - Cluster& operator=(const Cluster& rhs) { - data = rhs.data; - regionIdx = rhs.regionIdx; - calib = rhs.calib; - brems = rhs.brems; - et5x5 = rhs.et5x5; - et2x5 = rhs.et2x5; - is_ss = rhs.is_ss; - is_looseTkss = rhs.is_looseTkss; - is_iso = rhs.is_iso; - is_looseTkiso = rhs.is_looseTkiso; - return *this; - } - void setRegionIdx(int regIdx) { regionIdx = regIdx; } // Newly added ap_uint<12> clusterEnergy() const { return (data & 0xFFF); } @@ -1497,6 +1413,7 @@ namespace p2eg { l1tp2::CaloTower l1CaloTower; // Store total Et (HCAL+ECAL) in the ECAL Et member l1CaloTower.setEcalTowerEt(totalEtFloat()); + l1CaloTower.setHcalTowerEt(ecalEtFloat()); int global_tower_iEta = globalToweriEtaFromGCTcardiEta(gctCard_tower_iEta); int global_tower_iPhi = globalToweriPhiFromGCTcardiPhi(nGCTCard, gctCard_tower_iPhi); l1CaloTower.setTowerIEta(global_tower_iEta); diff --git a/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloJetEmulator.h b/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloJetEmulator.h new file mode 100644 index 0000000000000..01524d813b7d2 --- /dev/null +++ b/L1Trigger/L1CaloTrigger/interface/Phase2L1CaloJetEmulator.h @@ -0,0 +1,469 @@ +#ifndef L1TRIGGER_L1CALOTRIGGER_PHASE2L1CALOJETEMULATOR_H +#define 
L1TRIGGER_L1CALOTRIGGER_PHASE2L1CALOJETEMULATOR_H + +#include "DataFormats/L1TCalorimeterPhase2/interface/Phase2L1CaloJet.h" +#include + +static constexpr int nBarrelEta = 34; +static constexpr int nBarrelPhi = 72; +static constexpr int nHgcalEta = 36; +static constexpr int nHgcalPhi = 72; +static constexpr int nHfEta = 24; +static constexpr int nHfPhi = 72; +static constexpr int nSTEta = 6; +static constexpr int nSTEta_hf = 4; +static constexpr int nSTPhi = 24; +static constexpr int nJets = 10; + +namespace gctobj { + + class towerMax { + public: + float energy; + int phi; + int eta; + float energyMax; + int phiMax; + int etaMax; + int phiCenter; + int etaCenter; + + towerMax() { + energy = 0; + phi = 0; + eta = 0; + energyMax = 0; + phiMax = 0; + etaMax = 0; + phiCenter = 0; + etaCenter = 0; + } + }; + + class jetInfo { + public: + float seedEnergy; + float energy; + float tauEt; + int phi; + int eta; + float energyMax; + int phiMax; + int etaMax; + int phiCenter; + int etaCenter; + + jetInfo() { + seedEnergy = 0; + energy = 0; + tauEt = 0; + phi = 0; + eta = 0; + energyMax = 0; + phiMax = 0; + etaMax = 0; + phiCenter = 0; + etaCenter = 0; + } + }; + + typedef struct { + float et; + int eta; + int phi; + float towerEt; + int towerEta; + int towerPhi; + int centerEta; + int centerPhi; + } GCTsupertower_t; + + typedef struct { + GCTsupertower_t cr[nSTPhi]; + } etaStrip_t; + + typedef struct { + GCTsupertower_t etaStrip[nSTEta]; + } hgcalRegion_t; + + typedef struct { + GCTsupertower_t pk[nSTEta]; + } etaStripPeak_t; + + typedef struct { + float et; + float hoe; + } GCTtower_t; + + typedef struct { + GCTtower_t GCTtower[nBarrelEta / 2][nBarrelPhi]; + } GCTintTowers_t; + + inline int getPeakBinOf3(float et0, float et1, float et2) { + int x; + float temp; + if (et0 > et1) { + x = 0; + temp = et0; + } else { + x = 1; + temp = et1; + } + if (et2 > temp) { + x = 2; + } + return x; + } + + inline int getEtCenterOf3(float et0, float et1, float et2) { + float etSum = et0 + 
et1 + et2; + float iEtSum = 0.5 * et0 + 1.5 * et1 + 2.5 * et2; + int iAve = 0xEEF; + if (iEtSum <= etSum) + iAve = 0; + else if (iEtSum <= 2 * etSum) + iAve = 1; + else + iAve = 2; + return iAve; + } + + inline void makeST(const float GCTintTowers[nBarrelEta / 2][nBarrelPhi], + GCTsupertower_t supertower_return[nSTEta][nSTPhi]) { + float et_sumEta[nSTEta][nSTPhi][3]; + float stripEta[nSTEta][nSTPhi][3]; + float stripPhi[nSTEta][nSTPhi][3]; + + float ex_et[nBarrelEta / 2 + 1][nBarrelPhi]; + for (int j = 0; j < nBarrelPhi; j++) { + ex_et[nBarrelEta / 2][j] = 0; + for (int i = 0; i < nBarrelEta / 2; i++) { + ex_et[i][j] = GCTintTowers[i][j]; + } + } + + int index_i = 0; + int index_j = 0; + for (int i = 0; i < nBarrelEta / 2 + 1; i += 3) { // 17+1 to divide into 6 super towers + index_j = 0; + for (int j = 0; j < nBarrelPhi; j += 3) { // 72 phi to 24 super towers + stripEta[index_i][index_j][0] = ex_et[i][j] + ex_et[i][j + 1] + ex_et[i][j + 2]; + stripEta[index_i][index_j][1] = ex_et[i + 1][j] + ex_et[i + 1][j + 1] + ex_et[i + 1][j + 2]; + stripEta[index_i][index_j][2] = ex_et[i + 2][j] + ex_et[i + 2][j + 1] + ex_et[i + 2][j + 2]; + stripPhi[index_i][index_j][0] = ex_et[i][j] + ex_et[i + 1][j] + ex_et[i + 2][j]; + stripPhi[index_i][index_j][1] = ex_et[i][j + 1] + ex_et[i + 1][j + 1] + ex_et[i + 2][j + 1]; + stripPhi[index_i][index_j][2] = ex_et[i][j + 2] + ex_et[i + 1][j + 2] + ex_et[i + 2][j + 2]; + for (int k = 0; k < 3; k++) { + et_sumEta[index_i][index_j][k] = ex_et[i + k][j] + ex_et[i + k][j + 1] + ex_et[i + k][j + 2]; + } + index_j++; + } + index_i++; + } + for (int i = 0; i < nSTEta; i++) { + for (int j = 0; j < nSTPhi; j++) { + GCTsupertower_t temp; + float supertowerEt = et_sumEta[i][j][0] + et_sumEta[i][j][1] + et_sumEta[i][j][2]; + temp.et = supertowerEt; + temp.eta = i; + temp.phi = j; + int peakEta = getPeakBinOf3(stripEta[i][j][0], stripEta[i][j][1], stripEta[i][j][2]); + temp.towerEta = peakEta; + int peakPhi = getPeakBinOf3(stripPhi[i][j][0], 
stripPhi[i][j][1], stripPhi[i][j][2]); + temp.towerPhi = peakPhi; + float peakEt = ex_et[i * 3 + peakEta][j * 3 + peakPhi]; + temp.towerEt = peakEt; + int cEta = getEtCenterOf3(stripEta[i][j][0], stripEta[i][j][1], stripEta[i][j][2]); + temp.centerEta = cEta; + int cPhi = getEtCenterOf3(stripPhi[i][j][0], stripPhi[i][j][1], stripPhi[i][j][2]); + temp.centerPhi = cPhi; + supertower_return[i][j] = temp; + } + } + } + + inline void makeST_hgcal(const float hgcalTowers[nHgcalEta / 2][nHgcalPhi], + GCTsupertower_t supertower_return[nSTEta][nSTPhi]) { + float et_sumEta[nSTEta][nSTPhi][3]; + float stripEta[nSTEta][nSTPhi][3]; + float stripPhi[nSTEta][nSTPhi][3]; + + int index_i = 0; + int index_j = 0; + for (int i = 0; i < nHgcalEta / 2; i += 3) { // 18 eta to 6 super towers + index_j = 0; + for (int j = 0; j < nHgcalPhi; j += 3) { // 72 phi to 24 super towers + stripEta[index_i][index_j][0] = hgcalTowers[i][j] + hgcalTowers[i][j + 1] + hgcalTowers[i][j + 2]; + stripEta[index_i][index_j][1] = hgcalTowers[i + 1][j] + hgcalTowers[i + 1][j + 1] + hgcalTowers[i + 1][j + 2]; + stripEta[index_i][index_j][2] = hgcalTowers[i + 2][j] + hgcalTowers[i + 2][j + 1] + hgcalTowers[i + 2][j + 2]; + stripPhi[index_i][index_j][0] = hgcalTowers[i][j] + hgcalTowers[i + 1][j] + hgcalTowers[i + 2][j]; + stripPhi[index_i][index_j][1] = hgcalTowers[i][j + 1] + hgcalTowers[i + 1][j + 1] + hgcalTowers[i + 2][j + 1]; + stripPhi[index_i][index_j][2] = hgcalTowers[i][j + 2] + hgcalTowers[i + 1][j + 2] + hgcalTowers[i + 2][j + 2]; + for (int k = 0; k < 3; k++) { + et_sumEta[index_i][index_j][k] = + hgcalTowers[i + k][j] + hgcalTowers[i + k][j + 1] + hgcalTowers[i + k][j + 2]; + } + index_j++; + } + index_i++; + } + + for (int i = 0; i < nSTEta; i++) { + for (int j = 0; j < nSTPhi; j++) { + GCTsupertower_t temp; + temp.et = 0; + temp.eta = 0; + temp.phi = 0; + temp.towerEta = 0; + temp.towerPhi = 0; + temp.towerEt = 0; + temp.centerEta = 0; + temp.centerPhi = 0; + float supertowerEt = 
et_sumEta[i][j][0] + et_sumEta[i][j][1] + et_sumEta[i][j][2]; + temp.et = supertowerEt; + temp.eta = i; + temp.phi = j; + int peakEta = getPeakBinOf3(stripEta[i][j][0], stripEta[i][j][1], stripEta[i][j][2]); + temp.towerEta = peakEta; + int peakPhi = getPeakBinOf3(stripPhi[i][j][0], stripPhi[i][j][1], stripPhi[i][j][2]); + temp.towerPhi = peakPhi; + float peakEt = hgcalTowers[i * 3 + peakEta][j * 3 + peakPhi]; + temp.towerEt = peakEt; + int cEta = getEtCenterOf3(stripEta[i][j][0], stripEta[i][j][1], stripEta[i][j][2]); + temp.centerEta = cEta; + int cPhi = getEtCenterOf3(stripPhi[i][j][0], stripPhi[i][j][1], stripPhi[i][j][2]); + temp.centerPhi = cPhi; + supertower_return[i][j] = temp; + } + } + } + + inline void makeST_hf(const float hfTowers[nHfEta / 2][nHfPhi], GCTsupertower_t supertower_return[nSTEta][nSTPhi]) { + float et_sumEta[nSTEta][nSTPhi][3]; + float stripEta[nSTEta][nSTPhi][3]; + float stripPhi[nSTEta][nSTPhi][3]; + + int index_i = 0; // 5th and 6th ST to be set 0 + int index_j = 0; + for (int i = 0; i < nHfEta / 2; i += 3) { // 12 eta to 4 super towers + index_j = 0; + for (int j = 0; j < nHfPhi; j += 3) { // 72 phi to 24 super towers + stripEta[index_i][index_j][0] = hfTowers[i][j] + hfTowers[i][j + 1] + hfTowers[i][j + 2]; + stripEta[index_i][index_j][1] = hfTowers[i + 1][j] + hfTowers[i + 1][j + 1] + hfTowers[i + 1][j + 2]; + stripEta[index_i][index_j][2] = hfTowers[i + 2][j] + hfTowers[i + 2][j + 1] + hfTowers[i + 2][j + 2]; + stripPhi[index_i][index_j][0] = hfTowers[i][j] + hfTowers[i + 1][j] + hfTowers[i + 2][j]; + stripPhi[index_i][index_j][1] = hfTowers[i][j + 1] + hfTowers[i + 1][j + 1] + hfTowers[i + 2][j + 1]; + stripPhi[index_i][index_j][2] = hfTowers[i][j + 2] + hfTowers[i + 1][j + 2] + hfTowers[i + 2][j + 2]; + for (int k = 0; k < 3; k++) { + et_sumEta[index_i][index_j][k] = hfTowers[i + k][j] + hfTowers[i + k][j + 1] + hfTowers[i + k][j + 2]; + } + index_j++; + } + index_i++; + } + + for (int i = 0; i < nSTEta; i++) { + for (int j = 0; j 
< nSTPhi; j++) { + GCTsupertower_t temp; + temp.et = 0; + temp.eta = 0; + temp.phi = 0; + temp.towerEta = 0; + temp.towerPhi = 0; + temp.towerEt = 0; + temp.centerEta = 0; + temp.centerPhi = 0; + if (i < 4) { + float supertowerEt = et_sumEta[i][j][0] + et_sumEta[i][j][1] + et_sumEta[i][j][2]; + temp.et = supertowerEt; + temp.eta = i; + temp.phi = j; + int peakEta = getPeakBinOf3(stripEta[i][j][0], stripEta[i][j][1], stripEta[i][j][2]); + temp.towerEta = peakEta; + int peakPhi = getPeakBinOf3(stripPhi[i][j][0], stripPhi[i][j][1], stripPhi[i][j][2]); + temp.towerPhi = peakPhi; + float peakEt = hfTowers[i * 3 + peakEta][j * 3 + peakPhi]; + temp.towerEt = peakEt; + int cEta = getEtCenterOf3(stripEta[i][j][0], stripEta[i][j][1], stripEta[i][j][2]); + temp.centerEta = cEta; + int cPhi = getEtCenterOf3(stripPhi[i][j][0], stripPhi[i][j][1], stripPhi[i][j][2]); + temp.centerPhi = cPhi; + } + supertower_return[i][j] = temp; + } + } + } + + inline GCTsupertower_t bestOf2(const GCTsupertower_t& calotp0, const GCTsupertower_t& calotp1) { + GCTsupertower_t x; + x = (calotp0.et > calotp1.et) ? 
calotp0 : calotp1; + return x; + } + + inline GCTsupertower_t getPeakBin24N(const etaStrip_t& etaStrip) { + GCTsupertower_t best01 = bestOf2(etaStrip.cr[0], etaStrip.cr[1]); + GCTsupertower_t best23 = bestOf2(etaStrip.cr[2], etaStrip.cr[3]); + GCTsupertower_t best45 = bestOf2(etaStrip.cr[4], etaStrip.cr[5]); + GCTsupertower_t best67 = bestOf2(etaStrip.cr[6], etaStrip.cr[7]); + GCTsupertower_t best89 = bestOf2(etaStrip.cr[8], etaStrip.cr[9]); + GCTsupertower_t best1011 = bestOf2(etaStrip.cr[10], etaStrip.cr[11]); + GCTsupertower_t best1213 = bestOf2(etaStrip.cr[12], etaStrip.cr[13]); + GCTsupertower_t best1415 = bestOf2(etaStrip.cr[14], etaStrip.cr[15]); + GCTsupertower_t best1617 = bestOf2(etaStrip.cr[16], etaStrip.cr[17]); + GCTsupertower_t best1819 = bestOf2(etaStrip.cr[18], etaStrip.cr[19]); + GCTsupertower_t best2021 = bestOf2(etaStrip.cr[20], etaStrip.cr[21]); + GCTsupertower_t best2223 = bestOf2(etaStrip.cr[22], etaStrip.cr[23]); + + GCTsupertower_t best0123 = bestOf2(best01, best23); + GCTsupertower_t best4567 = bestOf2(best45, best67); + GCTsupertower_t best891011 = bestOf2(best89, best1011); + GCTsupertower_t best12131415 = bestOf2(best1213, best1415); + GCTsupertower_t best16171819 = bestOf2(best1617, best1819); + GCTsupertower_t best20212223 = bestOf2(best2021, best2223); + + GCTsupertower_t best01234567 = bestOf2(best0123, best4567); + GCTsupertower_t best89101112131415 = bestOf2(best891011, best12131415); + GCTsupertower_t best16to23 = bestOf2(best16171819, best20212223); + + GCTsupertower_t best0to15 = bestOf2(best01234567, best89101112131415); + GCTsupertower_t bestOf24 = bestOf2(best0to15, best16to23); + + return bestOf24; + } + + inline towerMax getPeakBin6N(const etaStripPeak_t& etaStrip) { + towerMax x; + + GCTsupertower_t best01 = bestOf2(etaStrip.pk[0], etaStrip.pk[1]); + GCTsupertower_t best23 = bestOf2(etaStrip.pk[2], etaStrip.pk[3]); + GCTsupertower_t best45 = bestOf2(etaStrip.pk[4], etaStrip.pk[5]); + + GCTsupertower_t best0123 = 
bestOf2(best01, best23); + + GCTsupertower_t bestOf6 = bestOf2(best0123, best45); + + x.energy = bestOf6.et; + x.phi = bestOf6.phi; + x.eta = bestOf6.eta; + x.energyMax = bestOf6.towerEt; + x.etaMax = bestOf6.towerEta; + x.phiMax = bestOf6.towerPhi; + x.etaCenter = bestOf6.centerEta; + x.phiCenter = bestOf6.centerPhi; + return x; + } + + inline jetInfo getJetPosition(GCTsupertower_t temp[nSTEta][nSTPhi]) { + etaStripPeak_t etaStripPeak; + jetInfo jet; + + for (int i = 0; i < nSTEta; i++) { + etaStrip_t test; + for (int j = 0; j < nSTPhi; j++) { + test.cr[j] = temp[i][j]; + } + etaStripPeak.pk[i] = getPeakBin24N(test); + } + + towerMax peakIn6; + peakIn6 = getPeakBin6N(etaStripPeak); + + jet.seedEnergy = peakIn6.energy; + jet.energy = 0; + jet.tauEt = 0; + jet.eta = peakIn6.eta; + jet.phi = peakIn6.phi; + jet.energyMax = peakIn6.energyMax; + jet.etaMax = peakIn6.etaMax; // overwritten in getJetValues + jet.phiMax = peakIn6.phiMax; // overwritten in getJetValues + jet.etaCenter = peakIn6.etaCenter; // overwritten in getJetValues + jet.phiCenter = peakIn6.phiCenter; // overwritten in getJetValues + + return jet; + } + + inline jetInfo getJetValues(GCTsupertower_t tempX[nSTEta][nSTPhi], int seed_eta, int seed_phi) { + float temp[nSTEta + 2][nSTPhi + 2]; + float eta_slice[3]; + jetInfo jet_tmp; + + for (int i = 0; i < nSTEta + 2; i++) { + for (int k = 0; k < nSTPhi + 2; k++) { + temp[i][k] = 0; + } + } + + for (int i = 0; i < nSTEta; i++) { + for (int k = 0; k < nSTPhi; k++) { + temp[i + 1][k + 1] = tempX[i][k].et; + } + } + + int seed_eta1, seed_phi1; + + seed_eta1 = seed_eta; //to start from corner + seed_phi1 = seed_phi; //to start from corner + float tmp1, tmp2, tmp3; + + for (int j = 0; j < nSTEta; j++) { + for (int k = 0; k < nSTPhi; k++) { + if (j == seed_eta1 && k == seed_phi1) { + for (int m = 0; m < 3; m++) { + tmp1 = temp[j + m][k]; + tmp2 = temp[j + m][k + 1]; + tmp3 = temp[j + m][k + 2]; + eta_slice[m] = tmp1 + tmp2 + tmp3; + } + } + } + } + + 
jet_tmp.energy = eta_slice[0] + eta_slice[1] + eta_slice[2]; + jet_tmp.tauEt = eta_slice[1]; //set tau Pt to be sum of ST energies in center eta slice */ + // To find the jet centre: note that seed supertower is always (1, 1) + jet_tmp.etaCenter = + 3 * seed_eta + tempX[seed_eta][seed_phi].centerEta; //this is the ET weighted eta centre of the ST + jet_tmp.phiCenter = + 3 * seed_phi + tempX[seed_eta][seed_phi].centerPhi; //this is the ET weighted phi centre of the ST + jet_tmp.etaMax = 3 * seed_eta + tempX[seed_eta][seed_phi].towerEta; + jet_tmp.phiMax = 3 * seed_phi + tempX[seed_eta][seed_phi].towerPhi; + + // set the used supertower ets to 0 + for (int i = 0; i < nSTEta; i++) { + if (i + 1 >= seed_eta && i <= seed_eta + 1) { + for (int k = 0; k < nSTPhi; k++) { + if (k + 1 >= seed_phi && k <= seed_phi + 1) + tempX[i][k].et = 0; + } + } + } + + return jet_tmp; + } + + inline jetInfo getRegion(GCTsupertower_t temp[nSTEta][nSTPhi]) { + jetInfo jet_tmp, jet; + jet_tmp = getJetPosition(temp); + int seed_phi = jet_tmp.phi; + int seed_eta = jet_tmp.eta; + float seed_energy = jet_tmp.seedEnergy; + jet = getJetValues(temp, seed_eta, seed_phi); + if (seed_energy > 10.) 
{ // suppress <= 10 GeV ST as seed + jet_tmp.energy = jet.energy; + jet_tmp.tauEt = jet.tauEt; + } else { + jet_tmp.energy = 0.; + jet_tmp.tauEt = 0.; + } + jet_tmp.etaCenter = jet.etaCenter; // this is the ET weighted eta centre of the ST + jet_tmp.phiCenter = jet.phiCenter; // this is the ET weighted phi centre of the ST + jet_tmp.etaMax = jet.etaMax; // this is the leading tower eta in the ST + jet_tmp.phiMax = jet.phiMax; // this is the leading tower phi in the ST + return jet_tmp; + } + + inline bool compareByEt(l1tp2::Phase2L1CaloJet i, l1tp2::Phase2L1CaloJet j) { return (i.jetEt() > j.jetEt()); }; + +} // namespace gctobj + +#endif diff --git a/L1Trigger/L1CaloTrigger/interface/Phase2L1GCT.h b/L1Trigger/L1CaloTrigger/interface/Phase2L1GCT.h index 7751a9e3c0c81..2f28a4cf35566 100644 --- a/L1Trigger/L1CaloTrigger/interface/Phase2L1GCT.h +++ b/L1Trigger/L1CaloTrigger/interface/Phase2L1GCT.h @@ -240,7 +240,6 @@ inline p2eg::GCTinternal_t p2eg::getClustersTowers(const p2eg::GCTcard_t& GCTcar */ inline p2eg::GCTintTowers_t p2eg::getFullTowers(const p2eg::GCTinternal_t& GCTinternal) { p2eg::GCTintTowers_t GCTintTowers; - // Positive eta for (int i = 0; i < p2eg::N_GCTPOSITIVE_FIBERS; i = i + 4) { for (int i1 = 0; i1 < 4; i1++) { @@ -249,6 +248,8 @@ inline p2eg::GCTintTowers_t p2eg::getFullTowers(const p2eg::GCTinternal_t& GCTin ap_uint<15> eta = p2eg::N_GCTETA / 2 + k; GCTintTowers.GCTtower[eta][phi].et = GCTinternal.GCTCorrfiber[phi].GCTtowers[k].et; GCTintTowers.GCTtower[eta][phi].hoe = GCTinternal.GCTCorrfiber[phi].GCTtowers[k].hoe; + GCTintTowers.GCTtower[eta][phi].ecalEt = GCTinternal.GCTCorrfiber[phi].GCTtowers[k].ecalEt; + GCTintTowers.GCTtower[eta][phi].hcalEt = GCTinternal.GCTCorrfiber[phi].GCTtowers[k].hcalEt; for (int ic1 = 0; ic1 < 4; ic1++) { for (int jc = 0; jc < p2eg::N_GCTCLUSTERS_FIBER; jc++) { ap_uint<15> eta1 = p2eg::N_GCTETA / 2 + GCTinternal.GCTCorrfiber[i + ic1].GCTclusters[jc].towEta; @@ -271,6 +272,8 @@ inline p2eg::GCTintTowers_t 
p2eg::getFullTowers(const p2eg::GCTinternal_t& GCTin ap_uint<15> phi = i + i1 - p2eg::N_GCTPOSITIVE_FIBERS; GCTintTowers.GCTtower[eta][phi].et = GCTinternal.GCTCorrfiber[i + i1].GCTtowers[k].et; GCTintTowers.GCTtower[eta][phi].hoe = GCTinternal.GCTCorrfiber[i + i1].GCTtowers[k].hoe; + GCTintTowers.GCTtower[eta][phi].ecalEt = GCTinternal.GCTCorrfiber[i + i1].GCTtowers[k].ecalEt; + GCTintTowers.GCTtower[eta][phi].hcalEt = GCTinternal.GCTCorrfiber[i + i1].GCTtowers[k].hcalEt; for (int ic1 = 0; ic1 < 4; ic1++) { for (int jc = 0; jc < p2eg::N_GCTCLUSTERS_FIBER; jc++) { ap_uint<15> eta1 = p2eg::N_GCTETA / 2 - 1 - GCTinternal.GCTCorrfiber[i + ic1].GCTclusters[jc].towEta; diff --git a/L1Trigger/L1CaloTrigger/interface/Phase2L1RCT.h b/L1Trigger/L1CaloTrigger/interface/Phase2L1RCT.h index 43547af9d0bd8..02a3fb47e375f 100644 --- a/L1Trigger/L1CaloTrigger/interface/Phase2L1RCT.h +++ b/L1Trigger/L1CaloTrigger/interface/Phase2L1RCT.h @@ -1036,18 +1036,15 @@ inline void p2eg::getECALTowersEt(p2eg::crystal tempX[p2eg::CRYSTAL_IN_ETA][p2eg } } - towerEt[0] = towerEtN[0][0][0] + towerEtN[0][0][1] + towerEtN[0][0][2] + towerEtN[0][0][3] + towerEtN[0][0][4]; - towerEt[1] = towerEtN[0][1][0] + towerEtN[0][1][1] + towerEtN[0][1][2] + towerEtN[0][1][3] + towerEtN[0][1][4]; - towerEt[2] = towerEtN[0][2][0] + towerEtN[0][2][1] + towerEtN[0][2][2] + towerEtN[0][2][3] + towerEtN[0][2][4]; - towerEt[3] = towerEtN[0][3][0] + towerEtN[0][3][1] + towerEtN[0][3][2] + towerEtN[0][3][3] + towerEtN[0][3][4]; - towerEt[4] = towerEtN[1][0][0] + towerEtN[1][0][1] + towerEtN[1][0][2] + towerEtN[1][0][3] + towerEtN[1][0][4]; - towerEt[5] = towerEtN[1][1][0] + towerEtN[1][1][1] + towerEtN[1][1][2] + towerEtN[1][1][3] + towerEtN[1][1][4]; - towerEt[6] = towerEtN[1][2][0] + towerEtN[1][2][1] + towerEtN[1][2][2] + towerEtN[1][2][3] + towerEtN[1][2][4]; - towerEt[7] = towerEtN[1][3][0] + towerEtN[1][3][1] + towerEtN[1][3][2] + towerEtN[1][3][3] + towerEtN[1][3][4]; - towerEt[8] = towerEtN[2][0][0] + 
towerEtN[2][0][1] + towerEtN[2][0][2] + towerEtN[2][0][3] + towerEtN[2][0][4]; - towerEt[9] = towerEtN[2][1][0] + towerEtN[2][1][1] + towerEtN[2][1][2] + towerEtN[2][1][3] + towerEtN[2][1][4]; - towerEt[10] = towerEtN[2][2][0] + towerEtN[2][2][1] + towerEtN[2][2][2] + towerEtN[2][2][3] + towerEtN[2][2][4]; - towerEt[11] = towerEtN[2][3][0] + towerEtN[2][3][1] + towerEtN[2][3][2] + towerEtN[2][3][3] + towerEtN[2][3][4]; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 4; j++) { + int index = j + 4 * i; + towerEt[index] = 0; + for (int k = 0; k < 5; k++) { + towerEt[index] += (towerEtN[i][j][k] >> 2); + } + } + } ap_uint<12> totalEt; for (int i = 0; i < 12; i++) { @@ -1226,7 +1223,7 @@ inline p2eg::clusterInfo p2eg::getBremsValuesPos(p2eg::crystal tempX[p2eg::CRYST for (int i = 0; i < 3; i++) { eta_slice[i] = phi0eta[i] + phi1eta[i] + phi2eta[i] + phi3eta[i] + phi4eta[i]; } - cluster_tmp.energy = (eta_slice[0] + eta_slice[1] + eta_slice[2]); + cluster_tmp.energy = (eta_slice[0] + eta_slice[1] + eta_slice[2]) >> 2; return cluster_tmp; } @@ -1301,7 +1298,7 @@ inline p2eg::clusterInfo p2eg::getBremsValuesNeg(p2eg::crystal tempX[p2eg::CRYST for (int i = 0; i < 3; i++) { eta_slice[i] = phi0eta[i] + phi1eta[i] + phi2eta[i] + phi3eta[i] + phi4eta[i]; } - cluster_tmp.energy = (eta_slice[0] + eta_slice[1] + eta_slice[2]); + cluster_tmp.energy = (eta_slice[0] + eta_slice[1] + eta_slice[2]) >> 2; return cluster_tmp; } @@ -1392,7 +1389,7 @@ inline p2eg::clusterInfo p2eg::getClusterValues(p2eg::crystal tempX[p2eg::CRYSTA eta_slice[i] = phi0eta[i] + phi1eta[i] + phi2eta[i] + phi3eta[i] + phi4eta[i]; } - cluster_tmp.energy = (eta_slice[1] + eta_slice[2] + eta_slice[3]); + cluster_tmp.energy = (eta_slice[1] + eta_slice[2] + eta_slice[3]) >> 2; // Get the energy totals in the 5x5 and also in two 2x5 et5x5Tot = (eta_slice[0] + eta_slice[1] + eta_slice[2] + eta_slice[3] + eta_slice[4]); @@ -1404,8 +1401,8 @@ inline p2eg::clusterInfo p2eg::getClusterValues(p2eg::crystal 
tempX[p2eg::CRYSTA else etSum2x5 = et2x5_2Tot; - cluster_tmp.et5x5 = et5x5Tot; - cluster_tmp.et2x5 = etSum2x5; + cluster_tmp.et5x5 = et5x5Tot >> 2; + cluster_tmp.et2x5 = etSum2x5 >> 2; return cluster_tmp; } @@ -1427,7 +1424,7 @@ inline p2eg::Cluster p2eg::getClusterFromRegion3x4(p2eg::crystal temp[p2eg::CRYS cluster_tmp = p2eg::getClusterPosition(ecalRegion); - float seedEnergyFloat = cluster_tmp.seedEnergy / 8.0; + float seedEnergyFloat = cluster_tmp.seedEnergy * ECAL_LSB; // Do not make cluster if seed is less than 1.0 GeV if (seedEnergyFloat < 1.0) { diff --git a/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloEGammaEmulator.cc b/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloEGammaEmulator.cc index 209b285f3fb29..999b208da8cde 100644 --- a/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloEGammaEmulator.cc +++ b/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloEGammaEmulator.cc @@ -134,8 +134,8 @@ void Phase2L1CaloEGammaEmulator::produce(edm::Event& iEvent, const edm::EventSet for (const auto& hit : *pcalohits.product()) { if (hit.encodedEt() > 0) // hit.encodedEt() returns an int corresponding to 2x the crystal Et { - // Et is 10 bit, by keeping the ADC saturation Et at 120 GeV it means that you have to divide by 8 - float et = hit.encodedEt() * p2eg::ECAL_LSB; + // Et is 10 bit, by keeping the ADC saturation Et at 120 GeV it means that you have to multiply by 0.125 (input LSB) + float et = hit.encodedEt() * 0.125; if (et < p2eg::cut_500_MeV) { continue; // Reject hits with < 500 MeV ET } @@ -332,8 +332,9 @@ void Phase2L1CaloEGammaEmulator::produce(edm::Event& iEvent, const edm::EventSet // Iteratively find four clusters and remove them from 'temporary' as we go, and fill cluster_list for (int c = 0; c < p2eg::N_CLUSTERS_PER_REGION; c++) { - p2eg::Cluster newCluster = p2eg::getClusterFromRegion3x4(temporary); // remove cluster from 'temporary' - newCluster.setRegionIdx(idxRegion); // add the region number + p2eg::Cluster newCluster = p2eg::getClusterFromRegion3x4( + temporary); 
// remove cluster from 'temporary', adjust for LSB 0.5 at GCT in getClusterValues + newCluster.setRegionIdx(idxRegion); // add the region number if (newCluster.clusterEnergy() > 0) { // do not push back 0-energy clusters cluster_list[cc].push_back(newCluster); @@ -342,7 +343,7 @@ void Phase2L1CaloEGammaEmulator::produce(edm::Event& iEvent, const edm::EventSet // Create towers using remaining ECAL energy, and the HCAL towers were already calculated in towersEtHCAL[12] ap_uint<12> towerEtECAL[12]; - p2eg::getECALTowersEt(temporary, towerEtECAL); + p2eg::getECALTowersEt(temporary, towerEtECAL); // adjust for LSB 0.5 at GCT // Fill towerHCALCard and towerECALCard arrays for (int i = 0; i < 12; i++) { @@ -440,7 +441,7 @@ void Phase2L1CaloEGammaEmulator::produce(edm::Event& iEvent, const edm::EventSet for (int jj = 0; jj < p2eg::n_towers_cardPhi; ++jj) { // 4 towers per card in phi ap_uint<12> ecalEt = towerECALCard[ii][jj][cc].et(); ap_uint<12> hcalEt = towerHCALCard[ii][jj][cc].et(); - towerECALCard[ii][jj][cc].getHoverE(ecalEt, hcalEt); + towerECALCard[ii][jj][cc].addHoverEToTower(ecalEt, hcalEt); } } diff --git a/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloJetEmulator.cc b/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloJetEmulator.cc new file mode 100644 index 0000000000000..089d630fa6fc3 --- /dev/null +++ b/L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloJetEmulator.cc @@ -0,0 +1,924 @@ +// -*- C++ -*- +// +// Package: L1Trigger/L1CaloTrigger +// Class: Phase2L1CaloJetEmulator +// +/**\class Phase2L1CaloJetEmulator Phase2L1CaloJetEmulator.cc L1Trigger/L1CaloTrigger/plugins/Phase2L1CaloJetEmulator.cc + + Description: Producing GCT calo jets using GCT barrel, HGCal and HF towers, based on firmware logic. + + Implementation: + Depends on producers for CaloTowerCollection, HGCalTowerBxCollection and HcalTrigPrimDigiCollection. 
+*/ +// +// Original Author: Pallabi Das +// Created: Tue, 11 Apr 2023 11:27:33 GMT +// +// + +// system include files +#include + +// user include files +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" + +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" + +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/StreamID.h" + +#include "DataFormats/L1TCalorimeterPhase2/interface/CaloCrystalCluster.h" +#include "DataFormats/L1TCalorimeterPhase2/interface/CaloTower.h" +#include "DataFormats/L1TCalorimeterPhase2/interface/CaloPFCluster.h" +#include "DataFormats/L1TCalorimeterPhase2/interface/Phase2L1CaloJet.h" +#include "DataFormats/L1Trigger/interface/EGamma.h" +#include "DataFormats/L1THGCal/interface/HGCalTower.h" +#include "DataFormats/HcalDigi/interface/HcalDigiCollections.h" +#include "SimDataFormats/CaloHit/interface/PCaloHitContainer.h" +#include "CalibFormats/CaloTPG/interface/CaloTPGTranscoder.h" +#include "CalibFormats/CaloTPG/interface/CaloTPGRecord.h" +#include "L1Trigger/L1TCalorimeter/interface/CaloTools.h" + +#include "L1Trigger/L1CaloTrigger/interface/Phase2L1CaloJetEmulator.h" +#include +#include +#include +#include +#include +#include "TF1.h" + +// +// class declaration +// + +class Phase2L1CaloJetEmulator : public edm::stream::EDProducer<> { +public: + explicit Phase2L1CaloJetEmulator(const edm::ParameterSet&); + ~Phase2L1CaloJetEmulator() override; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void produce(edm::Event&, const edm::EventSetup&) override; + float get_jet_pt_calibration(const float& jet_pt, const float& jet_eta) const; + float get_tau_pt_calibration(const float& tau_pt, const float& tau_eta) const; + + // ----------member data --------------------------- + edm::EDGetTokenT caloTowerToken_; + edm::EDGetTokenT hgcalTowerToken_; + 
edm::EDGetTokenT hfToken_; + edm::ESGetToken decoderTag_; + std::vector nHits_to_nvtx_params; + std::vector nvtx_to_PU_sub_params; + std::map nHits_to_nvtx_funcs; + std::map hgcalEM_nvtx_to_PU_sub_funcs; + std::map hgcalHad_nvtx_to_PU_sub_funcs; + std::map hf_nvtx_to_PU_sub_funcs; + std::map> all_nvtx_to_PU_sub_funcs; + + // For fetching jet pt calibrations + std::vector jetPtBins; + std::vector absEtaBinsBarrel; + std::vector jetCalibrationsBarrel; + std::vector absEtaBinsHGCal; + std::vector jetCalibrationsHGCal; + std::vector absEtaBinsHF; + std::vector jetCalibrationsHF; + + // For fetching tau pt calibrations + std::vector tauPtBins; + std::vector tauAbsEtaBinsBarrel; + std::vector tauCalibrationsBarrel; + std::vector tauAbsEtaBinsHGCal; + std::vector tauCalibrationsHGCal; + + // For storing jet calibrations + std::vector> calibrationsBarrel; + std::vector> calibrationsHGCal; + std::vector> calibrationsHF; + + // For storing tau calibrations + std::vector> tauPtCalibrationsBarrel; + std::vector> tauPtCalibrationsHGCal; +}; + +// +// constructors and destructor +// +Phase2L1CaloJetEmulator::Phase2L1CaloJetEmulator(const edm::ParameterSet& iConfig) + : caloTowerToken_(consumes(iConfig.getParameter("gctFullTowers"))), + hgcalTowerToken_(consumes(iConfig.getParameter("hgcalTowers"))), + hfToken_(consumes(iConfig.getParameter("hcalDigis"))), + decoderTag_(esConsumes(edm::ESInputTag("", ""))), + nHits_to_nvtx_params(iConfig.getParameter>("nHits_to_nvtx_params")), + nvtx_to_PU_sub_params(iConfig.getParameter>("nvtx_to_PU_sub_params")), + jetPtBins(iConfig.getParameter>("jetPtBins")), + absEtaBinsBarrel(iConfig.getParameter>("absEtaBinsBarrel")), + jetCalibrationsBarrel(iConfig.getParameter>("jetCalibrationsBarrel")), + absEtaBinsHGCal(iConfig.getParameter>("absEtaBinsHGCal")), + jetCalibrationsHGCal(iConfig.getParameter>("jetCalibrationsHGCal")), + absEtaBinsHF(iConfig.getParameter>("absEtaBinsHF")), + jetCalibrationsHF(iConfig.getParameter>("jetCalibrationsHF")), + 
tauPtBins(iConfig.getParameter>("tauPtBins")), + tauAbsEtaBinsBarrel(iConfig.getParameter>("tauAbsEtaBinsBarrel")), + tauCalibrationsBarrel(iConfig.getParameter>("tauCalibrationsBarrel")), + tauAbsEtaBinsHGCal(iConfig.getParameter>("tauAbsEtaBinsHGCal")), + tauCalibrationsHGCal(iConfig.getParameter>("tauCalibrationsHGCal")) { + for (uint i = 0; i < nHits_to_nvtx_params.size(); i++) { + edm::ParameterSet* pset = &nHits_to_nvtx_params.at(i); + std::string calo = pset->getParameter("fit"); + nHits_to_nvtx_funcs[calo.c_str()] = TF1(calo.c_str(), "[0] + [1] * x"); + nHits_to_nvtx_funcs[calo.c_str()].SetParameter(0, pset->getParameter>("nHits_params").at(0)); + nHits_to_nvtx_funcs[calo.c_str()].SetParameter(1, pset->getParameter>("nHits_params").at(1)); + } + all_nvtx_to_PU_sub_funcs["hgcalEM"] = hgcalEM_nvtx_to_PU_sub_funcs; + all_nvtx_to_PU_sub_funcs["hgcalHad"] = hgcalHad_nvtx_to_PU_sub_funcs; + all_nvtx_to_PU_sub_funcs["hf"] = hf_nvtx_to_PU_sub_funcs; + for (uint i = 0; i < nvtx_to_PU_sub_params.size(); i++) { + edm::ParameterSet* pset = &nvtx_to_PU_sub_params.at(i); + std::string calo = pset->getParameter("calo"); + std::string iEta = pset->getParameter("iEta"); + double p1 = pset->getParameter>("nvtx_params").at(0); + double p2 = pset->getParameter>("nvtx_params").at(1); + + all_nvtx_to_PU_sub_funcs[calo.c_str()][iEta.c_str()] = TF1(calo.c_str(), "[0] + [1] * x"); + all_nvtx_to_PU_sub_funcs[calo.c_str()][iEta.c_str()].SetParameter(0, p1); + all_nvtx_to_PU_sub_funcs[calo.c_str()][iEta.c_str()].SetParameter(1, p2); + } + + // Fill the jet pt calibration 2D vector + // Dimension 1 is AbsEta bin + // Dimension 2 is jet pT bin which is filled with the actual callibration value + // size()-1 b/c the inputs have lower and upper bounds + // Do Barrel, then HGCal, then HF + int index = 0; + for (unsigned int abs_eta = 0; abs_eta < absEtaBinsBarrel.size() - 1; abs_eta++) { + std::vector pt_bin_calibs; + for (unsigned int pt = 0; pt < jetPtBins.size() - 1; pt++) { + 
pt_bin_calibs.push_back(jetCalibrationsBarrel.at(index)); + index++; + } + calibrationsBarrel.push_back(pt_bin_calibs); + } + + index = 0; + for (unsigned int abs_eta = 0; abs_eta < absEtaBinsHGCal.size() - 1; abs_eta++) { + std::vector pt_bin_calibs; + for (unsigned int pt = 0; pt < jetPtBins.size() - 1; pt++) { + pt_bin_calibs.push_back(jetCalibrationsHGCal.at(index)); + index++; + } + calibrationsHGCal.push_back(pt_bin_calibs); + } + + index = 0; + for (unsigned int abs_eta = 0; abs_eta < absEtaBinsHF.size() - 1; abs_eta++) { + std::vector pt_bin_calibs; + for (unsigned int pt = 0; pt < jetPtBins.size() - 1; pt++) { + pt_bin_calibs.push_back(jetCalibrationsHF.at(index)); + index++; + } + calibrationsHF.push_back(pt_bin_calibs); + } + + // Fill the tau pt calibration 2D vector + // Dimension 1 is AbsEta bin + // Dimension 2 is tau pT bin which is filled with the actual calibration value + // Do Barrel, then HGCal + // + // Note to future developers: be very concious of the order in which the calibrations are printed + // out in tool which makse the cfg files. You need to match that exactly when loading them and + // using the calibrations below. 
+ index = 0; + for (unsigned int abs_eta = 0; abs_eta < tauAbsEtaBinsBarrel.size() - 1; abs_eta++) { + std::vector pt_bin_calibs; + for (unsigned int pt = 0; pt < tauPtBins.size() - 1; pt++) { + pt_bin_calibs.push_back(tauCalibrationsBarrel.at(index)); + index++; + } + tauPtCalibrationsBarrel.push_back(pt_bin_calibs); + } + + index = 0; + for (unsigned int abs_eta = 0; abs_eta < tauAbsEtaBinsHGCal.size() - 1; abs_eta++) { + std::vector pt_bin_calibs; + for (unsigned int pt = 0; pt < tauPtBins.size() - 1; pt++) { + pt_bin_calibs.push_back(tauCalibrationsHGCal.at(index)); + index++; + } + tauPtCalibrationsHGCal.push_back(pt_bin_calibs); + } + + produces("GCTJet"); +} + +Phase2L1CaloJetEmulator::~Phase2L1CaloJetEmulator() {} + +// +// member functions +// + +// ------------ method called to produce the data ------------ +void Phase2L1CaloJetEmulator::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + using namespace edm; + std::unique_ptr jetCands(make_unique()); + + // Assign ETs to each eta-half of the barrel region (17x72 --> 18x72 to be able to make 3x3 super towers) + edm::Handle> caloTowerCollection; + if (!iEvent.getByToken(caloTowerToken_, caloTowerCollection)) + edm::LogError("Phase2L1CaloJetEmulator") << "Failed to get towers from caloTowerCollection!"; + + iEvent.getByToken(caloTowerToken_, caloTowerCollection); + float GCTintTowers[nBarrelEta][nBarrelPhi]; + float realEta[nBarrelEta][nBarrelPhi]; + float realPhi[nBarrelEta][nBarrelPhi]; + for (const l1tp2::CaloTower& i : *caloTowerCollection) { + int ieta = i.towerIEta(); + int iphi = i.towerIPhi(); + if (i.ecalTowerEt() > 1.) 
+ GCTintTowers[ieta][iphi] = i.ecalTowerEt(); // suppress <= 1 GeV towers + else + GCTintTowers[ieta][iphi] = 0; + realEta[ieta][iphi] = i.towerEta(); + realPhi[ieta][iphi] = i.towerPhi(); + } + + float temporary[nBarrelEta / 2][nBarrelPhi]; + + // HGCal and HF info used for nvtx estimation + edm::Handle hgcalTowerCollection; + if (!iEvent.getByToken(hgcalTowerToken_, hgcalTowerCollection)) + edm::LogError("Phase2L1CaloJetEmulator") << "Failed to get towers from hgcalTowerCollection!"; + l1t::HGCalTowerBxCollection hgcalTowerColl; + iEvent.getByToken(hgcalTowerToken_, hgcalTowerCollection); + hgcalTowerColl = (*hgcalTowerCollection.product()); + + edm::Handle hfHandle; + if (!iEvent.getByToken(hfToken_, hfHandle)) + edm::LogError("Phase2L1CaloJetEmulator") << "Failed to get HcalTrigPrimDigi for HF!"; + iEvent.getByToken(hfToken_, hfHandle); + + int i_hgcalEM_hits_leq_threshold = 0; + int i_hgcalHad_hits_leq_threshold = 0; + int i_hf_hits_leq_threshold = 0; + for (auto it = hgcalTowerColl.begin(0); it != hgcalTowerColl.end(0); it++) { + if (it->etEm() <= 1.75 && it->etEm() >= 1.25) { + i_hgcalEM_hits_leq_threshold++; + } + if (it->etHad() <= 1.25 && it->etHad() >= 0.75) { + i_hgcalHad_hits_leq_threshold++; + } + } + const auto& decoder = iSetup.getData(decoderTag_); + for (const auto& hit : *hfHandle.product()) { + double et = decoder.hcaletValue(hit.id(), hit.t0()); + if (abs(hit.id().ieta()) < l1t::CaloTools::kHFBegin) + continue; + if (abs(hit.id().ieta()) > l1t::CaloTools::kHFEnd) + continue; + if (et <= 15.0 && et >= 10.0) + i_hf_hits_leq_threshold++; + } + + double hgcalEM_nvtx = nHits_to_nvtx_funcs["hgcalEM"].Eval(i_hgcalEM_hits_leq_threshold); + if (hgcalEM_nvtx < 0) + hgcalEM_nvtx = 0; + double hgcalHad_nvtx = nHits_to_nvtx_funcs["hgcalHad"].Eval(i_hgcalHad_hits_leq_threshold); + if (hgcalHad_nvtx < 0) + hgcalHad_nvtx = 0; + double hf_nvtx = nHits_to_nvtx_funcs["hf"].Eval(i_hf_hits_leq_threshold); + if (hf_nvtx < 0) + hf_nvtx = 0; + double EstimatedNvtx = 
(hgcalEM_nvtx + hgcalHad_nvtx + hf_nvtx) / 3.; + + // Assign ETs to each eta-half of the endcap region (18x72) + float hgcalTowers[nHgcalEta][nHgcalPhi]; + float hgcalEta[nHgcalEta][nHgcalPhi]; + float hgcalPhi[nHgcalEta][nHgcalPhi]; + + for (int iphi = 0; iphi < nHgcalPhi; iphi++) { + for (int ieta = 0; ieta < nHgcalEta; ieta++) { + hgcalTowers[ieta][iphi] = 0; + if (ieta < nHgcalEta / 2) + hgcalEta[ieta][iphi] = -3.045 + ieta * 0.087 + 0.0435; + else + hgcalEta[ieta][iphi] = 1.479 + (ieta - nHgcalEta / 2) * 0.087 + 0.0435; + hgcalPhi[ieta][iphi] = -M_PI + (iphi * 2 * M_PI / nHgcalPhi) + (M_PI / nHgcalPhi); + } + } + + for (auto it = hgcalTowerColl.begin(0); it != hgcalTowerColl.end(0); it++) { + float eta = it->eta(); + int ieta; + if (eta < 0) + ieta = 19 - it->id().iEta(); + else + ieta = 20 + it->id().iEta(); + if (eta > 1.479) + ieta = ieta - 4; + int iphi = it->id().iPhi(); + + float hgcal_etEm = it->etEm(); + float hgcal_etHad = it->etHad(); + std::string etaKey = ""; + if (abs(eta) <= 1.8) + etaKey = "er1p4to1p8"; + else if (abs(eta) <= 2.1 && abs(eta) > 1.8) + etaKey = "er1p8to2p1"; + else if (abs(eta) <= 2.4 && abs(eta) > 2.1) + etaKey = "er2p1to2p4"; + else if (abs(eta) <= 2.7 && abs(eta) > 2.4) + etaKey = "er2p4to2p7"; + else if (abs(eta) <= 3.1 && abs(eta) > 2.7) + etaKey = "er2p7to3p1"; + if (!etaKey.empty()) { + hgcal_etEm = it->etEm() - all_nvtx_to_PU_sub_funcs["hgcalEM"][etaKey].Eval(EstimatedNvtx); + hgcal_etHad = it->etHad() - all_nvtx_to_PU_sub_funcs["hgcalHad"][etaKey].Eval(EstimatedNvtx); + } + + if (hgcal_etEm < 0) + hgcal_etEm = 0; + if (hgcal_etHad < 0) + hgcal_etHad = 0; + if ((it->etEm() + it->etHad() > 1.) 
&& abs(eta) > 1.479) + hgcalTowers[ieta][iphi] = hgcal_etEm + hgcal_etHad; // suppress <= 1 GeV towers + } + + float temporary_hgcal[nHgcalEta / 2][nHgcalPhi]; + + // Assign ETs to each eta-half of the forward region (12x72) + float hfTowers[nHfEta][nHfPhi]; + float hfEta[nHfEta][nHfPhi]; + float hfPhi[nHfEta][nHfPhi]; + for (int iphi = 0; iphi < nHfPhi; iphi++) { + for (int ieta = 0; ieta < nHfEta; ieta++) { + hfTowers[ieta][iphi] = 0; + int temp; + if (ieta < nHfEta / 2) + temp = ieta - l1t::CaloTools::kHFEnd; + else + temp = ieta - nHfEta / 2 + l1t::CaloTools::kHFBegin + 1; + hfEta[ieta][iphi] = l1t::CaloTools::towerEta(temp); + hfPhi[ieta][iphi] = -M_PI + (iphi * 2 * M_PI / nHfPhi) + (M_PI / nHfPhi); + } + } + + for (const auto& hit : *hfHandle.product()) { + double et = decoder.hcaletValue(hit.id(), hit.t0()); + int ieta = 0; + if (abs(hit.id().ieta()) < l1t::CaloTools::kHFBegin) + continue; + if (abs(hit.id().ieta()) > l1t::CaloTools::kHFEnd) + continue; + if (hit.id().ieta() <= -(l1t::CaloTools::kHFBegin + 1)) { + ieta = hit.id().ieta() + l1t::CaloTools::kHFEnd; + } else if (hit.id().ieta() >= (l1t::CaloTools::kHFBegin + 1)) { + ieta = nHfEta / 2 + (hit.id().ieta() - (l1t::CaloTools::kHFBegin + 1)); + } + int iphi = 0; + if (hit.id().iphi() <= nHfPhi / 2) + iphi = hit.id().iphi() + (nHfPhi / 2 - 1); + else if (hit.id().iphi() > nHfPhi / 2) + iphi = hit.id().iphi() - (nHfPhi / 2 + 1); + if (abs(hit.id().ieta()) <= 33 && abs(hit.id().ieta()) >= 29) + et = et - all_nvtx_to_PU_sub_funcs["hf"]["er29to33"].Eval(EstimatedNvtx); + if (abs(hit.id().ieta()) <= 37 && abs(hit.id().ieta()) >= 34) + et = et - all_nvtx_to_PU_sub_funcs["hf"]["er34to37"].Eval(EstimatedNvtx); + if (abs(hit.id().ieta()) <= 41 && abs(hit.id().ieta()) >= 38) + et = et - all_nvtx_to_PU_sub_funcs["hf"]["er38to41"].Eval(EstimatedNvtx); + if (et < 0) + et = 0; + if (et > 1.) 
+ hfTowers[ieta][iphi] = et; // suppress <= 1 GeV towers + } + + float temporary_hf[nHfEta / 2][nHfPhi]; + + // Begin creating jets + // First create 3x3 super towers: 6x24 in barrel, endcap; 4x24 in forward + // Then create up to 10 jets in each eta half of barrel, endcap, forward regions + + vector halfBarrelJets, halfHgcalJets, halfHfJets; + halfBarrelJets.clear(); + halfHgcalJets.clear(); + halfHfJets.clear(); + vector allJets; + allJets.clear(); + + for (int k = 0; k < 2; k++) { + halfBarrelJets.clear(); + halfHgcalJets.clear(); + halfHfJets.clear(); + gctobj::jetInfo jet[3 * nJets]; + + // BARREL + for (int iphi = 0; iphi < nBarrelPhi; iphi++) { + for (int ieta = 0; ieta < nBarrelEta / 2; ieta++) { + if (k == 0) + temporary[ieta][iphi] = GCTintTowers[ieta][iphi]; + else + temporary[ieta][iphi] = GCTintTowers[nBarrelEta / 2 + ieta][iphi]; + } + } + + gctobj::GCTsupertower_t tempST[nSTEta][nSTPhi]; + gctobj::makeST(temporary, tempST); + + for (int i = 0; i < nJets; i++) { + jet[i] = gctobj::getRegion(tempST); + l1tp2::Phase2L1CaloJet tempJet; + int gctjeteta = jet[i].etaCenter; + int gctjetphi = jet[i].phiCenter; + tempJet.setJetIEta(gctjeteta + k * nBarrelEta / 2); + tempJet.setJetIPhi(gctjetphi); + float jeteta = realEta[gctjeteta + k * nBarrelEta / 2][gctjetphi]; + float jetphi = realPhi[gctjeteta + k * nBarrelEta / 2][gctjetphi]; + tempJet.setJetEta(jeteta); + tempJet.setJetPhi(jetphi); + tempJet.setJetEt(get_jet_pt_calibration(jet[i].energy, jeteta)); + tempJet.setTauEt(get_tau_pt_calibration(jet[i].tauEt, jeteta)); + tempJet.setTowerEt(jet[i].energyMax); + int gcttowereta = jet[i].etaMax; + int gcttowerphi = jet[i].phiMax; + tempJet.setTowerIEta(gcttowereta + k * nBarrelEta / 2); + tempJet.setTowerIPhi(gcttowerphi); + float towereta = realEta[gcttowereta + k * nBarrelEta / 2][gcttowerphi]; + float towerphi = realPhi[gcttowereta + k * nBarrelEta / 2][gcttowerphi]; + tempJet.setTowerEta(towereta); + tempJet.setTowerPhi(towerphi); + 
reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(tempJet.jetEt()); + tempJetp4.SetEta(tempJet.jetEta()); + tempJetp4.SetPhi(tempJet.jetPhi()); + tempJetp4.SetM(0.); + tempJet.setP4(tempJetp4); + + if (jet[i].energy > 0.) + halfBarrelJets.push_back(tempJet); + } + + // ENDCAP + for (int iphi = 0; iphi < nHgcalPhi; iphi++) { + for (int ieta = 0; ieta < nHgcalEta / 2; ieta++) { + if (k == 0) + temporary_hgcal[ieta][iphi] = hgcalTowers[ieta][iphi]; + else + temporary_hgcal[ieta][iphi] = hgcalTowers[nHgcalEta / 2 + ieta][iphi]; + } + } + + gctobj::GCTsupertower_t tempST_hgcal[nSTEta][nSTPhi]; + gctobj::makeST_hgcal(temporary_hgcal, tempST_hgcal); + for (int i = nJets; i < 2 * nJets; i++) { + jet[i] = gctobj::getRegion(tempST_hgcal); + l1tp2::Phase2L1CaloJet tempJet; + int hgcaljeteta = jet[i].etaCenter; + int hgcaljetphi = jet[i].phiCenter; + tempJet.setJetIEta(hgcaljeteta + k * nHgcalEta / 2); + tempJet.setJetIPhi(hgcaljetphi); + float jeteta = hgcalEta[hgcaljeteta + k * nHgcalEta / 2][hgcaljetphi]; + float jetphi = hgcalPhi[hgcaljeteta + k * nHgcalEta / 2][hgcaljetphi]; + tempJet.setJetEta(jeteta); + tempJet.setJetPhi(jetphi); + tempJet.setJetEt(get_jet_pt_calibration(jet[i].energy, jeteta)); + tempJet.setTauEt(get_tau_pt_calibration(jet[i].tauEt, jeteta)); + tempJet.setTowerEt(jet[i].energyMax); + int hgcaltowereta = jet[i].etaMax; + int hgcaltowerphi = jet[i].phiMax; + tempJet.setTowerIEta(hgcaltowereta + k * nHgcalEta / 2); + tempJet.setTowerIPhi(hgcaltowerphi); + float towereta = hgcalEta[hgcaltowereta + k * nHgcalEta / 2][hgcaltowerphi]; + float towerphi = hgcalPhi[hgcaltowereta + k * nHgcalEta / 2][hgcaltowerphi]; + tempJet.setTowerEta(towereta); + tempJet.setTowerPhi(towerphi); + reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(tempJet.jetEt()); + tempJetp4.SetEta(tempJet.jetEta()); + tempJetp4.SetPhi(tempJet.jetPhi()); + tempJetp4.SetM(0.); + tempJet.setP4(tempJetp4); + + if (jet[i].energy > 0.) 
+ halfHgcalJets.push_back(tempJet); + } + + // HF + for (int iphi = 0; iphi < nHfPhi; iphi++) { + for (int ieta = 0; ieta < nHfEta / 2; ieta++) { + if (k == 0) + temporary_hf[ieta][iphi] = hfTowers[ieta][iphi]; + else + temporary_hf[ieta][iphi] = hfTowers[nHfEta / 2 + ieta][iphi]; + } + } + + gctobj::GCTsupertower_t tempST_hf[nSTEta][nSTPhi]; + gctobj::makeST_hf(temporary_hf, tempST_hf); + for (int i = 2 * nJets; i < 3 * nJets; i++) { + jet[i] = gctobj::getRegion(tempST_hf); + l1tp2::Phase2L1CaloJet tempJet; + int hfjeteta = jet[i].etaCenter; + int hfjetphi = jet[i].phiCenter; + tempJet.setJetIEta(hfjeteta + k * nHfEta / 2); + tempJet.setJetIPhi(hfjetphi); + float jeteta = hfEta[hfjeteta + k * nHfEta / 2][hfjetphi]; + float jetphi = hfPhi[hfjeteta + k * nHfEta / 2][hfjetphi]; + tempJet.setJetEta(jeteta); + tempJet.setJetPhi(jetphi); + tempJet.setJetEt(get_jet_pt_calibration(jet[i].energy, jeteta)); + tempJet.setTauEt(get_tau_pt_calibration(jet[i].tauEt, jeteta)); + tempJet.setTowerEt(jet[i].energyMax); + int hftowereta = jet[i].etaMax; + int hftowerphi = jet[i].phiMax; + tempJet.setTowerIEta(hftowereta + k * nHfEta / 2); + tempJet.setTowerIPhi(hftowerphi); + float towereta = hfEta[hftowereta + k * nHfEta / 2][hftowerphi]; + float towerphi = hfPhi[hftowereta + k * nHfEta / 2][hftowerphi]; + tempJet.setTowerEta(towereta); + tempJet.setTowerPhi(towerphi); + reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(tempJet.jetEt()); + tempJetp4.SetEta(tempJet.jetEta()); + tempJetp4.SetPhi(tempJet.jetPhi()); + tempJetp4.SetM(0.); + tempJet.setP4(tempJetp4); + + if (jet[i].energy > 0.) 
+ halfHfJets.push_back(tempJet); + } + + // Stitching: + // if the jet eta is at the boundary: for HB it should be within 0-1 in -ve eta, 32-33 in +ve eta; for HE it should be within 0-1/16-17 in -ve eta, 34-35/18-19 in +ve eta; for HF it should be within 10-11 in -ve eta, 12-13 in +ve eta + // then get the phi of that jet and check if there is a neighbouring jet with the same phi, then merge to the jet that has higher ET + // in both eta/phi allow a maximum of one tower between jet centers for stitching + + for (size_t i = 0; i < halfHgcalJets.size(); i++) { + if (halfHgcalJets.at(i).jetIEta() >= (nHgcalEta / 2 - 2) && halfHgcalJets.at(i).jetIEta() < (nHgcalEta / 2 + 2)) { + float hgcal_ieta = k * nBarrelEta + halfHgcalJets.at(i).jetIEta(); + for (size_t j = 0; j < halfBarrelJets.size(); j++) { + float barrel_ieta = nHgcalEta / 2 + halfBarrelJets.at(j).jetIEta(); + if (abs(barrel_ieta - hgcal_ieta) <= 2 && + abs(halfBarrelJets.at(j).jetIPhi() - halfHgcalJets.at(i).jetIPhi()) <= 2) { + float totalet = halfBarrelJets.at(j).jetEt() + halfHgcalJets.at(i).jetEt(); + float totalTauEt = halfBarrelJets.at(j).tauEt() + halfHgcalJets.at(i).tauEt(); + if (halfBarrelJets.at(j).jetEt() > halfHgcalJets.at(i).jetEt()) { + halfHgcalJets.at(i).setJetEt(0.); + halfHgcalJets.at(i).setTauEt(0.); + halfBarrelJets.at(j).setJetEt(totalet); + halfBarrelJets.at(j).setTauEt(totalTauEt); + reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(totalet); + tempJetp4.SetEta(halfBarrelJets.at(j).jetEta()); + tempJetp4.SetPhi(halfBarrelJets.at(j).jetPhi()); + tempJetp4.SetM(0.); + halfBarrelJets.at(j).setP4(tempJetp4); + } else { + halfHgcalJets.at(i).setJetEt(totalet); + halfHgcalJets.at(i).setTauEt(totalTauEt); + halfBarrelJets.at(j).setJetEt(0.); + halfBarrelJets.at(j).setTauEt(0.); + reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(totalet); + tempJetp4.SetEta(halfHgcalJets.at(i).jetEta()); + tempJetp4.SetPhi(halfHgcalJets.at(i).jetPhi()); + tempJetp4.SetM(0.); + 
halfHgcalJets.at(i).setP4(tempJetp4); + } + } + } + } else if (halfHgcalJets.at(i).jetIEta() < 2 || halfHgcalJets.at(i).jetIEta() >= (nHgcalEta - 2)) { + float hgcal_ieta = k * nBarrelEta + nHfEta / 2 + halfHgcalJets.at(i).jetIEta(); + for (size_t j = 0; j < halfHfJets.size(); j++) { + float hf_ieta = k * nBarrelEta + k * nHgcalEta + halfHfJets.at(j).jetIEta(); + if (abs(hgcal_ieta - hf_ieta) < 3 && abs(halfHfJets.at(j).jetIPhi() - halfHgcalJets.at(i).jetIPhi()) < 3) { + float totalet = halfHfJets.at(j).jetEt() + halfHgcalJets.at(i).jetEt(); + float totalTauEt = halfHfJets.at(j).tauEt() + halfHgcalJets.at(i).tauEt(); + if (halfHfJets.at(j).jetEt() > halfHgcalJets.at(i).jetEt()) { + halfHgcalJets.at(i).setJetEt(0.); + halfHgcalJets.at(i).setTauEt(0.); + halfHfJets.at(j).setJetEt(totalet); + halfHfJets.at(j).setTauEt(totalTauEt); + reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(totalet); + tempJetp4.SetEta(halfHfJets.at(j).jetEta()); + tempJetp4.SetPhi(halfHfJets.at(j).jetPhi()); + tempJetp4.SetM(0.); + halfHfJets.at(j).setP4(tempJetp4); + } else { + halfHgcalJets.at(i).setJetEt(totalet); + halfHgcalJets.at(i).setTauEt(totalTauEt); + halfHfJets.at(j).setJetEt(0.); + halfHfJets.at(j).setTauEt(0.); + reco::Candidate::PolarLorentzVector tempJetp4; + tempJetp4.SetPt(totalet); + tempJetp4.SetEta(halfHgcalJets.at(i).jetEta()); + tempJetp4.SetPhi(halfHgcalJets.at(i).jetPhi()); + tempJetp4.SetM(0.); + halfHgcalJets.at(i).setP4(tempJetp4); + } + } + } + } + } + + // Write up to 6 jets from each eta half of barrel, endcap, forward regions + + std::sort(halfBarrelJets.begin(), halfBarrelJets.end(), gctobj::compareByEt); + for (size_t i = 0; i < halfBarrelJets.size(); i++) { + if (halfBarrelJets.at(i).jetEt() > 0. && i < 6) + allJets.push_back(halfBarrelJets.at(i)); + } + + std::sort(halfHgcalJets.begin(), halfHgcalJets.end(), gctobj::compareByEt); + for (size_t i = 0; i < halfHgcalJets.size(); i++) { + if (halfHgcalJets.at(i).jetEt() > 0. 
&& i < 6) + allJets.push_back(halfHgcalJets.at(i)); + } + + std::sort(halfHfJets.begin(), halfHfJets.end(), gctobj::compareByEt); + for (size_t i = 0; i < halfHfJets.size(); i++) { + if (halfHfJets.at(i).jetEt() > 0. && i < 6) + allJets.push_back(halfHfJets.at(i)); + } + } + + std::sort(allJets.begin(), allJets.end(), gctobj::compareByEt); + for (size_t i = 0; i < allJets.size(); i++) { + jetCands->push_back(allJets.at(i)); + } + + iEvent.put(std::move(jetCands), "GCTJet"); +} + +// Apply calibrations to HCAL energy based on Jet Eta, Jet pT +float Phase2L1CaloJetEmulator::get_jet_pt_calibration(const float& jet_pt, const float& jet_eta) const { + float abs_eta = std::abs(jet_eta); + float tmp_jet_pt = jet_pt; + if (tmp_jet_pt > 499) + tmp_jet_pt = 499; + + // Different indices sizes in different calo regions. + // Barrel... + size_t eta_index = 0; + size_t pt_index = 0; + float calib = 1.0; + if (abs_eta <= 1.5) { + // Start loop checking 2nd value + for (unsigned int i = 1; i < absEtaBinsBarrel.size(); i++) { + if (abs_eta <= absEtaBinsBarrel.at(i)) + break; + eta_index++; + } + // Start loop checking 2nd value + for (unsigned int i = 1; i < jetPtBins.size(); i++) { + if (tmp_jet_pt <= jetPtBins.at(i)) + break; + pt_index++; + } + calib = calibrationsBarrel[eta_index][pt_index]; + } // end Barrel + else if (abs_eta <= 3.0) // HGCal + { + // Start loop checking 2nd value + for (unsigned int i = 1; i < absEtaBinsHGCal.size(); i++) { + if (abs_eta <= absEtaBinsHGCal.at(i)) + break; + eta_index++; + } + // Start loop checking 2nd value + for (unsigned int i = 1; i < jetPtBins.size(); i++) { + if (tmp_jet_pt <= jetPtBins.at(i)) + break; + pt_index++; + } + calib = calibrationsHGCal[eta_index][pt_index]; + } // end HGCal + else // HF + { + // Start loop checking 2nd value + for (unsigned int i = 1; i < absEtaBinsHF.size(); i++) { + if (abs_eta <= absEtaBinsHF.at(i)) + break; + eta_index++; + } + // Start loop checking 2nd value + for (unsigned int i = 1; i < 
jetPtBins.size(); i++) { + if (tmp_jet_pt <= jetPtBins.at(i)) + break; + pt_index++; + } + calib = calibrationsHF[eta_index][pt_index]; + } // end HF + + return jet_pt * calib; +} + +// Apply calibrations to tau pT based on L1EG info, EM Fraction, Tau Eta, Tau pT +float Phase2L1CaloJetEmulator::get_tau_pt_calibration(const float& tau_pt, const float& tau_eta) const { + float abs_eta = std::abs(tau_eta); + float tmp_tau_pt = tau_pt; + if (tmp_tau_pt > 199) + tmp_tau_pt = 199; + + // Different indices sizes in different calo regions. + // Barrel... + size_t eta_index = 0; + size_t pt_index = 0; + float calib = 1.0; + if (abs_eta <= 1.5) { + // Start loop checking 2nd value + for (unsigned int i = 1; i < tauAbsEtaBinsBarrel.size(); i++) { + if (abs_eta <= tauAbsEtaBinsBarrel.at(i)) + break; + eta_index++; + } + // Start loop checking 2nd value + for (unsigned int i = 1; i < tauPtBins.size(); i++) { + if (tmp_tau_pt <= tauPtBins.at(i)) + break; + pt_index++; + } + calib = tauPtCalibrationsBarrel[eta_index][pt_index]; + } // end Barrel + else if (abs_eta <= 3.0) // HGCal + { + // Start loop checking 2nd value + for (unsigned int i = 1; i < tauAbsEtaBinsHGCal.size(); i++) { + if (abs_eta <= tauAbsEtaBinsHGCal.at(i)) + break; + eta_index++; + } + // Start loop checking 2nd value + for (unsigned int i = 1; i < tauPtBins.size(); i++) { + if (tmp_tau_pt <= tauPtBins.at(i)) + break; + pt_index++; + } + calib = tauPtCalibrationsHGCal[eta_index][pt_index]; + } // end HGCal + else + return calib; + + return tau_pt * calib; +} + +// ------------ method fills 'descriptions' with the allowed parameters for the module ------------ +void Phase2L1CaloJetEmulator::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("gctFullTowers", edm::InputTag("l1tPhase2L1CaloEGammaEmulator", "GCTFullTowers")); + desc.add("hgcalTowers", edm::InputTag("l1tHGCalTowerProducer", "HGCalTowerProcessor")); + desc.add("hcalDigis", 
edm::InputTag("simHcalTriggerPrimitiveDigis")); + + edm::ParameterSetDescription nHits_params_validator; + nHits_params_validator.add("fit", "type"); + nHits_params_validator.add>("nHits_params", {1., 1.}); + std::vector nHits_params_default; + edm::ParameterSet nHits_params1; + nHits_params1.addParameter("fit", "hgcalEM"); + nHits_params1.addParameter>("nHits_params", {157.522, 0.090}); + nHits_params_default.push_back(nHits_params1); + edm::ParameterSet nHits_params2; + nHits_params2.addParameter("fit", "hgcalHad"); + nHits_params2.addParameter>("nHits_params", {159.295, 0.178}); + nHits_params_default.push_back(nHits_params2); + edm::ParameterSet nHits_params3; + nHits_params3.addParameter("fit", "hf"); + nHits_params3.addParameter>("nHits_params", {165.706, 0.153}); + nHits_params_default.push_back(nHits_params3); + desc.addVPSet("nHits_to_nvtx_params", nHits_params_validator, nHits_params_default); + + edm::ParameterSetDescription nvtx_params_validator; + nvtx_params_validator.add("calo", "type"); + nvtx_params_validator.add("iEta", "etaregion"); + nvtx_params_validator.add>("nvtx_params", {1., 1.}); + std::vector nvtx_params_default; + edm::ParameterSet nvtx_params1; + nvtx_params1.addParameter("calo", "hgcalEM"); + nvtx_params1.addParameter("iEta", "er1p4to1p8"); + nvtx_params1.addParameter>("nvtx_params", {-0.011772, 0.004142}); + nvtx_params_default.push_back(nvtx_params1); + edm::ParameterSet nvtx_params2; + nvtx_params2.addParameter("calo", "hgcalEM"); + nvtx_params2.addParameter("iEta", "er1p8to2p1"); + nvtx_params2.addParameter>("nvtx_params", {-0.015488, 0.005410}); + nvtx_params_default.push_back(nvtx_params2); + edm::ParameterSet nvtx_params3; + nvtx_params3.addParameter("calo", "hgcalEM"); + nvtx_params3.addParameter("iEta", "er2p1to2p4"); + nvtx_params3.addParameter>("nvtx_params", {-0.021150, 0.006078}); + nvtx_params_default.push_back(nvtx_params3); + edm::ParameterSet nvtx_params4; + nvtx_params4.addParameter("calo", "hgcalEM"); + 
nvtx_params4.addParameter("iEta", "er2p4to2p7"); + nvtx_params4.addParameter>("nvtx_params", {-0.015705, 0.005339}); + nvtx_params_default.push_back(nvtx_params4); + edm::ParameterSet nvtx_params5; + nvtx_params5.addParameter("calo", "hgcalEM"); + nvtx_params5.addParameter("iEta", "er2p7to3p1"); + nvtx_params5.addParameter>("nvtx_params", {-0.018492, 0.005620}); + nvtx_params_default.push_back(nvtx_params5); + edm::ParameterSet nvtx_params6; + nvtx_params6.addParameter("calo", "hgcalHad"); + nvtx_params6.addParameter("iEta", "er1p4to1p8"); + nvtx_params6.addParameter>("nvtx_params", {0.005675, 0.000615}); + nvtx_params_default.push_back(nvtx_params6); + edm::ParameterSet nvtx_params7; + nvtx_params7.addParameter("calo", "hgcalHad"); + nvtx_params7.addParameter("iEta", "er1p8to2p1"); + nvtx_params7.addParameter>("nvtx_params", {0.004560, 0.001099}); + nvtx_params_default.push_back(nvtx_params7); + edm::ParameterSet nvtx_params8; + nvtx_params8.addParameter("calo", "hgcalHad"); + nvtx_params8.addParameter("iEta", "er2p1to2p4"); + nvtx_params8.addParameter>("nvtx_params", {0.000036, 0.001608}); + nvtx_params_default.push_back(nvtx_params8); + edm::ParameterSet nvtx_params9; + nvtx_params9.addParameter("calo", "hgcalHad"); + nvtx_params9.addParameter("iEta", "er2p4to2p7"); + nvtx_params9.addParameter>("nvtx_params", {0.000869, 0.001754}); + nvtx_params_default.push_back(nvtx_params9); + edm::ParameterSet nvtx_params10; + nvtx_params10.addParameter("calo", "hgcalHad"); + nvtx_params10.addParameter("iEta", "er2p7to3p1"); + nvtx_params10.addParameter>("nvtx_params", {-0.006574, 0.003134}); + nvtx_params_default.push_back(nvtx_params10); + edm::ParameterSet nvtx_params11; + nvtx_params11.addParameter("calo", "hf"); + nvtx_params11.addParameter("iEta", "er29to33"); + nvtx_params11.addParameter>("nvtx_params", {-0.203291, 0.044096}); + nvtx_params_default.push_back(nvtx_params11); + edm::ParameterSet nvtx_params12; + nvtx_params12.addParameter("calo", "hf"); + 
nvtx_params12.addParameter("iEta", "er34to37"); + nvtx_params12.addParameter>("nvtx_params", {-0.210922, 0.045628}); + nvtx_params_default.push_back(nvtx_params12); + edm::ParameterSet nvtx_params13; + nvtx_params13.addParameter("calo", "hf"); + nvtx_params13.addParameter("iEta", "er38to41"); + nvtx_params13.addParameter>("nvtx_params", {-0.229562, 0.050560}); + nvtx_params_default.push_back(nvtx_params13); + desc.addVPSet("nvtx_to_PU_sub_params", nvtx_params_validator, nvtx_params_default); + + desc.add>("jetPtBins", {0.0, 5.0, 7.5, 10.0, 12.5, 15.0, 17.5, 20.0, 22.5, 25.0, 27.5, + 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 60.0, 65.0, 70.0, 75.0, 80.0, + 85.0, 90.0, 95.0, 100.0, 110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, + 180.0, 190.0, 200.0, 225.0, 250.0, 275.0, 300.0, 325.0, 400.0, 500.0}); + desc.add>("absEtaBinsBarrel", {0.00, 0.30, 0.60, 1.00, 1.50}); + desc.add>( + "jetCalibrationsBarrel", + {2.459, 2.320, 2.239, 2.166, 2.100, 2.040, 1.986, 1.937, 1.892, 1.852, 1.816, 1.768, 1.714, 1.670, 1.633, 1.603, + 1.578, 1.557, 1.540, 1.525, 1.513, 1.502, 1.493, 1.486, 1.479, 1.470, 1.460, 1.452, 1.445, 1.439, 1.433, 1.427, + 1.422, 1.417, 1.411, 1.403, 1.390, 1.377, 1.365, 1.352, 1.327, 1.284, 4.695, 3.320, 2.751, 2.361, 2.093, 1.908, + 1.781, 1.694, 1.633, 1.591, 1.562, 1.533, 1.511, 1.499, 1.492, 1.486, 1.482, 1.478, 1.474, 1.470, 1.467, 1.463, + 1.459, 1.456, 1.452, 1.447, 1.440, 1.433, 1.425, 1.418, 1.411, 1.404, 1.397, 1.390, 1.382, 1.370, 1.352, 1.334, + 1.316, 1.298, 1.262, 1.200, 5.100, 3.538, 2.892, 2.448, 2.143, 1.933, 1.789, 1.689, 1.620, 1.572, 1.539, 1.506, + 1.482, 1.469, 1.460, 1.455, 1.450, 1.446, 1.442, 1.438, 1.434, 1.431, 1.427, 1.423, 1.420, 1.414, 1.407, 1.400, + 1.392, 1.385, 1.378, 1.370, 1.363, 1.356, 1.348, 1.336, 1.317, 1.299, 1.281, 1.263, 1.226, 1.162, 3.850, 3.438, + 3.211, 3.017, 2.851, 2.708, 2.585, 2.479, 2.388, 2.310, 2.243, 2.159, 2.072, 2.006, 1.956, 1.917, 1.887, 1.863, + 1.844, 1.828, 1.814, 1.802, 1.791, 1.782, 1.773, 1.760, 
1.744, 1.729, 1.714, 1.699, 1.685, 1.670, 1.656, 1.641, + 1.627, 1.602, 1.566, 1.530, 1.494, 1.458, 1.386, 1.260}); + desc.add>("absEtaBinsHGCal", {1.50, 1.90, 2.40, 3.00}); + desc.add>( + "jetCalibrationsHGCal", + {5.604, 4.578, 4.061, 3.647, 3.314, 3.047, 2.832, 2.660, 2.521, 2.410, 2.320, 2.216, 2.120, 2.056, + 2.013, 1.983, 1.961, 1.945, 1.932, 1.922, 1.913, 1.905, 1.898, 1.891, 1.884, 1.874, 1.861, 1.848, + 1.835, 1.822, 1.810, 1.797, 1.784, 1.771, 1.759, 1.736, 1.704, 1.673, 1.641, 1.609, 1.545, 1.434, + 4.385, 3.584, 3.177, 2.849, 2.584, 2.370, 2.197, 2.057, 1.944, 1.853, 1.780, 1.695, 1.616, 1.564, + 1.530, 1.507, 1.491, 1.480, 1.472, 1.466, 1.462, 1.459, 1.456, 1.453, 1.451, 1.447, 1.443, 1.439, + 1.435, 1.431, 1.427, 1.423, 1.419, 1.416, 1.412, 1.405, 1.395, 1.385, 1.376, 1.366, 1.346, 1.312, + 562.891, 68.647, 17.648, 5.241, 2.223, 1.490, 1.312, 1.270, 1.260, 1.259, 1.259, 1.260, 1.263, 1.265, + 1.267, 1.269, 1.271, 1.273, 1.275, 1.277, 1.279, 1.281, 1.283, 1.285, 1.287, 1.290, 1.295, 1.299, + 1.303, 1.307, 1.311, 1.315, 1.319, 1.323, 1.328, 1.335, 1.345, 1.355, 1.366, 1.376, 1.397, 1.433}); + desc.add>("absEtaBinsHF", {3.00, 3.60, 6.00}); + desc.add>( + "jetCalibrationsHF", + {8.169, 6.873, 6.155, 5.535, 5.001, 4.539, 4.141, 3.798, 3.501, 3.245, 3.024, 2.748, 2.463, 2.249, + 2.090, 1.971, 1.881, 1.814, 1.763, 1.725, 1.695, 1.673, 1.655, 1.642, 1.631, 1.618, 1.605, 1.596, + 1.588, 1.581, 1.575, 1.569, 1.563, 1.557, 1.551, 1.541, 1.527, 1.513, 1.498, 1.484, 1.456, 1.406, + 2.788, 2.534, 2.388, 2.258, 2.141, 2.037, 1.945, 1.862, 1.788, 1.722, 1.664, 1.587, 1.503, 1.436, + 1.382, 1.339, 1.305, 1.277, 1.255, 1.237, 1.223, 1.211, 1.201, 1.193, 1.186, 1.178, 1.170, 1.164, + 1.159, 1.154, 1.151, 1.147, 1.144, 1.141, 1.138, 1.133, 1.126, 1.118, 1.111, 1.104, 1.090, 1.064}); + desc.add>("tauPtBins", {0.0, 5.0, 7.5, 10.0, 12.5, 15.0, 20.0, 25.0, 30.0, 35.0, + 40.0, 45.0, 50.0, 55.0, 60.0, 70.0, 80.0, 100.0, 150.0, 200.0}); + desc.add>("tauAbsEtaBinsBarrel", {0.00, 
0.30, 0.60, 1.00, 1.50}); + desc.add>("tauCalibrationsBarrel", + {1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, + 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, + 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.102, + 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, + 1.102, 1.102, 1.102, 1.102, 1.102, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139}); + desc.add>("tauAbsEtaBinsHGCal", {1.50, 1.90, 2.40, 3.00}); + desc.add>( + "tauCalibrationsHGCal", + {1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, + 1.384, 1.384, 1.384, 1.384, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, + 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, + 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133}); + + descriptions.addWithDefaultLabel(desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(Phase2L1CaloJetEmulator); diff --git a/L1Trigger/L1CaloTrigger/python/l1tPhase2CaloJetEmulator_cfi.py b/L1Trigger/L1CaloTrigger/python/l1tPhase2CaloJetEmulator_cfi.py new file mode 100644 index 0000000000000..fd0ba87e750ee --- /dev/null +++ b/L1Trigger/L1CaloTrigger/python/l1tPhase2CaloJetEmulator_cfi.py @@ -0,0 +1,123 @@ +import FWCore.ParameterSet.Config as cms + +l1tPhase2CaloJetEmulator = cms.EDProducer("Phase2L1CaloJetEmulator", + gctFullTowers = cms.InputTag("l1tPhase2L1CaloEGammaEmulator","GCTFullTowers"), + hgcalTowers = cms.InputTag("l1tHGCalTowerProducer","HGCalTowerProcessor"), + hcalDigis = cms.InputTag("simHcalTriggerPrimitiveDigis"), + nHits_to_nvtx_params = cms.VPSet( + cms.PSet( + fit = cms.string( "hgcalEM" ), + nHits_params = cms.vdouble( 157.522, 0.090 ) + ), + cms.PSet( + fit = cms.string( "hgcalHad" ), + nHits_params = 
cms.vdouble( 159.295, 0.178 ) + ), + cms.PSet( + fit = cms.string( "hf" ), + nHits_params = cms.vdouble( 165.706, 0.153 ) + ), + ), + nvtx_to_PU_sub_params = cms.VPSet( + cms.PSet( + calo = cms.string( "hgcalEM" ), + iEta = cms.string( "er1p4to1p8" ), + nvtx_params = cms.vdouble( -0.011772, 0.004142 ) + ), + cms.PSet( + calo = cms.string( "hgcalEM" ), + iEta = cms.string( "er1p8to2p1" ), + nvtx_params = cms.vdouble( -0.015488, 0.005410 ) + ), + cms.PSet( + calo = cms.string( "hgcalEM" ), + iEta = cms.string( "er2p1to2p4" ), + nvtx_params = cms.vdouble( -0.021150, 0.006078 ) + ), + cms.PSet( + calo = cms.string( "hgcalEM" ), + iEta = cms.string( "er2p4to2p7" ), + nvtx_params = cms.vdouble( -0.015705, 0.005339 ) + ), + cms.PSet( + calo = cms.string( "hgcalEM" ), + iEta = cms.string( "er2p7to3p1" ), + nvtx_params = cms.vdouble( -0.018492, 0.005620 ) + ), + cms.PSet( + calo = cms.string( "hgcalHad" ), + iEta = cms.string( "er1p4to1p8" ), + nvtx_params = cms.vdouble( 0.005675, 0.000615 ) + ), + cms.PSet( + calo = cms.string( "hgcalHad" ), + iEta = cms.string( "er1p8to2p1" ), + nvtx_params = cms.vdouble( 0.004560, 0.001099 ) + ), + cms.PSet( + calo = cms.string( "hgcalHad" ), + iEta = cms.string( "er2p1to2p4" ), + nvtx_params = cms.vdouble( 0.000036, 0.001608 ) + ), + cms.PSet( + calo = cms.string( "hgcalHad" ), + iEta = cms.string( "er2p4to2p7" ), + nvtx_params = cms.vdouble( 0.000869, 0.001754 ) + ), + cms.PSet( + calo = cms.string( "hgcalHad" ), + iEta = cms.string( "er2p7to3p1" ), + nvtx_params = cms.vdouble( -0.006574, 0.003134 ) + ), + cms.PSet( + calo = cms.string( "hf" ), + iEta = cms.string( "er29to33" ), + nvtx_params = cms.vdouble( -0.203291, 0.044096 ) + ), + cms.PSet( + calo = cms.string( "hf" ), + iEta = cms.string( "er34to37" ), + nvtx_params = cms.vdouble( -0.210922, 0.045628 ) + ), + cms.PSet( + calo = cms.string( "hf" ), + iEta = cms.string( "er38to41" ), + nvtx_params = cms.vdouble( -0.229562, 0.050560 ) + ), + ), + # Calibrations derived 7 December 
2023 on 13_2_0 Phase2Fall22DRMiniAOD QCD sample + jetPtBins = cms.vdouble([ 0.0,5.0,7.5,10.0,12.5,15.0,17.5,20.0,22.5,25.0,27.5,30.0,35.0,40.0,45.0,50.0,55.0,60.0,65.0,70.0,75.0,80.0,85.0,90.0,95.0,100.0,110.0,120.0,130.0,140.0,150.0,160.0,170.0,180.0,190.0,200.0,225.0,250.0,275.0,300.0,325.0,400.0,500.0]), + absEtaBinsBarrel = cms.vdouble([ 0.00,0.30,0.60,1.00,1.50]), + jetCalibrationsBarrel = cms.vdouble([ + 2.459, 2.320, 2.239, 2.166, 2.100, 2.040, 1.986, 1.937, 1.892, 1.852, 1.816, 1.768, 1.714, 1.670, 1.633, 1.603, 1.578, 1.557, 1.540, 1.525, 1.513, 1.502, 1.493, 1.486, 1.479, 1.470, 1.460, 1.452, 1.445, 1.439, 1.433, 1.427, 1.422, 1.417, 1.411, 1.403, 1.390, 1.377, 1.365, 1.352, 1.327, 1.284, + 4.695, 3.320, 2.751, 2.361, 2.093, 1.908, 1.781, 1.694, 1.633, 1.591, 1.562, 1.533, 1.511, 1.499, 1.492, 1.486, 1.482, 1.478, 1.474, 1.470, 1.467, 1.463, 1.459, 1.456, 1.452, 1.447, 1.440, 1.433, 1.425, 1.418, 1.411, 1.404, 1.397, 1.390, 1.382, 1.370, 1.352, 1.334, 1.316, 1.298, 1.262, 1.200, + 5.100, 3.538, 2.892, 2.448, 2.143, 1.933, 1.789, 1.689, 1.620, 1.572, 1.539, 1.506, 1.482, 1.469, 1.460, 1.455, 1.450, 1.446, 1.442, 1.438, 1.434, 1.431, 1.427, 1.423, 1.420, 1.414, 1.407, 1.400, 1.392, 1.385, 1.378, 1.370, 1.363, 1.356, 1.348, 1.336, 1.317, 1.299, 1.281, 1.263, 1.226, 1.162, + 3.850, 3.438, 3.211, 3.017, 2.851, 2.708, 2.585, 2.479, 2.388, 2.310, 2.243, 2.159, 2.072, 2.006, 1.956, 1.917, 1.887, 1.863, 1.844, 1.828, 1.814, 1.802, 1.791, 1.782, 1.773, 1.760, 1.744, 1.729, 1.714, 1.699, 1.685, 1.670, 1.656, 1.641, 1.627, 1.602, 1.566, 1.530, 1.494, 1.458, 1.386, 1.260, + ]), + absEtaBinsHGCal = cms.vdouble([ 1.50,1.90,2.40,3.00]), + jetCalibrationsHGCal = cms.vdouble([ + 5.604, 4.578, 4.061, 3.647, 3.314, 3.047, 2.832, 2.660, 2.521, 2.410, 2.320, 2.216, 2.120, 2.056, 2.013, 1.983, 1.961, 1.945, 1.932, 1.922, 1.913, 1.905, 1.898, 1.891, 1.884, 1.874, 1.861, 1.848, 1.835, 1.822, 1.810, 1.797, 1.784, 1.771, 1.759, 1.736, 1.704, 1.673, 1.641, 1.609, 1.545, 1.434, + 
4.385, 3.584, 3.177, 2.849, 2.584, 2.370, 2.197, 2.057, 1.944, 1.853, 1.780, 1.695, 1.616, 1.564, 1.530, 1.507, 1.491, 1.480, 1.472, 1.466, 1.462, 1.459, 1.456, 1.453, 1.451, 1.447, 1.443, 1.439, 1.435, 1.431, 1.427, 1.423, 1.419, 1.416, 1.412, 1.405, 1.395, 1.385, 1.376, 1.366, 1.346, 1.312, + 562.891, 68.647, 17.648, 5.241, 2.223, 1.490, 1.312, 1.270, 1.260, 1.259, 1.259, 1.260, 1.263, 1.265, 1.267, 1.269, 1.271, 1.273, 1.275, 1.277, 1.279, 1.281, 1.283, 1.285, 1.287, 1.290, 1.295, 1.299, 1.303, 1.307, 1.311, 1.315, 1.319, 1.323, 1.328, 1.335, 1.345, 1.355, 1.366, 1.376, 1.397, 1.433, + ]), + absEtaBinsHF = cms.vdouble([ 3.00,3.60,6.00]), + jetCalibrationsHF = cms.vdouble([ + 8.169, 6.873, 6.155, 5.535, 5.001, 4.539, 4.141, 3.798, 3.501, 3.245, 3.024, 2.748, 2.463, 2.249, 2.090, 1.971, 1.881, 1.814, 1.763, 1.725, 1.695, 1.673, 1.655, 1.642, 1.631, 1.618, 1.605, 1.596, 1.588, 1.581, 1.575, 1.569, 1.563, 1.557, 1.551, 1.541, 1.527, 1.513, 1.498, 1.484, 1.456, 1.406, + 2.788, 2.534, 2.388, 2.258, 2.141, 2.037, 1.945, 1.862, 1.788, 1.722, 1.664, 1.587, 1.503, 1.436, 1.382, 1.339, 1.305, 1.277, 1.255, 1.237, 1.223, 1.211, 1.201, 1.193, 1.186, 1.178, 1.170, 1.164, 1.159, 1.154, 1.151, 1.147, 1.144, 1.141, 1.138, 1.133, 1.126, 1.118, 1.111, 1.104, 1.090, 1.064, + ]), + # Calibrations derived 7 December 2023 on 13_2_0 Phase2Fall22DRMiniAOD VBFHiggsTauTau sample + tauPtBins = cms.vdouble([ 0.0,5.0,7.5,10.0,12.5,15.0,20.0,25.0,30.0,35.0,40.0,45.0,50.0,55.0,60.0,70.0,80.0,100.0,150.0,200.0]), + tauAbsEtaBinsBarrel = cms.vdouble([ 0.00,0.30,0.60,1.00,1.50]), + tauCalibrationsBarrel = cms.vdouble([ + 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, 1.067, + 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, 1.106, + 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 1.102, 
1.102, 1.102, 1.102, + 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, 1.139, + ]), + tauAbsEtaBinsHGCal = cms.vdouble([ 1.50,1.90,2.40,3.00]), + tauCalibrationsHGCal = cms.vdouble([ + 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, 1.384, + 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, 1.473, + 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, 1.133, + ]), +) diff --git a/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1.cc b/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1.cc index 036ea962594e5..f04ab8afd3e13 100644 --- a/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1.cc +++ b/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1.cc @@ -66,10 +66,6 @@ class L1TCaloLayer1 : public edm::stream::EDProducer<> { void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - // ----------member data --------------------------- edm::EDGetTokenT ecalTPSource; @@ -348,30 +344,6 @@ void L1TCaloLayer1::beginRun(const edm::Run& iRun, const edm::EventSetup& iSetup } } -// ------------ method called when ending the processing of a run ------------ -/* - void - L1TCaloLayer1::endRun(edm::Run const&, edm::EventSetup const&) - { - } -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* - void - L1TCaloLayer1::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) - { - } -*/ - -// ------------ method called when ending the processing of a luminosity 
block ------------ -/* - void - L1TCaloLayer1::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) - { - } -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TCaloLayer1::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //Description set to reflect default present in simCaloStage2Layer1Digis_cfi.py diff --git a/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1Validator.cc b/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1Validator.cc index 03f68391d3a59..542ba7a39d1f5 100644 --- a/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1Validator.cc +++ b/L1Trigger/L1TCaloLayer1/plugins/L1TCaloLayer1Validator.cc @@ -46,20 +46,13 @@ using namespace l1t; class L1TCaloLayer1Validator : public edm::one::EDAnalyzer<> { public: explicit L1TCaloLayer1Validator(const edm::ParameterSet&); - ~L1TCaloLayer1Validator() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginJob() override; void analyze(const edm::Event&, const edm::EventSetup&) override; void endJob() override; - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - // ----------member data --------------------------- edm::EDGetTokenT testTowerToken; @@ -144,8 +137,6 @@ L1TCaloLayer1Validator::L1TCaloLayer1Validator(const edm::ParameterSet& iConfig) ngCard[c] = nbCard[c] = zgCard[c] = zbCard[c] = 0; } -L1TCaloLayer1Validator::~L1TCaloLayer1Validator() {} - // // member functions // @@ -390,9 +381,6 @@ void L1TCaloLayer1Validator::analyze(const edm::Event& iEvent, const edm::EventS eventCount++; } -// ------------ method called once each job just before starting event loop ------------ -void 
L1TCaloLayer1Validator::beginJob() {} - // ------------ method called once each job just after ending the event loop ------------ void L1TCaloLayer1Validator::endJob() { if (validateTowers) @@ -420,38 +408,6 @@ void L1TCaloLayer1Validator::endJob() { } } -// ------------ method called when starting to processes a run ------------ -/* -void -L1TCaloLayer1Validator::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -L1TCaloLayer1Validator::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -L1TCaloLayer1Validator::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -L1TCaloLayer1Validator::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TCaloLayer1Validator::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TCaloLayer1/plugins/L1TCaloSummary.cc b/L1Trigger/L1TCaloLayer1/plugins/L1TCaloSummary.cc index 4b93638dbce5a..4237afee3dcb8 100644 --- a/L1Trigger/L1TCaloLayer1/plugins/L1TCaloSummary.cc +++ b/L1Trigger/L1TCaloLayer1/plugins/L1TCaloSummary.cc @@ -57,6 +57,9 @@ #include "L1Trigger/L1TCaloLayer1/src/UCTLogging.hh" #include +#include +#include + //Anomaly detection includes #include "ap_fixed.h" #include "hls4ml/emulator.h" @@ -82,8 +85,6 @@ class L1TCaloSummary : public edm::stream::EDProducer<> { void produce(edm::Event&, const edm::EventSetup&) override; //void endJob() override; - void beginRun(edm::Run const&, edm::EventSetup const&) override{}; - void 
print(); // ----------member data --------------------------- @@ -110,6 +111,9 @@ class L1TCaloSummary : public edm::stream::EDProducer<> { hls4mlEmulator::ModelLoader loader; std::shared_ptr model; + + bool overwriteWithTestPatterns; + std::vector testPatterns; }; // @@ -137,7 +141,9 @@ L1TCaloSummary::L1TCaloSummary(const edm::ParameterSet& iConfig) verbose(iConfig.getParameter("verbose")), fwVersion(iConfig.getParameter("firmwareVersion")), regionToken(consumes(edm::InputTag("simCaloStage2Layer1Digis"))), - loader(hls4mlEmulator::ModelLoader(iConfig.getParameter("CICADAModelVersion"))) { + loader(hls4mlEmulator::ModelLoader(iConfig.getParameter("CICADAModelVersion"))), + overwriteWithTestPatterns(iConfig.getParameter("useTestPatterns")), + testPatterns(iConfig.getParameter>("testPatterns")) { std::vector pumLUTData; char pumLUTString[10]; for (uint32_t pumBin = 0; pumBin < nPumBins; pumBin++) { @@ -215,6 +221,32 @@ void L1TCaloSummary::produce(edm::Event& iEvent, const edm::Event //CICADA reads this as a flat vector modelInput[14 * i.gctPhi() + (i.gctEta() - 4)] = i.et(); } + // Check if we're using test patterns. 
If so, we overwrite the inputs with a test pattern + if (overwriteWithTestPatterns) { + unsigned int evt = iEvent.id().event(); + unsigned int totalTestPatterns = testPatterns.size(); + unsigned int patternElement = evt % totalTestPatterns; + const edm::ParameterSet& element = testPatterns.at(patternElement); + std::stringstream inputStream; + std::string PhiRowString; + + edm::LogWarning("L1TCaloSummary") << "Overwriting existing CICADA input with test pattern!\n"; + + for (unsigned short int iPhi = 1; iPhi <= 18; ++iPhi) { + PhiRowString = ""; + std::stringstream PhiRowStringStream; + PhiRowStringStream << "iPhi_" << iPhi; + PhiRowString = PhiRowStringStream.str(); + std::vector phiRow = element.getParameter>(PhiRowString); + for (unsigned short int iEta = 1; iEta <= 14; ++iEta) { + modelInput[14 * (iPhi - 1) + (iEta - 1)] = phiRow.at(iEta - 1); + inputStream << phiRow.at(iEta - 1) << " "; + } + inputStream << "\n"; + } + edm::LogInfo("L1TCaloSummary") << "Input Stream:\n" << inputStream.str(); + } + //Extract model output OUTPUT modelResult[1] = { OUTPUT("0.0", 10)}; //the 10 here refers to the fact that we read in "0.0" as a decimal number @@ -224,6 +256,9 @@ void L1TCaloSummary::produce(edm::Event& iEvent, const edm::Event *CICADAScore = modelResult[0].to_float(); + if (overwriteWithTestPatterns) + edm::LogInfo("L1TCaloSummary") << "Test Pattern Output: " << *CICADAScore; + summaryCard.setRegionData(inputRegions); if (!summaryCard.process()) { @@ -301,8 +336,19 @@ void L1TCaloSummary::fillDescriptions(edm::ConfigurationDescripti descriptions.addDefault(desc); } -typedef L1TCaloSummary, ap_fixed<11, 5>> L1TCaloSummaryCICADAv1; -typedef L1TCaloSummary, ap_ufixed<16, 8>> L1TCaloSummaryCICADAv2; -//define type version plugins -DEFINE_FWK_MODULE(L1TCaloSummaryCICADAv1); -DEFINE_FWK_MODULE(L1TCaloSummaryCICADAv2); \ No newline at end of file +// Initial version, X.0.0, input/output typing +typedef L1TCaloSummary, ap_fixed<11, 5>> L1TCaloSummary_CICADA_v1p0p0; 
+typedef L1TCaloSummary, ap_ufixed<16, 8>> L1TCaloSummary_CICADA_v2p0p0; +DEFINE_FWK_MODULE(L1TCaloSummary_CICADA_v1p0p0); +DEFINE_FWK_MODULE(L1TCaloSummary_CICADA_v2p0p0); +// X.1.0 version input.output typing +typedef L1TCaloSummary, ap_fixed<11, 5>> L1TCaloSummary_CICADA_v1p1p0; +typedef L1TCaloSummary, ap_ufixed<16, 8>> L1TCaloSummary_CICADA_v2p1p0; +DEFINE_FWK_MODULE(L1TCaloSummary_CICADA_v1p1p0); +DEFINE_FWK_MODULE(L1TCaloSummary_CICADA_v2p1p0); +// X.1.1 version input/output typing +typedef L1TCaloSummary, ap_ufixed<16, 8, AP_RND, AP_SAT, AP_SAT>> L1TCaloSummary_CICADA_vXp1p1; +DEFINE_FWK_MODULE(L1TCaloSummary_CICADA_vXp1p1); +// X.1.2 version input/output typing +typedef L1TCaloSummary, ap_ufixed<16, 8, AP_RND_CONV, AP_SAT>> L1TCaloSummary_CICADA_vXp1p2; +DEFINE_FWK_MODULE(L1TCaloSummary_CICADA_vXp1p2); diff --git a/L1Trigger/L1TCaloLayer1/python/CICADATestPatterns.py b/L1Trigger/L1TCaloLayer1/python/CICADATestPatterns.py new file mode 100644 index 0000000000000..a209dc998c34f --- /dev/null +++ b/L1Trigger/L1TCaloLayer1/python/CICADATestPatterns.py @@ -0,0 +1,64 @@ +import FWCore.ParameterSet.Config as cms + +standardCICADATestPatterns = cms.VPSet( + cms.PSet( + iPhi_1 = cms.vuint32(0, 0, 1, 0, 1, 0, 2, 3, 0, 0, 0, 3, 6, 0, ), + iPhi_2 = cms.vuint32(2, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 2, ), + iPhi_3 = cms.vuint32(0, 0, 0, 0, 0, 1, 0, 0, 5, 0, 0, 0, 0, 1, ), + iPhi_4 = cms.vuint32(0, 1, 0, 0, 0, 1, 0, 0, 31, 1, 8, 7, 2, 8, ), + iPhi_5 = cms.vuint32(1, 0, 1, 0, 0, 1, 0, 1, 2, 4, 0, 0, 0, 0, ), + iPhi_6 = cms.vuint32(0, 0, 0, 0, 4, 0, 0, 1, 0, 0, 0, 0, 0, 6, ), + iPhi_7 = cms.vuint32(0, 3, 1, 2, 1, 5, 1, 0, 0, 0, 0, 0, 1, 1, ), + iPhi_8 = cms.vuint32(0, 0, 3, 2, 0, 2, 3, 3, 8, 10, 1, 2, 0, 27, ), + iPhi_9 = cms.vuint32(6, 0, 0, 2, 0, 0, 2, 0, 0, 0, 1, 0, 0, 1, ), + iPhi_10 = cms.vuint32(0, 0, 1, 0, 12, 2, 0, 0, 0, 1, 0, 1, 0, 2, ), + iPhi_11 = cms.vuint32(5, 0, 0, 1, 0, 0, 1, 4, 2, 0, 15, 0, 0, 212, ), + iPhi_12 = cms.vuint32(4, 0, 2, 0, 2, 1, 1, 4, 1, 0, 
2, 3, 0, 0, ), + iPhi_13 = cms.vuint32(0, 4, 1, 2, 182, 0, 2, 2, 0, 0, 0, 1, 1, 0, ), + iPhi_14 = cms.vuint32(0, 10, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 2, ), + iPhi_15 = cms.vuint32(6, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 12, ), + iPhi_16 = cms.vuint32(0, 0, 0, 1, 0, 1, 0, 0, 3, 1, 0, 0, 0, 1, ), + iPhi_17 = cms.vuint32(0, 0, 0, 0, 0, 2, 0, 4, 2, 0, 3, 0, 0, 2, ), + iPhi_18 = cms.vuint32(2, 0, 0, 0, 0, 1, 0, 4, 0, 2, 4, 5, 0, 0, ), + ), + cms.PSet( + iPhi_1 = cms.vuint32(3, 5, 6, 2, 1, 0, 9, 0, 1, 1, 2, 1, 1, 5, ), + iPhi_2 = cms.vuint32(4, 2, 1, 0, 5, 0, 0, 2, 4, 11, 10, 1, 1, 12, ), + iPhi_3 = cms.vuint32(5, 0, 0, 2, 1, 2, 1, 1, 19, 20, 237, 0, 2, 2, ), + iPhi_4 = cms.vuint32(5, 1, 0, 3, 2, 1, 2, 3, 3, 1, 2, 1, 1, 7, ), + iPhi_5 = cms.vuint32(1, 1, 1, 2, 0, 0, 0, 3, 5, 2, 1, 1, 3, 14, ), + iPhi_6 = cms.vuint32(4, 0, 2, 2, 0, 0, 0, 2, 1, 3, 3, 1, 0, 3, ), + iPhi_7 = cms.vuint32(1, 4, 62, 6, 0, 1, 10, 2, 2, 5, 1, 1, 0, 7, ), + iPhi_8 = cms.vuint32(13, 1, 0, 2, 1, 5, 1, 3, 1, 0, 1, 0, 4, 2, ), + iPhi_9 = cms.vuint32(4, 1, 2, 1, 6, 2, 6, 0, 2, 2, 1, 0, 0, 6, ), + iPhi_10 = cms.vuint32(10, 0, 2, 0, 3, 0, 1, 2, 12, 0, 20, 4, 0, 7, ), + iPhi_11 = cms.vuint32(16, 2, 4, 1, 0, 2, 3, 15, 4, 1, 0, 6, 5, 5, ), + iPhi_12 = cms.vuint32(3, 0, 1, 0, 1, 1, 4, 2, 9, 115, 38, 2, 3, 1, ), + iPhi_13 = cms.vuint32(10, 3, 10, 15, 2, 0, 8, 8, 0, 2, 2, 0, 1, 8, ), + iPhi_14 = cms.vuint32(4, 0, 0, 0, 1, 4, 0, 1, 1, 1, 1, 1, 0, 2, ), + iPhi_15 = cms.vuint32(11, 1, 1, 2, 1, 3, 5, 4, 4, 2, 0, 1, 0, 13, ), + iPhi_16 = cms.vuint32(6, 1, 1, 1, 0, 1, 3, 2, 1, 10, 3, 0, 0, 15, ), + iPhi_17 = cms.vuint32(4, 0, 0, 1, 2, 1, 1, 2, 0, 1, 0, 1, 0, 3, ), + iPhi_18 = cms.vuint32(5, 0, 0, 0, 4, 1, 0, 2, 5, 31, 0, 1, 1, 5, ), + ), + cms.PSet( + iPhi_1 = cms.vuint32(4, 2, 2, 0, 0, 0, 4, 6, 1, 0, 0, 2, 2, 7, ), + iPhi_2 = cms.vuint32(2, 2, 0, 1, 1, 1, 0, 0, 1, 2, 2, 1, 0, 0, ), + iPhi_3 = cms.vuint32(0, 0, 0, 1, 52, 0, 3, 2, 7, 2, 0, 0, 1, 4, ), + iPhi_4 = cms.vuint32(4, 0, 0, 0, 51, 6, 53, 4, 1, 0, 0, 0, 0, 0, 
), + iPhi_5 = cms.vuint32(10, 0, 0, 0, 1, 1, 4, 1, 0, 0, 0, 0, 0, 8, ), + iPhi_6 = cms.vuint32(2, 0, 2, 0, 1, 5, 1, 3, 4, 0, 1, 0, 1, 14, ), + iPhi_7 = cms.vuint32(1, 0, 1, 1, 0, 0, 8, 9, 2, 3, 0, 1, 0, 3, ), + iPhi_8 = cms.vuint32(4, 0, 23, 62, 31, 0, 5, 3, 3, 1, 0, 0, 0, 4, ), + iPhi_9 = cms.vuint32(100, 3, 10, 5, 0, 2, 0, 2, 1, 2, 0, 0, 0, 0, ), + iPhi_10 = cms.vuint32(27, 2, 0, 0, 0, 2, 3, 1, 3, 0, 0, 2, 0, 0, ), + iPhi_11 = cms.vuint32(8, 2, 3, 5, 5, 1, 1, 0, 4, 2, 2, 0, 0, 5, ), + iPhi_12 = cms.vuint32(6, 6, 1, 0, 0, 2, 0, 3, 1, 3, 2, 1, 0, 2, ), + iPhi_13 = cms.vuint32(0, 2, 2, 1, 0, 0, 7, 6, 0, 0, 0, 0, 1, 352, ), + iPhi_14 = cms.vuint32(8, 0, 0, 1, 1, 1, 2, 2, 1, 4, 0, 0, 0, 2, ), + iPhi_15 = cms.vuint32(3, 0, 0, 0, 1, 3, 3, 3, 0, 1, 0, 0, 0, 2, ), + iPhi_16 = cms.vuint32(3, 166, 0, 4, 0, 2, 3, 1, 1, 1, 0, 0, 0, 6, ), + iPhi_17 = cms.vuint32(2, 2, 1, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 2, ), + iPhi_18 = cms.vuint32(6, 3, 0, 2, 0, 4, 7, 1, 4, 4, 0, 0, 1, 2, ), + ), +) \ No newline at end of file diff --git a/L1Trigger/L1TCaloLayer1/python/simCaloStage2Layer1Summary_cfi.py b/L1Trigger/L1TCaloLayer1/python/simCaloStage2Layer1Summary_cfi.py index 0d26e0b5ad79c..a5c5373a0ca63 100644 --- a/L1Trigger/L1TCaloLayer1/python/simCaloStage2Layer1Summary_cfi.py +++ b/L1Trigger/L1TCaloLayer1/python/simCaloStage2Layer1Summary_cfi.py @@ -1,6 +1,8 @@ import FWCore.ParameterSet.Config as cms -simCaloStage2Layer1Summary = cms.EDProducer('L1TCaloSummaryCICADAv2', +from L1Trigger.L1TCaloLayer1.CICADATestPatterns import standardCICADATestPatterns + +simCaloStage2Layer1Summary = cms.EDProducer('L1TCaloSummary_CICADA_vXp1p1', nPumBins = cms.uint32(18), pumLUT00n= cms.vdouble(0.43, 0.32, 0.29, 0.36, 0.33, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25), pumLUT00p= cms.vdouble(0.45, 0.32, 0.29, 0.35, 0.31, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25), @@ -48,5 +50,7 @@ verbose = cms.bool(False), # See UCTLayer1.hh for firmware version firmwareVersion = cms.int32(1), - 
CICADAModelVersion = cms.string("CICADAModel_v2p1") + CICADAModelVersion = cms.string("CICADAModel_v1p1p1"), + useTestPatterns = cms.bool(False), + testPatterns = standardCICADATestPatterns ) diff --git a/L1Trigger/L1TCalorimeter/plugins/L1TCaloStage2ParamsESProducer.cc b/L1Trigger/L1TCalorimeter/plugins/L1TCaloStage2ParamsESProducer.cc index 7ba7ca4e9b388..77d4a54f47306 100644 --- a/L1Trigger/L1TCalorimeter/plugins/L1TCaloStage2ParamsESProducer.cc +++ b/L1Trigger/L1TCalorimeter/plugins/L1TCaloStage2ParamsESProducer.cc @@ -344,12 +344,6 @@ L1TCaloStage2ParamsESProducer::L1TCaloStage2ParamsESProducer(const edm::Paramete std::shared_ptr q2LUT(new LUT(q2LUTStream)); m_params_helper.setQ2LUT(*q2LUT); - // HI ZDC calibration LUT for trigger - edm::FileInPath zdcLUTFile = conf.getParameter("zdcLUTFile"); - std::ifstream zdcLUTStream(zdcLUTFile.fullPath()); - std::shared_ptr zdcLUT(new LUT(zdcLUTStream)); - m_params_helper.setZDCLUT(*zdcLUT); - // Layer 1 LUT specification m_params_helper.setLayer1ECalScaleFactors(conf.getParameter>("layer1ECalScaleFactors")); m_params_helper.setLayer1HCalScaleFactors(conf.getParameter>("layer1HCalScaleFactors")); diff --git a/L1Trigger/L1TCalorimeter/plugins/L1TStage2CaloAnalyzer.cc b/L1Trigger/L1TCalorimeter/plugins/L1TStage2CaloAnalyzer.cc index 9c3d6d7558f23..4b592920b1688 100644 --- a/L1Trigger/L1TCalorimeter/plugins/L1TStage2CaloAnalyzer.cc +++ b/L1Trigger/L1TCalorimeter/plugins/L1TStage2CaloAnalyzer.cc @@ -29,19 +29,12 @@ namespace l1t { class L1TStage2CaloAnalyzer : public edm::one::EDAnalyzer { public: explicit L1TStage2CaloAnalyzer(const edm::ParameterSet&); - ~L1TStage2CaloAnalyzer() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void beginJob() override; void analyze(const edm::Event&, const edm::EventSetup&) override; - void endJob() override; - - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup 
const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetToken m_towerToken; @@ -296,11 +289,6 @@ namespace l1t { typeStr_.push_back("sumasymhthf"); } - L1TStage2CaloAnalyzer::~L1TStage2CaloAnalyzer() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) - } - // // member functions // @@ -815,41 +803,6 @@ namespace l1t { hsortMP_ = fs->make("sortMP", "", 201, -100.5, 100.5); } - // ------------ method called once each job just after ending the event loop ------------ - void L1TStage2CaloAnalyzer::endJob() {} - - // ------------ method called when starting to processes a run ------------ - /* - void - L1TStage2CaloAnalyzer::beginRun(edm::Run const&, edm::EventSetup const&) - { - } - */ - - // ------------ method called when ending the processing of a run ------------ - /* - void - L1TStage2CaloAnalyzer::endRun(edm::Run const&, edm::EventSetup const&) - { - } - */ - - // ------------ method called when starting to processes a luminosity block ------------ - /* - void - L1TStage2CaloAnalyzer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) - { - } - */ - - // ------------ method called when ending the processing of a luminosity block ------------ - /* - void - L1TStage2CaloAnalyzer::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) - { - } - */ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TStage2CaloAnalyzer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TCalorimeter/plugins/L1TStage2InputPatternWriter.cc 
b/L1Trigger/L1TCalorimeter/plugins/L1TStage2InputPatternWriter.cc index eeb26ca7a64aa..940cc5154e2b9 100644 --- a/L1Trigger/L1TCalorimeter/plugins/L1TStage2InputPatternWriter.cc +++ b/L1Trigger/L1TCalorimeter/plugins/L1TStage2InputPatternWriter.cc @@ -44,20 +44,13 @@ class L1TStage2InputPatternWriter : public edm::one::EDAnalyzer<> { public: explicit L1TStage2InputPatternWriter(const edm::ParameterSet&); - ~L1TStage2InputPatternWriter() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginJob() override; void analyze(const edm::Event&, const edm::EventSetup&) override; void endJob() override; - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - // ----------member data --------------------------- edm::EDGetToken m_towerToken; @@ -118,11 +111,6 @@ L1TStage2InputPatternWriter::L1TStage2InputPatternWriter(const edm::ParameterSet LogDebug("L1TDebug") << "Preparing for " << nLink_ << " links" << std::endl; } -L1TStage2InputPatternWriter::~L1TStage2InputPatternWriter() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -221,9 +209,6 @@ void L1TStage2InputPatternWriter::analyze(const edm::Event& iEvent, const edm::E } } -// ------------ method called once each job just before starting event loop ------------ -void L1TStage2InputPatternWriter::beginJob() {} - // ------------ method called once each job just after ending the event loop ------------ void L1TStage2InputPatternWriter::endJob() { //frames per event @@ -299,38 +284,6 @@ void L1TStage2InputPatternWriter::endJob() { } } -// ------------ method called when starting to processes a run ------------ -/* -void -L1TStage2InputPatternWriter::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -L1TStage2InputPatternWriter::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -L1TStage2InputPatternWriter::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -L1TStage2InputPatternWriter::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TStage2InputPatternWriter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer1Producer.cc b/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer1Producer.cc index 1428e77c55e09..ddeed6650b81f 100644 --- a/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer1Producer.cc +++ b/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer1Producer.cc @@ -71,9 +71,6 @@ class L1TStage2Layer1Producer : public edm::stream::EDProducer<> { void produce(edm::Event&, 
const edm::EventSetup&) override; void beginRun(edm::Run const&, edm::EventSetup const&) override; - void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- @@ -355,27 +352,6 @@ void L1TStage2Layer1Producer::beginRun(edm::Run const& iRun, edm::EventSetup con } } -// ------------ method called when ending the processing of a run ------------ -void L1TStage2Layer1Producer::endRun(edm::Run const&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -L1TStage2Layer1Producer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup cons -t&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -L1TStage2Layer1Producer::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const& -) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TStage2Layer1Producer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer2Producer.cc b/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer2Producer.cc index 2d49330b96462..b13c93ef24889 100644 --- a/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer2Producer.cc +++ b/L1Trigger/L1TCalorimeter/plugins/L1TStage2Layer2Producer.cc @@ -65,9 +65,6 @@ class L1TStage2Layer2Producer : public edm::stream::EDProducer<> { void produce(edm::Event&, const edm::EventSetup&) override; void beginRun(edm::Run const&, edm::EventSetup const&) override; - void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void 
beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- @@ -314,27 +311,6 @@ void L1TStage2Layer2Producer::beginRun(edm::Run const& iRun, edm::EventSetup con } } -// ------------ method called when ending the processing of a run ------------ -void L1TStage2Layer2Producer::endRun(edm::Run const&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -L1TStage2Layer2Producer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup cons -t&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -L1TStage2Layer2Producer::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const& -) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TStage2Layer2Producer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TCalorimeter/python/caloParamsHI_2023_v0_4_2_cfi.py b/L1Trigger/L1TCalorimeter/python/caloParamsHI_2023_v0_4_2_cfi.py new file mode 100644 index 0000000000000..f769df6d05dbd --- /dev/null +++ b/L1Trigger/L1TCalorimeter/python/caloParamsHI_2023_v0_4_2_cfi.py @@ -0,0 +1,128 @@ +import FWCore.ParameterSet.Config as cms + +from L1Trigger.L1TCalorimeter.caloParams_cfi import caloParamsSource +import L1Trigger.L1TCalorimeter.caloParams_cfi +caloStage2Params = L1Trigger.L1TCalorimeter.caloParams_cfi.caloParams.clone( + + # EG + egEtaCut = 24, + egHcalThreshold = 0., + egTrimmingLUTFile = "L1Trigger/L1TCalorimeter/data/egTrimmingLUT_10_v16.01.19.txt", + egHOverEcutBarrel = 1, + egHOverEcutEndcap = 1, + egBypassExtHOverE = 1, + egBypassShape = 1, + 
egBypassECALFG = 1, + + egMaxHOverELUTFile = "L1Trigger/L1TCalorimeter/data/HoverEIdentification_0.995_v15.12.23.txt", + egCompressShapesLUTFile = "L1Trigger/L1TCalorimeter/data/egCompressLUT_v4.txt", + egShapeIdType = "compressed", + egShapeIdLUTFile = "L1Trigger/L1TCalorimeter/data/shapeIdentification_adapt0.99_compressedieta_compressedE_compressedshape_v15.12.08.txt", #Not used any more in the current emulator version, merged with calibration LUT + + egIsolationType = "compressed", + egIsoLUTFile = "L1Trigger/L1TCalorimeter/data/EG_Iso_LUT_Flat_WP_v2_Tight1358_20p0_0p7_40p0_v1_APR23.txt", + egIsoLUTFile2 = "L1Trigger/L1TCalorimeter/data/EG_Iso_LUT_Flat_WP_v2_Loose610_10p0_0p7_40p0_v1_APR23.txt", + + egIsoVetoNrTowersPhi = 2, + egPUSParams = cms.vdouble(1,4,32), #Isolation window in firmware goes up to abs(ieta)=32 for now + egCalibrationType = "compressed", + egCalibrationVersion = 0, + egCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/EG_Calibration_LUT_correctedEtCalibLUT_v1_APR2023.txt", + + # Tau + isoTauEtaMax = 25, + tauSeedThreshold = 0., + tauIsoLUTFile = "L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_2023_calibThr1p7_V2gs_effMin0p9_eMin16_eMax60.txt", + tauIsoLUTFile2 = "L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_2023_calibThr1p7_V2gs_effMin0p9_eMin16_eMax60.txt", + tauCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/Tau_Cal_LUT_2023_calibThr1p7_V2.txt", + tauCompressLUTFile = "L1Trigger/L1TCalorimeter/data/tauCompressAllLUT_12bit_v3.txt", + tauPUSParams = [1,4,32], + + # jets + jetSeedThreshold = 2.5, + jetPUSType = "PhiRing1", + jetPUSUsePhiRing = 1, + + # Calibration options + jetCalibrationType = "LUT", + jetCompressPtLUTFile = "L1Trigger/L1TCalorimeter/data/lut_pt_compress_2017v1.txt", + jetCompressEtaLUTFile = "L1Trigger/L1TCalorimeter/data/lut_eta_compress_2017v1.txt", + jetCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_calib_2023v0_ECALZS_PhiRing.txt", + + + # sums: 0=ET, 1=HT, 2=MET, 3=MHT + etSumEtaMin = [1, 1, 1, 1, 1], + 
etSumEtaMax = [28, 26, 28, 26, 28], + etSumEtThreshold = [0., 30., 0., 30., 0.], # only 2nd (HT) and 4th (MHT) values applied + etSumMetPUSType = "LUT", # et threshold from this LUT supercedes et threshold in line above + etSumBypassEttPUS = 1, + etSumBypassEcalSumPUS = 1, + + etSumMetPUSLUTFile = "L1Trigger/L1TCalorimeter/data/metPumLUT_2023v0_puppiMet_fit.txt", + + etSumCentralityUpper = [5.5, 37.0, 182.5, 502.5, 1244.0, 6000.0, 6000.0, 65535.0], + etSumCentralityLower = [0.0, 5.0, 28.5, 148.0, 427.0, 4662.5, 4810.5, 65535.0], + + # Layer 1 SF + layer1ECalScaleETBins = cms.vint32([3, 6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]), + layer1ECalScaleFactors = cms.vdouble([ + 1.12, 1.13, 1.13, 1.12, 1.12, 1.12, 1.13, 1.12, 1.13, 1.12, 1.13, 1.13, 1.14, 1.13, 1.13, 1.13, 1.14, 1.26, 1.11, 1.20, 1.21, 1.22, 1.19, 1.20, 1.19, 0.00, 0.00, 0.00, + 1.12, 1.13, 1.13, 1.12, 1.12, 1.12, 1.13, 1.12, 1.13, 1.12, 1.13, 1.13, 1.14, 1.13, 1.13, 1.13, 1.14, 1.26, 1.11, 1.20, 1.21, 1.22, 1.19, 1.20, 1.19, 1.22, 0.00, 0.00, + 1.08, 1.09, 1.08, 1.08, 1.11, 1.08, 1.09, 1.09, 1.09, 1.09, 1.15, 1.09, 1.10, 1.10, 1.10, 1.10, 1.10, 1.23, 1.07, 1.15, 1.14, 1.16, 1.14, 1.14, 1.15, 1.14, 1.14, 0.00, + 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.07, 1.07, 1.07, 1.07, 1.07, 1.08, 1.07, 1.09, 1.08, 1.17, 1.06, 1.11, 1.10, 1.13, 1.10, 1.10, 1.11, 1.11, 1.11, 1.09, + 1.04, 1.05, 1.04, 1.05, 1.04, 1.05, 1.06, 1.06, 1.05, 1.05, 1.05, 1.06, 1.06, 1.06, 1.06, 1.06, 1.07, 1.15, 1.04, 1.09, 1.09, 1.10, 1.09, 1.09, 1.10, 1.10, 1.10, 1.08, + 1.04, 1.03, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.05, 1.06, 1.04, 1.05, 1.05, 1.13, 1.03, 1.07, 1.08, 1.08, 1.08, 1.07, 1.07, 1.09, 1.08, 1.07, + 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.04, 1.05, 1.05, 1.05, 1.05, 1.05, 1.12, 1.03, 1.06, 1.06, 1.08, 1.07, 1.07, 1.06, 1.08, 1.07, 1.06, + 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.04, 1.04, 1.04, 1.04, 1.03, 1.10, 1.02, 1.05, 
1.06, 1.06, 1.06, 1.06, 1.05, 1.06, 1.06, 1.06, + 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.03, 1.03, 1.02, 1.07, 1.02, 1.04, 1.04, 1.05, 1.06, 1.05, 1.05, 1.06, 1.06, 1.05, + 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.09, 1.02, 1.04, 1.05, 1.05, 1.05, 1.05, 1.04, 1.05, 1.06, 1.05, + 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.08, 1.01, 1.04, 1.04, 1.05, 1.05, 1.04, 1.04, 1.05, 1.06, 1.05, + 1.01, 1.01, 1.01, 1.01, 1.01, 1.01, 1.02, 1.01, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.06, 1.01, 1.04, 1.04, 1.05, 1.04, 1.03, 1.03, 1.04, 1.05, 1.04, + 1.01, 1.00, 1.01, 1.01, 1.01, 1.01, 1.01, 1.00, 1.01, 1.02, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.03, 1.04, 1.01, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.00, 1.01, + 1.02, 1.00, 1.00, 1.02, 1.00, 1.01, 1.01, 1.00, 1.00, 1.02, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.02, 1.04, 1.01, 1.03, 1.03, 1.03, 1.03, 1.02, 1.02, 1.02, 1.00, 1.01 + ]), + + layer1HCalScaleETBins = cms.vint32([1, 6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]), + layer1HCalScaleFactors = cms.vdouble([ + 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, # ZERO-SUPPRESS <1GeV (i.e. 
0.5GeV) IN THE BARREL ONLY (ieta<=15 == eta<=1.305) + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 
1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00 + ]), + + layer1HFScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]), + layer1HFScaleFactors = cms.vdouble([ + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00 + ]), + + # HCal FB LUT + layer1HCalFBLUTUpper = cms.vuint32([ + 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, + ]), + + layer1HCalFBLUTLower = cms.vuint32([ + 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 
0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, + ]) +) diff --git a/L1Trigger/L1TCalorimeter/python/caloParamsHI_2023_v0_4_3_cfi.py b/L1Trigger/L1TCalorimeter/python/caloParamsHI_2023_v0_4_3_cfi.py new file mode 100644 index 0000000000000..b229997cb4d82 --- /dev/null +++ b/L1Trigger/L1TCalorimeter/python/caloParamsHI_2023_v0_4_3_cfi.py @@ -0,0 +1,128 @@ +import FWCore.ParameterSet.Config as cms + +from L1Trigger.L1TCalorimeter.caloParams_cfi import caloParamsSource +import L1Trigger.L1TCalorimeter.caloParams_cfi +caloStage2Params = L1Trigger.L1TCalorimeter.caloParams_cfi.caloParams.clone( + + # EG + egEtaCut = 24, + egHcalThreshold = 0., + egTrimmingLUTFile = "L1Trigger/L1TCalorimeter/data/egTrimmingLUT_10_v16.01.19.txt", + egHOverEcutBarrel = 1, + egHOverEcutEndcap = 1, + egBypassExtHOverE = 1, + egBypassShape = 1, + egBypassECALFG = 1, + + egMaxHOverELUTFile = "L1Trigger/L1TCalorimeter/data/HoverEIdentification_0.995_v15.12.23.txt", + egCompressShapesLUTFile = "L1Trigger/L1TCalorimeter/data/egCompressLUT_v4.txt", + egShapeIdType = "compressed", + egShapeIdLUTFile = "L1Trigger/L1TCalorimeter/data/shapeIdentification_adapt0.99_compressedieta_compressedE_compressedshape_v15.12.08.txt", #Not used any more in the current emulator version, merged with calibration LUT + + egIsolationType = "compressed", + egIsoLUTFile = "L1Trigger/L1TCalorimeter/data/EG_Iso_LUT_Flat_WP_v2_Tight1358_20p0_0p7_40p0_v1_APR23.txt", + egIsoLUTFile2 = "L1Trigger/L1TCalorimeter/data/EG_Iso_LUT_Flat_WP_v2_Loose610_10p0_0p7_40p0_v1_APR23.txt", + + egIsoVetoNrTowersPhi = 2, + egPUSParams = cms.vdouble(1,4,32), #Isolation window in firmware goes up to abs(ieta)=32 for now + egCalibrationType = "compressed", + egCalibrationVersion = 0, + egCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/EG_Calibration_LUT_correctedEtCalibLUT_v1_APR2023.txt", + + # Tau + isoTauEtaMax = 25, + 
tauSeedThreshold = 0., + tauIsoLUTFile = "L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_2023_calibThr1p7_V2gs_effMin0p9_eMin16_eMax60.txt", + tauIsoLUTFile2 = "L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_2023_calibThr1p7_V2gs_effMin0p9_eMin16_eMax60.txt", + tauCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/Tau_Cal_LUT_2023_calibThr1p7_V2.txt", + tauCompressLUTFile = "L1Trigger/L1TCalorimeter/data/tauCompressAllLUT_12bit_v3.txt", + tauPUSParams = [1,4,32], + + # jets + jetSeedThreshold = 2.5, + jetPUSType = "PhiRing1", + jetPUSUsePhiRing = 1, + + # Calibration options + jetCalibrationType = "LUT", + jetCompressPtLUTFile = "L1Trigger/L1TCalorimeter/data/lut_pt_compress_2017v1.txt", + jetCompressEtaLUTFile = "L1Trigger/L1TCalorimeter/data/lut_eta_compress_2017v1.txt", + jetCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_calib_2023v0_ECALZS_PhiRing.txt", + + + # sums: 0=ET, 1=HT, 2=MET, 3=MHT + etSumEtaMin = [1, 1, 1, 1, 1], + etSumEtaMax = [28, 26, 28, 26, 28], + etSumEtThreshold = [0., 30., 0., 30., 0.], # only 2nd (HT) and 4th (MHT) values applied + etSumMetPUSType = "LUT", # et threshold from this LUT supercedes et threshold in line above + etSumBypassEttPUS = 1, + etSumBypassEcalSumPUS = 1, + + etSumMetPUSLUTFile = "L1Trigger/L1TCalorimeter/data/metPumLUT_2023v0_puppiMet_fit.txt", + + etSumCentralityUpper = [5.5, 37.0, 182.5, 502.5, 1244.0, 6000.0, 6000.0, 65535.0], + etSumCentralityLower = [0.0, 5.0, 28.5, 148.0, 427.0, 4547.0, 4736.0, 65535.0], + + # Layer 1 SF + layer1ECalScaleETBins = cms.vint32([3, 6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]), + layer1ECalScaleFactors = cms.vdouble([ + 1.12, 1.13, 1.13, 1.12, 1.12, 1.12, 1.13, 1.12, 1.13, 1.12, 1.13, 1.13, 1.14, 1.13, 1.13, 1.13, 1.14, 1.26, 1.11, 1.20, 1.21, 1.22, 1.19, 1.20, 1.19, 0.00, 0.00, 0.00, + 1.12, 1.13, 1.13, 1.12, 1.12, 1.12, 1.13, 1.12, 1.13, 1.12, 1.13, 1.13, 1.14, 1.13, 1.13, 1.13, 1.14, 1.26, 1.11, 1.20, 1.21, 1.22, 1.19, 1.20, 1.19, 1.22, 0.00, 0.00, + 1.08, 1.09, 1.08, 1.08, 
1.11, 1.08, 1.09, 1.09, 1.09, 1.09, 1.15, 1.09, 1.10, 1.10, 1.10, 1.10, 1.10, 1.23, 1.07, 1.15, 1.14, 1.16, 1.14, 1.14, 1.15, 1.14, 1.14, 0.00, + 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.07, 1.07, 1.07, 1.07, 1.07, 1.08, 1.07, 1.09, 1.08, 1.17, 1.06, 1.11, 1.10, 1.13, 1.10, 1.10, 1.11, 1.11, 1.11, 1.09, + 1.04, 1.05, 1.04, 1.05, 1.04, 1.05, 1.06, 1.06, 1.05, 1.05, 1.05, 1.06, 1.06, 1.06, 1.06, 1.06, 1.07, 1.15, 1.04, 1.09, 1.09, 1.10, 1.09, 1.09, 1.10, 1.10, 1.10, 1.08, + 1.04, 1.03, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.04, 1.05, 1.06, 1.04, 1.05, 1.05, 1.13, 1.03, 1.07, 1.08, 1.08, 1.08, 1.07, 1.07, 1.09, 1.08, 1.07, + 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.04, 1.05, 1.05, 1.05, 1.05, 1.05, 1.12, 1.03, 1.06, 1.06, 1.08, 1.07, 1.07, 1.06, 1.08, 1.07, 1.06, + 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.04, 1.04, 1.04, 1.04, 1.03, 1.10, 1.02, 1.05, 1.06, 1.06, 1.06, 1.06, 1.05, 1.06, 1.06, 1.06, + 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.03, 1.03, 1.02, 1.07, 1.02, 1.04, 1.04, 1.05, 1.06, 1.05, 1.05, 1.06, 1.06, 1.05, + 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.09, 1.02, 1.04, 1.05, 1.05, 1.05, 1.05, 1.04, 1.05, 1.06, 1.05, + 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.08, 1.01, 1.04, 1.04, 1.05, 1.05, 1.04, 1.04, 1.05, 1.06, 1.05, + 1.01, 1.01, 1.01, 1.01, 1.01, 1.01, 1.02, 1.01, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.06, 1.01, 1.04, 1.04, 1.05, 1.04, 1.03, 1.03, 1.04, 1.05, 1.04, + 1.01, 1.00, 1.01, 1.01, 1.01, 1.01, 1.01, 1.00, 1.01, 1.02, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.03, 1.04, 1.01, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.00, 1.01, + 1.02, 1.00, 1.00, 1.02, 1.00, 1.01, 1.01, 1.00, 1.00, 1.02, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.02, 1.04, 1.01, 1.03, 1.03, 1.03, 1.03, 1.02, 1.02, 
1.02, 1.00, 1.01 + ]), + + layer1HCalScaleETBins = cms.vint32([1, 6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]), + layer1HCalScaleFactors = cms.vdouble([ + 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, # ZERO-SUPPRESS <1GeV (i.e. 0.5GeV) IN THE BARREL ONLY (ieta<=15 == eta<=1.305) + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 
1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00 + ]), + + layer1HFScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]), + layer1HFScaleFactors = cms.vdouble([ + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, + 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00 + ]), + + # HCal FB LUT + layer1HCalFBLUTUpper = cms.vuint32([ + 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 
0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, + ]), + + layer1HCalFBLUTLower = cms.vuint32([ + 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, 0xBBBABBBA, + ]) +) diff --git a/L1Trigger/L1TCalorimeter/python/caloParams_cfi.py b/L1Trigger/L1TCalorimeter/python/caloParams_cfi.py index abbab772c6e9b..29bfc87f44d35 100644 --- a/L1Trigger/L1TCalorimeter/python/caloParams_cfi.py +++ b/L1Trigger/L1TCalorimeter/python/caloParams_cfi.py @@ -142,7 +142,6 @@ minimumBiasThresholds = cms.vint32(0, 0, 0, 0), centralityLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/centralityLUT_stage1.txt"), q2LUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/q2LUT_stage1.txt"), - zdcLUTFile = cms.FileInPath("L1Trigger/L1TZDC/data/zdcLUT_HI_v0_1.txt"), # HCal FB LUT layer1HCalFBLUTUpper = cms.vuint32([ diff --git a/L1Trigger/L1TGEM/plugins/GEMPadDigiClusterProducer.cc b/L1Trigger/L1TGEM/plugins/GEMPadDigiClusterProducer.cc index 0c355bc86afaa..ce609404b0116 100644 --- a/L1Trigger/L1TGEM/plugins/GEMPadDigiClusterProducer.cc +++ b/L1Trigger/L1TGEM/plugins/GEMPadDigiClusterProducer.cc @@ -33,7 +33,7 @@ #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/PluginManager/interface/ModuleDef.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/Framework/interface/ConsumesCollector.h" @@ -57,7 +57,7 @@ #include #include -class GEMPadDigiClusterProducer : public edm::stream::EDProducer<> { +class GEMPadDigiClusterProducer : public 
edm::global::EDProducer<> { public: typedef std::vector GEMPadDigiClusters; typedef std::map GEMPadDigiClusterContainer; @@ -66,20 +66,19 @@ class GEMPadDigiClusterProducer : public edm::stream::EDProducer<> { ~GEMPadDigiClusterProducer() override; - void beginRun(const edm::Run&, const edm::EventSetup&) override; - - void produce(edm::Event&, const edm::EventSetup&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void buildClusters(const GEMPadDigiCollection& pads, GEMPadDigiClusterContainer& out_clusters) const; - void selectClusters(const GEMPadDigiClusterContainer& in_clusters, GEMPadDigiClusterCollection& out) const; + GEMPadDigiClusterContainer buildClusters(const GEMPadDigiCollection& pads, const GEMGeometry&) const; + GEMPadDigiClusterCollection selectClusters(const GEMPadDigiClusterContainer& in_clusters, const GEMGeometry&) const; template void checkValid(const T& cluster, const GEMDetId& id) const; /// Name of input digi Collection edm::EDGetTokenT pad_token_; + edm::EDPutTokenT put_token_; edm::ESGetToken geom_token_; edm::InputTag pads_; @@ -93,11 +92,9 @@ class GEMPadDigiClusterProducer : public edm::stream::EDProducer<> { unsigned int maxClustersOHGE21_; unsigned int maxClusterSize_; bool sendOverflowClusters_; - - const GEMGeometry* geometry_; }; -GEMPadDigiClusterProducer::GEMPadDigiClusterProducer(const edm::ParameterSet& ps) : geometry_(nullptr) { +GEMPadDigiClusterProducer::GEMPadDigiClusterProducer(const edm::ParameterSet& ps) { pads_ = ps.getParameter("InputCollection"); nPartitionsGE11_ = ps.getParameter("nPartitionsGE11"); nPartitionsGE21_ = ps.getParameter("nPartitionsGE21"); @@ -116,10 +113,9 @@ GEMPadDigiClusterProducer::GEMPadDigiClusterProducer(const edm::ParameterSet& ps } pad_token_ = consumes(pads_); - geom_token_ = esConsumes(); + geom_token_ = esConsumes(); - produces(); - consumes(pads_); + put_token_ 
= produces(); } GEMPadDigiClusterProducer::~GEMPadDigiClusterProducer() {} @@ -141,36 +137,24 @@ void GEMPadDigiClusterProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions.add("simMuonGEMPadDigiClustersDef", desc); } -void GEMPadDigiClusterProducer::beginRun(const edm::Run& run, const edm::EventSetup& eventSetup) { - edm::ESHandle hGeom = eventSetup.getHandle(geom_token_); - geometry_ = &*hGeom; -} - -void GEMPadDigiClusterProducer::produce(edm::Event& e, const edm::EventSetup& eventSetup) { - edm::Handle hpads; - e.getByToken(pad_token_, hpads); +void GEMPadDigiClusterProducer::produce(edm::StreamID, edm::Event& e, const edm::EventSetup& eventSetup) const { + auto const& geometry = eventSetup.getData(geom_token_); - // Create empty output - std::unique_ptr pClusters(new GEMPadDigiClusterCollection()); + auto const& pads = e.get(pad_token_); // build the proto clusters (per partition) - GEMPadDigiClusterContainer proto_clusters; - buildClusters(*(hpads.product()), proto_clusters); + GEMPadDigiClusterContainer proto_clusters = buildClusters(pads, geometry); // sort and select clusters per chamber, per OH, per partition number and per pad number - selectClusters(proto_clusters, *pClusters); - - // store them in the event - e.put(std::move(pClusters)); + e.emplace(put_token_, selectClusters(proto_clusters, geometry)); } -void GEMPadDigiClusterProducer::buildClusters(const GEMPadDigiCollection& det_pads, - GEMPadDigiClusterContainer& proto_clusters) const { - // clear the container - proto_clusters.clear(); +GEMPadDigiClusterProducer::GEMPadDigiClusterContainer GEMPadDigiClusterProducer::buildClusters( + const GEMPadDigiCollection& det_pads, const GEMGeometry& geometry) const { + GEMPadDigiClusterContainer proto_clusters; // construct clusters - for (const auto& part : geometry_->etaPartitions()) { + for (const auto& part : geometry.etaPartitions()) { // clusters are not build for ME0 // -> ignore hits from station 0 if (part->isME0()) @@ -228,11 
+212,13 @@ void GEMPadDigiClusterProducer::buildClusters(const GEMPadDigiCollection& det_pa proto_clusters.emplace(part->id(), all_pad_clusters); } // end of partition loop + return proto_clusters; } -void GEMPadDigiClusterProducer::selectClusters(const GEMPadDigiClusterContainer& proto_clusters, - GEMPadDigiClusterCollection& out_clusters) const { - for (const auto& ch : geometry_->chambers()) { +GEMPadDigiClusterCollection GEMPadDigiClusterProducer::selectClusters(const GEMPadDigiClusterContainer& proto_clusters, + const GEMGeometry& geometry) const { + GEMPadDigiClusterCollection out_clusters; + for (const auto& ch : geometry.chambers()) { const unsigned nOH = ch->id().isGE11() ? nOHGE11_ : nOHGE21_; const unsigned nPartitions = ch->id().isGE11() ? nPartitionsGE11_ : nPartitionsGE21_; const unsigned nEtaPerPartition = ch->nEtaPartitions() / (nPartitions * nOH); @@ -264,6 +250,7 @@ void GEMPadDigiClusterProducer::selectClusters(const GEMPadDigiClusterContainer& } // end of clusterizer partition loop } // end of OH loop } // end of chamber loop + return out_clusters; } template diff --git a/L1Trigger/L1TGEM/plugins/ME0PadDigiProducer.cc b/L1Trigger/L1TGEM/plugins/ME0PadDigiProducer.cc index 7202358762e24..33e5346cfdc87 100644 --- a/L1Trigger/L1TGEM/plugins/ME0PadDigiProducer.cc +++ b/L1Trigger/L1TGEM/plugins/ME0PadDigiProducer.cc @@ -1,4 +1,4 @@ -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/Framework/interface/ConsumesCollector.h" @@ -21,59 +21,45 @@ /// \class ME0PadDigiProducer -class ME0PadDigiProducer : public edm::stream::EDProducer<> { +class ME0PadDigiProducer : public edm::global::EDProducer<> { public: explicit ME0PadDigiProducer(const edm::ParameterSet& ps); - ~ME0PadDigiProducer() override; - - void beginRun(const edm::Run&, const edm::EventSetup&) override; - - void 
produce(edm::Event&, const edm::EventSetup&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; private: - void buildPads(const ME0DigiCollection& digis, ME0PadDigiCollection& out_pads) const; + ME0PadDigiCollection buildPads(const ME0DigiCollection& digis, const ME0Geometry& geometry) const; /// Name of input digi Collection edm::EDGetTokenT digi_token_; edm::InputTag digis_; edm::ESGetToken geom_token_; - - const ME0Geometry* geometry_; + edm::EDPutTokenT put_token_; }; -ME0PadDigiProducer::ME0PadDigiProducer(const edm::ParameterSet& ps) : geometry_(nullptr) { +ME0PadDigiProducer::ME0PadDigiProducer(const edm::ParameterSet& ps) { digis_ = ps.getParameter("InputCollection"); digi_token_ = consumes(digis_); - geom_token_ = esConsumes(); + geom_token_ = esConsumes(); - produces(); + put_token_ = produces(); } -ME0PadDigiProducer::~ME0PadDigiProducer() {} +void ME0PadDigiProducer::produce(edm::StreamID, edm::Event& e, const edm::EventSetup& eventSetup) const { + auto const& geometry = eventSetup.getData(geom_token_); -void ME0PadDigiProducer::beginRun(const edm::Run& run, const edm::EventSetup& eventSetup) { - edm::ESHandle hGeom = eventSetup.getHandle(geom_token_); - geometry_ = &*hGeom; -} - -void ME0PadDigiProducer::produce(edm::Event& e, const edm::EventSetup& eventSetup) { edm::Handle hdigis; e.getByToken(digi_token_, hdigis); - // Create empty output - std::unique_ptr pPads(new ME0PadDigiCollection()); - - // build the pads - buildPads(*(hdigis.product()), *pPads); - - // store them in the event - e.put(std::move(pPads)); + // build the pads and store them in the event + e.emplace(put_token_, buildPads(*(hdigis.product()), geometry)); } -void ME0PadDigiProducer::buildPads(const ME0DigiCollection& det_digis, ME0PadDigiCollection& out_pads) const { - for (const auto& p : geometry_->etaPartitions()) { +ME0PadDigiCollection ME0PadDigiProducer::buildPads(const ME0DigiCollection& det_digis, + const ME0Geometry& geometry) 
const { + ME0PadDigiCollection out_pads; + for (const auto& p : geometry.etaPartitions()) { // set of pairs, sorted first by pad then by bx std::set > proto_pads; @@ -91,6 +77,7 @@ void ME0PadDigiProducer::buildPads(const ME0DigiCollection& det_digis, ME0PadDig out_pads.insertDigi(p->id(), pad_digi); } } + return out_pads; } DEFINE_FWK_MODULE(ME0PadDigiProducer); diff --git a/L1Trigger/L1TGlobal/plugins/L1TGlobalAnalyzer.cc b/L1Trigger/L1TGlobal/plugins/L1TGlobalAnalyzer.cc index d396a6fbb358b..d4d8429528dec 100644 --- a/L1Trigger/L1TGlobal/plugins/L1TGlobalAnalyzer.cc +++ b/L1Trigger/L1TGlobal/plugins/L1TGlobalAnalyzer.cc @@ -53,19 +53,12 @@ namespace l1t { class L1TGlobalAnalyzer : public edm::one::EDAnalyzer<> { public: explicit L1TGlobalAnalyzer(const edm::ParameterSet&); - ~L1TGlobalAnalyzer() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void beginJob() override; void analyze(const edm::Event&, const edm::EventSetup&) override; - void endJob() override; - - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetToken m_gmuToken; @@ -255,11 +248,6 @@ namespace l1t { typeStr_.push_back("sum"); } - L1TGlobalAnalyzer::~L1TGlobalAnalyzer() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
- } - // // member functions // @@ -986,41 +974,6 @@ namespace l1t { dmxVGtDir_.make("hDmxVsGTSumEt_HFM1", "Dmx versus GT HFM1", 16, -0.5, 15.5, 16, -0.5, 15.5); } - // ------------ method called once each job just after ending the event loop ------------ - void L1TGlobalAnalyzer::endJob() {} - - // ------------ method called when starting to processes a run ------------ - /* -void -L1TGlobalAnalyzer::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - - // ------------ method called when ending the processing of a run ------------ - /* -void -L1TGlobalAnalyzer::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - - // ------------ method called when starting to processes a luminosity block ------------ - /* -void -L1TGlobalAnalyzer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - - // ------------ method called when ending the processing of a luminosity block ------------ - /* -void -L1TGlobalAnalyzer::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TGlobalAnalyzer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TGlobal/plugins/L1TUtmTriggerMenuDumper.cc b/L1Trigger/L1TGlobal/plugins/L1TUtmTriggerMenuDumper.cc index 54bb2e78a87f4..1dbf391d7e7b0 100644 --- a/L1Trigger/L1TGlobal/plugins/L1TUtmTriggerMenuDumper.cc +++ b/L1Trigger/L1TGlobal/plugins/L1TUtmTriggerMenuDumper.cc @@ -34,10 +34,9 @@ using namespace edm; using namespace std; using namespace tmeventsetup; -class L1TUtmTriggerMenuDumper : public one::EDAnalyzer { +class L1TUtmTriggerMenuDumper : public one::EDAnalyzer { public: explicit L1TUtmTriggerMenuDumper(const ParameterSet&); - ~L1TUtmTriggerMenuDumper() override; static void fillDescriptions(ConfigurationDescriptions& descriptions); 
@@ -48,16 +47,12 @@ class L1TUtmTriggerMenuDumper : public one::EDAnalyzer m_l1TriggerMenuToken; }; L1TUtmTriggerMenuDumper::L1TUtmTriggerMenuDumper(const ParameterSet& iConfig) : m_l1TriggerMenuToken(esConsumes()) {} -L1TUtmTriggerMenuDumper::~L1TUtmTriggerMenuDumper() {} - void L1TUtmTriggerMenuDumper::analyze(Event const& iEvent, EventSetup const& iSetup) {} void L1TUtmTriggerMenuDumper::beginJob() { cout << "INFO: L1TUtmTriggerMenuDumper module beginJob called.\n"; } @@ -187,10 +182,6 @@ void L1TUtmTriggerMenuDumper::beginRun(Run const& run, EventSetup const& iSetup) void L1TUtmTriggerMenuDumper::endRun(Run const&, EventSetup const&) {} -void L1TUtmTriggerMenuDumper::beginLuminosityBlock(LuminosityBlock const&, EventSetup const&) {} - -void L1TUtmTriggerMenuDumper::endLuminosityBlock(LuminosityBlock const&, EventSetup const&) {} - void L1TUtmTriggerMenuDumper::fillDescriptions(ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation // Please change this to state exactly what you do use, even if it is no parameters diff --git a/L1Trigger/L1TGlobal/python/simGtStage2Digis_cfi.py b/L1Trigger/L1TGlobal/python/simGtStage2Digis_cfi.py index 28909e6077644..685ae9b7dd690 100644 --- a/L1Trigger/L1TGlobal/python/simGtStage2Digis_cfi.py +++ b/L1Trigger/L1TGlobal/python/simGtStage2Digis_cfi.py @@ -16,7 +16,7 @@ JetInputTag = cms.InputTag("simCaloStage2Digis"), EtSumInputTag = cms.InputTag("simCaloStage2Digis"), EtSumZdcInputTag = cms.InputTag("etSumZdcProducer"), - AXOL1TLModelVersion = cms.string("GTADModel_v1"), + AXOL1TLModelVersion = cms.string("GTADModel_v3"), AlgorithmTriggersUnmasked = cms.bool(True), AlgorithmTriggersUnprescaled = cms.bool(True), GetPrescaleColumnFromData = cms.bool(False), diff --git a/L1Trigger/L1TGlobal/src/AXOL1TLCondition.cc b/L1Trigger/L1TGlobal/src/AXOL1TLCondition.cc index bcc0f051278f4..76861ea2ec598 100644 --- a/L1Trigger/L1TGlobal/src/AXOL1TLCondition.cc +++ 
b/L1Trigger/L1TGlobal/src/AXOL1TLCondition.cc @@ -90,11 +90,10 @@ const bool l1t::AXOL1TLCondition::evaluateCondition(const int bxEval) const { int useBx = bxEval + m_gtAXOL1TLTemplate->condRelativeBx(); //HLS4ML stuff - std::string AXOL1TLmodelversion = m_AXOL1TLmodelversion; + std::string AXOL1TLmodelversion = m_AXOL1TLmodelversion; //config loading method hls4mlEmulator::ModelLoader loader(AXOL1TLmodelversion); std::shared_ptr model; model = loader.load_model(); - cout << "loading model... " << AXOL1TLmodelversion << std::endl; // //pointers to objects const BXVector* candMuVec = m_gtGTB->getCandL1Mu(); @@ -116,24 +115,30 @@ const bool l1t::AXOL1TLCondition::evaluateCondition(const int bxEval) const { //total # inputs in vector is (4+10+4+1)*3 = 57 const int NInputs = 57; + //types of inputs and outputs + typedef ap_fixed<18, 13> inputtype; + typedef std::array, 8> resulttype; //v3 + typedef ap_ufixed<18, 14> losstype; + typedef std::pair pairtype; + // typedef std::array, 13> resulttype; //deprecated v1 type: + //define zero - ap_fixed<18, 13> fillzero = 0.0; + inputtype fillzero = 0.0; //AD vector declaration, will fill later - ap_fixed<18, 13> ADModelInput[NInputs] = {}; + inputtype ADModelInput[NInputs] = {}; //initializing vector by type for my sanity - ap_fixed<18, 13> MuInput[MuVecSize]; - ap_fixed<18, 13> JetInput[JVecSize]; - ap_fixed<18, 13> EgammaInput[EGVecSize]; - ap_fixed<18, 13> EtSumInput[EtSumVecSize]; + inputtype MuInput[MuVecSize]; + inputtype JetInput[JVecSize]; + inputtype EgammaInput[EGVecSize]; + inputtype EtSumInput[EtSumVecSize]; //declare result vectors +score - std::array, 13> result; - ap_ufixed<18, 14> loss; - std::pair, 13>, ap_ufixed<18, 14>> - ADModelResult; //model outputs a pair of the (result vector, loss) - float score = -1.0; //not sure what the best default is hm?? 
+ resulttype result; + losstype loss; + pairtype ADModelResult; //model outputs a pair of the (result vector, loss) + float score = -1.0; //not sure what the best default is hm?? //check number of input objects we actually have (muons, jets etc) int NCandMu = candMuVec->size(useBx); diff --git a/L1Trigger/L1THGCal/python/l1tHGCalConcentratorProducer_cfi.py b/L1Trigger/L1THGCal/python/l1tHGCalConcentratorProducer_cfi.py index 5220528262b6b..cce8a02986697 100644 --- a/L1Trigger/L1THGCal/python/l1tHGCalConcentratorProducer_cfi.py +++ b/L1Trigger/L1THGCal/python/l1tHGCalConcentratorProducer_cfi.py @@ -52,8 +52,10 @@ ] +# Values taken from ECON-T working document v9 (March 2022) +# https://edms.cern.ch/file/2206779/1/ECON-T_specification_working_doc_v9_2mar2022.pdf bestchoice_ndata_decentralized = [ - 1, 3, 6, 9, 14, 18, 23, 27, 32, 37, 41, 46, 0, 0, 0, 0, + 1, 4, 6, 9, 14, 18, 23, 28, 32, 37, 41, 46, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/L1Trigger/L1THGCal/test/HGCalTriggerGeomTesterV9Imp3.cc b/L1Trigger/L1THGCal/test/HGCalTriggerGeomTesterV9Imp3.cc index 93309557c6133..1265197703e1a 100644 --- a/L1Trigger/L1THGCal/test/HGCalTriggerGeomTesterV9Imp3.cc +++ b/L1Trigger/L1THGCal/test/HGCalTriggerGeomTesterV9Imp3.cc @@ -122,6 +122,8 @@ class HGCalTriggerGeomTesterV9Imp3 : public edm::stream::EDAnalyzer<> { int triggerCellLayer_ = 0; int triggerCellWaferU_ = 0; int triggerCellWaferV_ = 0; + int triggerCellWaferPart_ = -1; + int triggerCellWaferOrient_ = -1; int triggerCellU_ = 0; int triggerCellV_ = 0; int triggerCellIEta_ = 0; @@ -300,6 +302,8 @@ HGCalTriggerGeomTesterV9Imp3::HGCalTriggerGeomTesterV9Imp3(const edm::ParameterS treeTriggerCells_->Branch("layer", &triggerCellLayer_, "layer/I"); treeTriggerCells_->Branch("waferu", &triggerCellWaferU_, "waferu/I"); treeTriggerCells_->Branch("waferv", &triggerCellWaferV_, "waferv/I"); + 
treeTriggerCells_->Branch("waferpart", &triggerCellWaferPart_, "waferpart/I"); + treeTriggerCells_->Branch("waferorient", &triggerCellWaferOrient_, "waferorient/I"); treeTriggerCells_->Branch("triggercellu", &triggerCellU_, "triggercellu/I"); treeTriggerCells_->Branch("triggercellv", &triggerCellV_, "triggercellv/I"); treeTriggerCells_->Branch("triggercellieta", &triggerCellIEta_, "triggercellieta/I"); @@ -977,7 +981,12 @@ void HGCalTriggerGeomTesterV9Imp3::fillTriggerGeometry() // Loop over trigger cells edm::LogPrint("TreeFilling") << "Filling trigger cells tree"; for (const auto& triggercell_cells : trigger_cells) { + if (!triggercell_cells.second.size()) { + throw cms::Exception("BadGeometry") << "HGCalTriggerGeometry: No cells in trigger cell!"; + } + DetId id(triggercell_cells.first); + std::tuple wafertype; GlobalPoint position = triggerGeometry_->getTriggerCellPosition(triggercell_cells.first); triggerCellId_ = id.rawId(); if (id.det() == DetId::HGCalHSc) { @@ -989,6 +998,8 @@ void HGCalTriggerGeomTesterV9Imp3::fillTriggerGeometry() triggerCellIPhi_ = id_sc.iphi(); triggerCellWaferU_ = 0; triggerCellWaferV_ = 0; + triggerCellWaferPart_ = -1; + triggerCellWaferOrient_ = -1; triggerCellU_ = 0; triggerCellV_ = 0; } else if (HFNoseTriggerDetId(triggercell_cells.first).det() == DetId::HGCalTrigger && @@ -1001,6 +1012,8 @@ void HGCalTriggerGeomTesterV9Imp3::fillTriggerGeometry() triggerCellIPhi_ = 0; triggerCellWaferU_ = id_nose_trig.waferU(); triggerCellWaferV_ = id_nose_trig.waferV(); + triggerCellWaferPart_ = -1; + triggerCellWaferOrient_ = -1; triggerCellU_ = id_nose_trig.triggerCellU(); triggerCellV_ = id_nose_trig.triggerCellV(); } else { @@ -1014,6 +1027,18 @@ void HGCalTriggerGeomTesterV9Imp3::fillTriggerGeometry() triggerCellWaferV_ = id_si_trig.waferV(); triggerCellU_ = id_si_trig.triggerCellU(); triggerCellV_ = id_si_trig.triggerCellV(); + + const HGCSiliconDetId& firstCellId(*triggercell_cells.second.begin()); + if (firstCellId.det() == 
DetId::HGCalEE) { + wafertype = triggerGeometry_->eeTopology().dddConstants().waferType(firstCellId, false); + } else if (firstCellId.det() == DetId::HGCalHSi) { + wafertype = triggerGeometry_->hsiTopology().dddConstants().waferType(firstCellId, false); + } else { + throw cms::Exception("BadGeometry") + << "HGCalTriggerGeometry: Found inconsistency in cell <-> trigger cell type mapping"; + } + triggerCellWaferPart_ = std::get<1>(wafertype); + triggerCellWaferOrient_ = std::get<2>(wafertype); } triggerCellX_ = position.x(); triggerCellY_ = position.y(); diff --git a/L1Trigger/L1THGCalUtilities/plugins/CaloTruthCellsProducer.cc b/L1Trigger/L1THGCalUtilities/plugins/CaloTruthCellsProducer.cc index d3200dfed83f7..c21f3e2e298f5 100644 --- a/L1Trigger/L1THGCalUtilities/plugins/CaloTruthCellsProducer.cc +++ b/L1Trigger/L1THGCalUtilities/plugins/CaloTruthCellsProducer.cc @@ -31,7 +31,6 @@ class CaloTruthCellsProducer : public edm::stream::EDProducer<> { static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginRun(const edm::Run&, const edm::EventSetup&) override; void produce(edm::Event&, edm::EventSetup const&) override; std::unordered_map makeHitMap(edm::Event const&, @@ -47,7 +46,6 @@ class CaloTruthCellsProducer : public edm::stream::EDProducer<> { edm::EDGetTokenT> simHitsTokenHEfront_; edm::EDGetTokenT> simHitsTokenHEback_; edm::ESGetToken triggerGeomToken_; - edm::ESHandle triggerGeomHandle_; HGCalClusteringDummyImpl dummyClustering_; HGCalShowerShape showerShape_; @@ -74,24 +72,19 @@ CaloTruthCellsProducer::CaloTruthCellsProducer(edm::ParameterSet const& config) CaloTruthCellsProducer::~CaloTruthCellsProducer() {} -void CaloTruthCellsProducer::beginRun(const edm::Run& /*run*/, const edm::EventSetup& es) { - triggerGeomHandle_ = es.getHandle(triggerGeomToken_); -} - void CaloTruthCellsProducer::produce(edm::Event& event, edm::EventSetup const& setup) { - edm::Handle caloParticlesHandle; - event.getByToken(caloParticlesToken_, 
caloParticlesHandle); - auto const& caloParticles(*caloParticlesHandle); + auto caloParticlesHandle = event.getHandle(caloParticlesToken_); + auto const& caloParticles = *caloParticlesHandle; - edm::Handle triggerCellsHandle; - event.getByToken(triggerCellsToken_, triggerCellsHandle); - auto const& triggerCells(*triggerCellsHandle); + auto const& triggerCellsHandle = event.getHandle(triggerCellsToken_); + auto const& triggerCells = *triggerCellsHandle; - auto const& geometry(*triggerGeomHandle_); + auto const& geometry = setup.getData(triggerGeomToken_); + ; - dummyClustering_.setGeometry(triggerGeomHandle_.product()); - showerShape_.setGeometry(triggerGeomHandle_.product()); - triggerTools_.setGeometry(triggerGeomHandle_.product()); + dummyClustering_.setGeometry(&geometry); + showerShape_.setGeometry(&geometry); + triggerTools_.setGeometry(&geometry); std::unordered_map tcToCalo; diff --git a/L1Trigger/L1TMuon/plugins/L1TBMTFConverter.cc b/L1Trigger/L1TMuon/plugins/L1TBMTFConverter.cc index b7c82cacd7700..038f9b5978adc 100644 --- a/L1Trigger/L1TMuon/plugins/L1TBMTFConverter.cc +++ b/L1Trigger/L1TMuon/plugins/L1TBMTFConverter.cc @@ -44,17 +44,12 @@ using namespace l1t; class L1TBMTFConverter : public edm::stream::EDProducer<> { public: explicit L1TBMTFConverter(const edm::ParameterSet&); - ~L1TBMTFConverter() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(const edm::Run&, edm::EventSetup const&) override; - void endRun(const edm::Run&, edm::EventSetup const&) override; - void beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - void endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetTokenT m_barrelTfInputToken; edm::InputTag m_barrelTfInputTag; @@ -111,11 +106,6 @@ L1TBMTFConverter::L1TBMTFConverter(const 
edm::ParameterSet& iConfig) { ptMap_[31] = 280; } -L1TBMTFConverter::~L1TBMTFConverter() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -144,18 +134,6 @@ void L1TBMTFConverter::produce(edm::Event& iEvent, const edm::EventSetup& iSetup iEvent.put(std::move(convMuons), "ConvBMTFMuons"); } -// ------------ method called when starting to processes a run ------------ -void L1TBMTFConverter::beginRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a run ------------ -void L1TBMTFConverter::endRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void L1TBMTFConverter::beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void L1TBMTFConverter::endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TBMTFConverter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducer.cc b/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducer.cc index a8fde2a16727d..6711efe5d69e1 100644 --- a/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducer.cc +++ b/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducer.cc @@ -53,11 +53,6 @@ class L1TMicroGMTInputProducer : public edm::stream::EDProducer<> { private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(const edm::Run&, edm::EventSetup const&) override; - void endRun(const edm::Run&, edm::EventSetup const&) override; - void beginLuminosityBlock(const edm::LuminosityBlock&, 
edm::EventSetup const&) override; - void endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - void openFile(); void skipHeader(); int convertToInt(std::string& bitstr) const; @@ -297,18 +292,6 @@ void L1TMicroGMTInputProducer::produce(edm::Event& iEvent, const edm::EventSetup m_currEvt++; } -// ------------ method called when starting to processes a run ------------ -void L1TMicroGMTInputProducer::beginRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a run ------------ -void L1TMicroGMTInputProducer::endRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void L1TMicroGMTInputProducer::beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void L1TMicroGMTInputProducer::endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TMicroGMTInputProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducerFromGen.cc b/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducerFromGen.cc index 0262221af860b..966c34df1e783 100644 --- a/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducerFromGen.cc +++ b/L1Trigger/L1TMuon/plugins/L1TMicroGMTInputProducerFromGen.cc @@ -50,18 +50,12 @@ using namespace l1t; class L1TMicroGMTInputProducerFromGen : public edm::stream::EDProducer<> { public: explicit L1TMicroGMTInputProducerFromGen(const edm::ParameterSet&); - ~L1TMicroGMTInputProducerFromGen() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void produce(edm::Event&, 
const edm::EventSetup&) override; - void beginRun(const edm::Run&, edm::EventSetup const&) override; - void endRun(const edm::Run&, edm::EventSetup const&) override; - void beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - void endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - static bool compareMuons(const RegionalMuonCand&, const RegionalMuonCand&); // ----------member data --------------------------- @@ -93,11 +87,6 @@ L1TMicroGMTInputProducerFromGen::L1TMicroGMTInputProducerFromGen(const edm::Para produces("TriggerTowerSums"); } -L1TMicroGMTInputProducerFromGen::~L1TMicroGMTInputProducerFromGen() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -242,18 +231,6 @@ void L1TMicroGMTInputProducerFromGen::produce(edm::Event& iEvent, const edm::Eve m_currEvt++; } -// ------------ method called when starting to processes a run ------------ -void L1TMicroGMTInputProducerFromGen::beginRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a run ------------ -void L1TMicroGMTInputProducerFromGen::endRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void L1TMicroGMTInputProducerFromGen::beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void L1TMicroGMTInputProducerFromGen::endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TMicroGMTInputProducerFromGen::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff 
--git a/L1Trigger/L1TMuon/plugins/L1TMuonCaloSumProducer.cc b/L1Trigger/L1TMuon/plugins/L1TMuonCaloSumProducer.cc index 2147901201956..85c063aef847a 100644 --- a/L1Trigger/L1TMuon/plugins/L1TMuonCaloSumProducer.cc +++ b/L1Trigger/L1TMuon/plugins/L1TMuonCaloSumProducer.cc @@ -47,18 +47,12 @@ using namespace l1t; class L1TMuonCaloSumProducer : public edm::stream::EDProducer<> { public: explicit L1TMuonCaloSumProducer(const edm::ParameterSet&); - ~L1TMuonCaloSumProducer() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(const edm::Run&, edm::EventSetup const&) override; - void endRun(const edm::Run&, edm::EventSetup const&) override; - void beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - void endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - edm::EDGetTokenT m_caloTowerToken; edm::InputTag m_caloLabel; }; @@ -83,11 +77,6 @@ L1TMuonCaloSumProducer::L1TMuonCaloSumProducer(const edm::ParameterSet& iConfig) produces("TriggerTower2x2s"); } -L1TMuonCaloSumProducer::~L1TMuonCaloSumProducer() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -191,18 +180,6 @@ void L1TMuonCaloSumProducer::produce(edm::Event& iEvent, const edm::EventSetup& iEvent.put(std::move(tower2x2s), "TriggerTower2x2s"); } -// ------------ method called when starting to processes a run ------------ -void L1TMuonCaloSumProducer::beginRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a run ------------ -void L1TMuonCaloSumProducer::endRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void L1TMuonCaloSumProducer::beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void L1TMuonCaloSumProducer::endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TMuonCaloSumProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TMuon/plugins/L1TMuonProducer.cc b/L1Trigger/L1TMuon/plugins/L1TMuonProducer.cc index 94378e8c43249..4353d7ec3aff7 100644 --- a/L1Trigger/L1TMuon/plugins/L1TMuonProducer.cc +++ b/L1Trigger/L1TMuon/plugins/L1TMuonProducer.cc @@ -67,9 +67,6 @@ class L1TMuonProducer : public edm::stream::EDProducer<> { void produce(edm::Event&, const edm::EventSetup&) override; void beginRun(edm::Run const&, edm::EventSetup const&) override; - void endRun(edm::Run const&, edm::EventSetup const&) override; - void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; static bool compareMuons(const std::shared_ptr& mu1, const std::shared_ptr& mu2); @@ -617,15 +614,6 @@ void 
L1TMuonProducer::beginRun(edm::Run const& run, edm::EventSetup const& iSetu } } -// ------------ method called when ending the processing of a run ------------ -void L1TMuonProducer::endRun(edm::Run const&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void L1TMuonProducer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void L1TMuonProducer::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TMuonProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TMuon/plugins/L1TMuonQualityAdjuster.cc b/L1Trigger/L1TMuon/plugins/L1TMuonQualityAdjuster.cc index 0215f67eb1ea4..95cf3bb6a52ff 100644 --- a/L1Trigger/L1TMuon/plugins/L1TMuonQualityAdjuster.cc +++ b/L1Trigger/L1TMuon/plugins/L1TMuonQualityAdjuster.cc @@ -34,16 +34,11 @@ using namespace l1t; class L1TMuonQualityAdjuster : public edm::stream::EDProducer<> { public: explicit L1TMuonQualityAdjuster(const edm::ParameterSet&); - ~L1TMuonQualityAdjuster() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(const edm::Run&, edm::EventSetup const&) override; - void endRun(const edm::Run&, edm::EventSetup const&) override; - void beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; - void endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetTokenT m_barrelTfInputToken; edm::EDGetTokenT m_overlapTfInputToken; @@ -80,11 +75,6 @@ 
L1TMuonQualityAdjuster::L1TMuonQualityAdjuster(const edm::ParameterSet& iConfig) produces("EMTF"); } -L1TMuonQualityAdjuster::~L1TMuonQualityAdjuster() { - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -161,18 +151,6 @@ void L1TMuonQualityAdjuster::produce(edm::Event& iEvent, const edm::EventSetup& iEvent.put(std::move(filteredEMTFMuons), "EMTF"); } -// ------------ method called when starting to processes a run ------------ -void L1TMuonQualityAdjuster::beginRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a run ------------ -void L1TMuonQualityAdjuster::endRun(const edm::Run&, edm::EventSetup const&) {} - -// ------------ method called when starting to processes a luminosity block ------------ -void L1TMuonQualityAdjuster::beginLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - -// ------------ method called when ending the processing of a luminosity block ------------ -void L1TMuonQualityAdjuster::endLuminosityBlock(const edm::LuminosityBlock&, edm::EventSetup const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TMuonQualityAdjuster::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.cc b/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.cc index e4d14200a6b24..20adbc0ab9e2f 100644 --- a/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.cc +++ b/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.cc @@ -8,8 +8,6 @@ L1TMuonEndCapTrackProducer::L1TMuonEndCapTrackProducer(const edm::ParameterSet& produces("EMTF"); // EMTF tracks output to uGMT } -L1TMuonEndCapTrackProducer::~L1TMuonEndCapTrackProducer() {} - void 
L1TMuonEndCapTrackProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { // Create pointers to output products auto out_hits_tmp = std::make_unique(); // before zero suppression diff --git a/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.h b/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.h index e63db89313cce..30aaba2ea5e2b 100644 --- a/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.h +++ b/L1Trigger/L1TMuonEndCap/plugins/L1TMuonEndCapTrackProducer.h @@ -19,20 +19,12 @@ class L1TMuonEndCapTrackProducer : public edm::stream::EDProducer<> { public: explicit L1TMuonEndCapTrackProducer(const edm::ParameterSet&); - ~L1TMuonEndCapTrackProducer() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: void produce(edm::Event&, const edm::EventSetup&) override; - //void beginJob() override; - //void endJob() override; - //void beginRun(edm::Run const&, edm::EventSetup const&) override; - //void endRun(edm::Run const&, edm::EventSetup const&) override; - //void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - private: std::unique_ptr track_finder_; std::unique_ptr uGMT_converter_; diff --git a/L1Trigger/L1TMuonEndCap/src/helper.h b/L1Trigger/L1TMuonEndCap/src/helper.h index 9dc44b3248691..3888e3e1c8e24 100644 --- a/L1Trigger/L1TMuonEndCap/src/helper.h +++ b/L1Trigger/L1TMuonEndCap/src/helper.h @@ -148,9 +148,9 @@ namespace { const std::ptrdiff_t len = std::distance(first, last); typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::pointer pointer; - std::pair p = std::get_temporary_buffer(len); - pointer buf = p.first; - pointer buf_end = std::next(p.first, p.second); + std::unique_ptr p{new value_type[len]}; + pointer buf = p.get(); + pointer buf_end = buf + len; RandomAccessIterator first1 = first; 
RandomAccessIterator last1 = middle; @@ -171,9 +171,8 @@ namespace { *buf++ = *first2++; } - buf = p.first; + buf = p.get(); std::copy(buf, buf_end, first); - std::return_temporary_buffer(p.first); } // See above @@ -201,9 +200,9 @@ namespace { const std::ptrdiff_t len = std::distance(first, last); typedef typename std::iterator_traits::value_type value_type; typedef typename std::iterator_traits::pointer pointer; - std::pair p = std::get_temporary_buffer(len); - pointer buf = p.first; - pointer buf_end = std::next(p.first, p.second); + std::unique_ptr p{new value_type[len]}; + pointer buf = p.get(); + const pointer buf_end = buf + len; RandomAccessIterator first1 = first; RandomAccessIterator last1 = one_third; @@ -249,9 +248,8 @@ namespace { *buf++ = *first2++; } - buf = p.first; + buf = p.get(); std::copy(buf, buf_end, first); - std::return_temporary_buffer(p.first); } // See above diff --git a/L1Trigger/L1TMuonOverlapPhase1/interface/Omtf/OMTFConfiguration.h b/L1Trigger/L1TMuonOverlapPhase1/interface/Omtf/OMTFConfiguration.h index af09b6b90bb78..0b49e7f3f95ec 100644 --- a/L1Trigger/L1TMuonOverlapPhase1/interface/Omtf/OMTFConfiguration.h +++ b/L1Trigger/L1TMuonOverlapPhase1/interface/Omtf/OMTFConfiguration.h @@ -7,9 +7,6 @@ #include #include -//#undef BOOST_DISABLE_ASSERTS //TODO remove for production version -#include "boost/multi_array.hpp" - #include "L1Trigger/L1TMuonOverlapPhase1/interface/ProcConfigurationBase.h" #include "CondFormats/L1TObjects/interface/L1TMuonOverlapParams.h" #include "DataFormats/L1TMuon/interface/RegionalMuonCandFwd.h" diff --git a/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/GoldenPattern.cc b/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/GoldenPattern.cc index 94377b009bbef..2f7a8b991d747 100644 --- a/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/GoldenPattern.cc +++ b/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/GoldenPattern.cc @@ -1,8 +1,5 @@ #include "L1Trigger/L1TMuonOverlapPhase1/interface/Omtf/GoldenPattern.h" -#include 
"boost/multi_array/multi_array_ref.hpp" -#include "boost/multi_array/subarray.hpp" - #include #include diff --git a/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/OMTFSorter.cc b/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/OMTFSorter.cc index 51ce07f2e03ec..49edd617240c5 100644 --- a/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/OMTFSorter.cc +++ b/L1Trigger/L1TMuonOverlapPhase1/src/Omtf/OMTFSorter.cc @@ -5,7 +5,6 @@ #include #include -#include #include #include diff --git a/L1Trigger/L1TMuonOverlapPhase1/src/Tools/DataROOTDumper2.cc b/L1Trigger/L1TMuonOverlapPhase1/src/Tools/DataROOTDumper2.cc index 4719b82b50fc4..b6fa6d73b14bf 100644 --- a/L1Trigger/L1TMuonOverlapPhase1/src/Tools/DataROOTDumper2.cc +++ b/L1Trigger/L1TMuonOverlapPhase1/src/Tools/DataROOTDumper2.cc @@ -15,15 +15,6 @@ #include "TFile.h" #include "TTree.h" -#include "TParameter.h" -#include "TObjString.h" - -#include -#include -#include -#include -#include -#include DataROOTDumper2::DataROOTDumper2(const edm::ParameterSet& edmCfg, const OMTFConfiguration* omtfConfig, diff --git a/L1Trigger/L1TNtuples/interface/L1AnalysisBMTFInputs.h b/L1Trigger/L1TNtuples/interface/L1AnalysisBMTFInputs.h index 9c3017cfcb1e2..0559cb217d4e8 100644 --- a/L1Trigger/L1TNtuples/interface/L1AnalysisBMTFInputs.h +++ b/L1Trigger/L1TNtuples/interface/L1AnalysisBMTFInputs.h @@ -9,12 +9,6 @@ #include "L1AnalysisBMTFInputsDataFormat.h" -//#include "FWCore/Framework/interface/Frameworkfwd.h" -//#include "FWCore/Framework/interface/EDAnalyzer.h" -//#include "FWCore/Framework/interface/Event.h" -//#include "FWCore/Framework/interface/MakerMacros.h" -//#include "FWCore/ParameterSet/interface/ParameterSet.h" - namespace L1Analysis { class L1AnalysisBMTFInputs { public: diff --git a/L1Trigger/L1TTrackMatch/plugins/L1TrackFastJetProducer.cc b/L1Trigger/L1TTrackMatch/plugins/L1TrackFastJetProducer.cc index b2c0cb4fa2eb5..4aaf2602fdd8b 100644 --- a/L1Trigger/L1TTrackMatch/plugins/L1TrackFastJetProducer.cc +++ 
b/L1Trigger/L1TTrackMatch/plugins/L1TrackFastJetProducer.cc @@ -1,14 +1,17 @@ /////////////////////////////////////////////////////////////////////////// // // -// Producer of TkJet, // +// Producer of TrackFastJet, // // Cluster L1 tracks using fastjet // // // +// Updates: Claire Savard (claire.savard@colorado.edu), Nov. 2023 // +// // /////////////////////////////////////////////////////////////////////////// // system include files #include // user include files +#include "DataFormats/Common/interface/RefVector.h" #include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/stream/EDProducer.h" #include "FWCore/Framework/interface/Event.h" @@ -52,6 +55,7 @@ class L1TrackFastJetProducer : public edm::stream::EDProducer<> { public: typedef TTTrack L1TTTrackType; typedef std::vector L1TTTrackCollectionType; + typedef edm::RefVector L1TTTrackRefCollectionType; explicit L1TrackFastJetProducer(const edm::ParameterSet&); ~L1TrackFastJetProducer() override; @@ -59,49 +63,20 @@ class L1TrackFastJetProducer : public edm::stream::EDProducer<> { static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - //virtual void beginJob(); void produce(edm::Event&, const edm::EventSetup&) override; - //virtual void endJob(); - - // track selection criteria - const float trkZMax_; // in [cm] - const float trkChi2dofMax_; // maximum track chi2dof - const double trkBendChi2Max_; // maximum track bendchi2 - const float trkPtMin_; // in [GeV] - const float trkEtaMax_; // in [rad] - const int trkNStubMin_; // minimum number of stubs - const int trkNPSStubMin_; // minimum number of PS stubs - const double deltaZ0Cut_; // save with |L1z-z0| < maxZ0 - const double coneSize_; // Use anti-kt with this cone size - const bool doTightChi2_; - const float trkPtTightChi2_; - const float trkChi2dofTightChi2_; - const bool displaced_; //use prompt/displaced tracks - - const edm::EDGetTokenT > > trackToken_; - edm::EDGetTokenT pvToken_; - 
edm::ESGetToken tTopoToken_; + + // jet configurations + const double coneSize_; // Use anti-kt with this cone size + const bool displaced_; //use prompt/displaced tracks + + const EDGetTokenT trackToken_; }; // constructor L1TrackFastJetProducer::L1TrackFastJetProducer(const edm::ParameterSet& iConfig) - : trkZMax_((float)iConfig.getParameter("trk_zMax")), - trkChi2dofMax_((float)iConfig.getParameter("trk_chi2dofMax")), - trkBendChi2Max_(iConfig.getParameter("trk_bendChi2Max")), - trkPtMin_((float)iConfig.getParameter("trk_ptMin")), - trkEtaMax_((float)iConfig.getParameter("trk_etaMax")), - trkNStubMin_((int)iConfig.getParameter("trk_nStubMin")), - trkNPSStubMin_((int)iConfig.getParameter("trk_nPSStubMin")), - deltaZ0Cut_((float)iConfig.getParameter("deltaZ0Cut")), - coneSize_((float)iConfig.getParameter("coneSize")), - doTightChi2_(iConfig.getParameter("doTightChi2")), - trkPtTightChi2_((float)iConfig.getParameter("trk_ptTightChi2")), - trkChi2dofTightChi2_((float)iConfig.getParameter("trk_chi2dofTightChi2")), + : coneSize_((float)iConfig.getParameter("coneSize")), displaced_(iConfig.getParameter("displaced")), - trackToken_(consumes > >( - iConfig.getParameter("L1TrackInputTag"))), - pvToken_(consumes(iConfig.getParameter("L1PrimaryVertexTag"))), - tTopoToken_(esConsumes(edm::ESInputTag("", ""))) { + trackToken_(consumes(iConfig.getParameter("L1TrackInputTag"))) { if (displaced_) produces("L1TrackFastJetsExtended"); else @@ -116,71 +91,22 @@ void L1TrackFastJetProducer::produce(edm::Event& iEvent, const edm::EventSetup& std::unique_ptr L1TrackFastJets(new TkJetCollection); // L1 tracks - edm::Handle > > TTTrackHandle; + edm::Handle TTTrackHandle; iEvent.getByToken(trackToken_, TTTrackHandle); - std::vector >::const_iterator iterL1Track; - - // Tracker Topology - const TrackerTopology& tTopo = iSetup.getData(tTopoToken_); - - edm::Handle L1PrimaryVertexHandle; - iEvent.getByToken(pvToken_, L1PrimaryVertexHandle); fastjet::JetDefinition 
jet_def(fastjet::antikt_algorithm, coneSize_); std::vector JetInputs; - float recoVtx = L1PrimaryVertexHandle->begin()->z0(); - unsigned int this_l1track = 0; - for (iterL1Track = TTTrackHandle->begin(); iterL1Track != TTTrackHandle->end(); iterL1Track++) { - this_l1track++; - float trk_pt = iterL1Track->momentum().perp(); - float trk_z0 = iterL1Track->z0(); - float trk_chi2dof = iterL1Track->chi2Red(); - float trk_bendchi2 = iterL1Track->stubPtConsistency(); - std::vector >, TTStub > > - theStubs = iterL1Track->getStubRefs(); - int trk_nstub = (int)theStubs.size(); - - if (std::abs(trk_z0) > trkZMax_) - continue; - if (std::abs(iterL1Track->momentum().eta()) > trkEtaMax_) - continue; - if (trk_pt < trkPtMin_) - continue; - if (trk_nstub < trkNStubMin_) - continue; - if (trk_chi2dof > trkChi2dofMax_) - continue; - if (trk_bendchi2 > trkBendChi2Max_) - continue; - if (doTightChi2_ && (trk_pt > trkPtTightChi2_ && trk_chi2dof > trkChi2dofTightChi2_)) - continue; - - int trk_nPS = 0; - for (int istub = 0; istub < trk_nstub; istub++) { - DetId detId(theStubs.at(istub)->getDetId()); - bool tmp_isPS = false; - if (detId.det() == DetId::Detector::Tracker) { - if (detId.subdetId() == StripSubdetector::TOB && tTopo.tobLayer(detId) <= 3) - tmp_isPS = true; - else if (detId.subdetId() == StripSubdetector::TID && tTopo.tidRing(detId) <= 9) - tmp_isPS = true; - } - if (tmp_isPS) - trk_nPS++; - } - if (trk_nPS < trkNPSStubMin_) - continue; - if (std::abs(recoVtx - trk_z0) > deltaZ0Cut_) - continue; + for (unsigned int this_l1track = 0; this_l1track < TTTrackHandle->size(); this_l1track++) { + edm::Ptr iterL1Track(TTTrackHandle, this_l1track); fastjet::PseudoJet psuedoJet(iterL1Track->momentum().x(), iterL1Track->momentum().y(), iterL1Track->momentum().z(), iterL1Track->momentum().mag()); - JetInputs.push_back(psuedoJet); // input tracks for clustering - JetInputs.back().set_user_index(this_l1track - 1); // save track index in the collection - } // end loop over tracks + 
JetInputs.push_back(psuedoJet); // input tracks for clustering + JetInputs.back().set_user_index(this_l1track); // save track index in the collection + } // end loop over tracks fastjet::ClusterSequence cs(JetInputs, jet_def); // define the output jet collection std::vector JetOutputs = @@ -213,16 +139,14 @@ void L1TrackFastJetProducer::produce(edm::Event& iEvent, const edm::EventSetup& iEvent.put(std::move(L1TrackFastJets), "L1TrackFastJets"); } -//void L1TrackFastJetProducer::beginJob() {} - -//void L1TrackFastJetProducer::endJob() {} - void L1TrackFastJetProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation - // Please change this to state exactly what you do use, even if it is no parameters edm::ParameterSetDescription desc; - desc.setUnknown(); - descriptions.addDefault(desc); + desc.add( + "L1PVertexInputTag", + edm::InputTag("l1tTrackVertexAssociationProducerForJets", "Level1TTTracksSelectedAssociated")); + desc.add("coneSize", 0.5); + desc.add("displaced", false); } //define this as a plug-in diff --git a/L1Trigger/L1TTrackMatch/plugins/L1TrackJetClustering.h b/L1Trigger/L1TTrackMatch/plugins/L1TrackJetClustering.h index c4fdf3dfd1c02..fa549b0ae853e 100644 --- a/L1Trigger/L1TTrackMatch/plugins/L1TrackJetClustering.h +++ b/L1Trigger/L1TTrackMatch/plugins/L1TrackJetClustering.h @@ -17,6 +17,22 @@ namespace l1ttrackjet { const unsigned int ETA_INTPART_BITS{3}; const unsigned int kExtraGlobalPhiBit{4}; + static constexpr int kEtaWordLength = 15; + static constexpr int kPhiWordLength = 12; + + //Constants used for jet clustering in eta direction + static constexpr int kThirteenBitMask = 0b1111111111111; + static constexpr int kEtaFineBinEdge1 = 0b0011001100110; + static constexpr int kEtaFineBinEdge2 = 0b0110011001100; + static constexpr int kEtaFineBinEdge3 = 0b1001100110010; + static constexpr int kEtaFineBinEdge4 = 0b1100110011000; + + //Constants used for 
jet clustering in phi direction + static constexpr int kTwelveBitMask = 0b011111111111; + static constexpr int kPhiBinHalfWidth = 0b000100101111; + static constexpr int kNumPhiBins = 27; + static constexpr int kPhiBinZeroOffset = 12; // phi bin zero offset between firmware and emulator + typedef ap_ufixed pt_intern; typedef ap_fixed glbeta_intern; typedef ap_int glbphi_intern; @@ -113,6 +129,77 @@ namespace l1ttrackjet { return PassQuality; } + // Eta binning following the hardware logic + inline unsigned int eta_bin_firmwareStyle(int eta_word) { + //Function that reads in 15-bit eta word (in two's complement) and returns the index of eta bin in which it belongs + //Logic follows exactly according to the firmware + //We will first determine if eta is pos/neg from the first bit. Each half of the grid is then split into four coarse bins. Bits 2&3 determine which coarse bin to assign + //The coarse bins are split into 5 fine bins, final 13 bits determine which of these coarse bins this track needs to assign + int eta_coarse = 0; + int eta_fine = 0; + + if (eta_word & (1 << kEtaWordLength)) { //If eta is negative (first/leftmost bit is 1) + //Second and third bits contain information about which of four coarse bins + eta_coarse = 5 * ((eta_word & (3 << (kEtaWordLength - 2))) >> (kEtaWordLength - 2)); + } else { //else eta is positive (first/leftmost bit is 0) + eta_coarse = 5 * (4 + ((eta_word & (3 << (kEtaWordLength - 2))) >> (kEtaWordLength - 2))); + } + + //Now get the fine bin index. 
The numbers correspond to the decimal representation of fine bin edges in binary + int j = eta_word & kThirteenBitMask; + if (j < kEtaFineBinEdge1) + eta_fine = 0; + else if (j < kEtaFineBinEdge2) + eta_fine = 1; + else if (j < kEtaFineBinEdge3) + eta_fine = 2; + else if (j < kEtaFineBinEdge4) + eta_fine = 3; + else + eta_fine = 4; + + //Final eta bin is coarse bin index + fine bin index, subtract 8 to make eta_bin at eta=-2.4 have index=0 + int eta_ = (eta_coarse + eta_fine) - 8; + return eta_; + } + + // Phi binning following the hardware logic + inline unsigned int phi_bin_firmwareStyle(int phi_sector_raw, int phi_word) { + //Function that reads in decimal integers phi_sector_raw and phi_word and returns the index of phi bin in which it belongs + //phi_sector_raw is integer 0-8 correspoding to one of 9 phi sectors + //phi_word is 12 bit word representing the phi value measured w.r.t the center of the sector + + int phi_coarse = 3 * phi_sector_raw; //phi_coarse is index of phi coarse binning (sector edges) + int phi_fine = 0; //phi_fine is index of fine bin inside sectors. Each sector contains 3 fine bins + + //Determine fine bin. First bit is sign, next 11 bits determine fine bin + //303 is distance from 0 to first fine bin edge + //2047 is eleven 1's, use the &2047 to extract leftmost 11 bits. + + //The allowed range for phi goes further than the edges of bin 0 or 2 (bit value 909). There's an apparent risk of phi being > 909, however this will always mean the track is in the next link (i.e. 
track beyond bin 2 in this link means track is actually in bin 0 of adjacent link) + + if (phi_word & (1 << (kPhiWordLength - 1))) { //if phi is negative (first bit 1) + //Since negative, we 'flip' the phi word, then check if it is in fine bin 0 or 1 + if ((kTwelveBitMask - (phi_word & kTwelveBitMask)) > kPhiBinHalfWidth) { + phi_fine = 0; + } else if ((kTwelveBitMask - (phi_word & kTwelveBitMask)) < kPhiBinHalfWidth) { + phi_fine = 1; + } + } else { //else phi is positive (first bit 0) + //positive phi, no 'flip' necessary. Just check if in fine bin 1 or 2 + if ((phi_word & kTwelveBitMask) < kPhiBinHalfWidth) { + phi_fine = 1; + } else if ((phi_word & kTwelveBitMask) > kPhiBinHalfWidth) { + phi_fine = 2; + } + } + + // Final operation is a shift by pi (half a grid) to make bin at index=0 at -pi + int phi_bin_ = (phi_coarse + phi_fine + kPhiBinZeroOffset) % kNumPhiBins; + + return phi_bin_; + } + // L1 clustering (in eta) template inline std::vector L1_clustering(T *phislice, int etaBins_, Eta etaStep_) { diff --git a/L1Trigger/L1TTrackMatch/plugins/L1TrackJetEmulatorProducer.cc b/L1Trigger/L1TTrackMatch/plugins/L1TrackJetEmulatorProducer.cc index 7382ef37bb818..63f052d264fc3 100644 --- a/L1Trigger/L1TTrackMatch/plugins/L1TrackJetEmulatorProducer.cc +++ b/L1Trigger/L1TTrackMatch/plugins/L1TrackJetEmulatorProducer.cc @@ -3,9 +3,10 @@ // // Rewritting/Improvements: George Karathanasis, // georgios.karathanasis@cern.ch, CU Boulder +// Claire Savard (claire.savard@colorado.edu) // // Created: Wed, 01 Aug 2018 14:01:41 GMT -// Latest update: Nov 2022 (by GK) +// Latest update: Nov 2023 (by CS) // // Track jets are clustered in a two-layer process, first by clustering in phi, // then by clustering in eta. The code proceeds as following: putting all tracks// in a grid of eta vs phi space, and then cluster them. 
Finally we merge the cl @@ -63,16 +64,9 @@ class L1TrackJetEmulatorProducer : public stream::EDProducer<> { // ----------member data --------------------------- std::vector> L1TrkPtrs_; - vector tdtrk_; const float trkZMax_; const float trkPtMax_; - const float trkPtMin_; const float trkEtaMax_; - const float nStubs4PromptChi2_; - const float nStubs5PromptChi2_; - const float nStubs4PromptBend_; - const float nStubs5PromptBend_; - const int trkNPSStubMin_; const int lowpTJetMinTrackMultiplicity_; const float lowpTJetThreshold_; const int highpTJetMinTrackMultiplicity_; @@ -84,36 +78,22 @@ class L1TrackJetEmulatorProducer : public stream::EDProducer<> { const bool displaced_; const float d0CutNStubs4_; const float d0CutNStubs5_; - const float nStubs4DisplacedChi2_; - const float nStubs5DisplacedChi2_; - const float nStubs4DisplacedBend_; - const float nStubs5DisplacedBend_; const int nDisplacedTracks_; - const float dzPVTrk_; - float PVz; float zStep_; glbeta_intern etaStep_; glbphi_intern phiStep_; TTTrack_TrackWord trackword; - edm::ESGetToken tTopoToken_; const EDGetTokenT trackToken_; - const EDGetTokenT PVtxToken_; }; //constructor L1TrackJetEmulatorProducer::L1TrackJetEmulatorProducer(const ParameterSet &iConfig) : trkZMax_(iConfig.getParameter("trk_zMax")), trkPtMax_(iConfig.getParameter("trk_ptMax")), - trkPtMin_(iConfig.getParameter("trk_ptMin")), trkEtaMax_(iConfig.getParameter("trk_etaMax")), - nStubs4PromptChi2_(iConfig.getParameter("nStubs4PromptChi2")), - nStubs5PromptChi2_(iConfig.getParameter("nStubs5PromptChi2")), - nStubs4PromptBend_(iConfig.getParameter("nStubs4PromptBend")), - nStubs5PromptBend_(iConfig.getParameter("nStubs5PromptBend")), - trkNPSStubMin_(iConfig.getParameter("trk_nPSStubMin")), lowpTJetMinTrackMultiplicity_(iConfig.getParameter("lowpTJetMinTrackMultiplicity")), lowpTJetThreshold_(iConfig.getParameter("lowpTJetThreshold")), highpTJetMinTrackMultiplicity_(iConfig.getParameter("highpTJetMinTrackMultiplicity")), @@ -125,15 +105,8 @@ 
L1TrackJetEmulatorProducer::L1TrackJetEmulatorProducer(const ParameterSet &iConf displaced_(iConfig.getParameter("displaced")), d0CutNStubs4_(iConfig.getParameter("d0_cutNStubs4")), d0CutNStubs5_(iConfig.getParameter("d0_cutNStubs5")), - nStubs4DisplacedChi2_(iConfig.getParameter("nStubs4DisplacedChi2")), - nStubs5DisplacedChi2_(iConfig.getParameter("nStubs5DisplacedChi2")), - nStubs4DisplacedBend_(iConfig.getParameter("nStubs4DisplacedBend")), - nStubs5DisplacedBend_(iConfig.getParameter("nStubs5DisplacedBend")), nDisplacedTracks_(iConfig.getParameter("nDisplacedTracks")), - dzPVTrk_(iConfig.getParameter("MaxDzTrackPV")), - tTopoToken_(esConsumes(edm::ESInputTag("", ""))), - trackToken_(consumes(iConfig.getParameter("L1TrackInputTag"))), - PVtxToken_(consumes(iConfig.getParameter("L1PVertexInputTag"))) { + trackToken_(consumes(iConfig.getParameter("L1TrackInputTag"))) { zStep_ = 2.0 * trkZMax_ / (zBins_ + 1); // added +1 in denom etaStep_ = glbeta_intern(2.0 * trkEtaMax_ / etaBins_); //etaStep is the width of an etabin phiStep_ = DoubleToBit(2.0 * (M_PI) / phiBins_, @@ -148,58 +121,15 @@ L1TrackJetEmulatorProducer::L1TrackJetEmulatorProducer(const ParameterSet &iConf void L1TrackJetEmulatorProducer::produce(Event &iEvent, const EventSetup &iSetup) { unique_ptr L1TrackJetContainer(new l1t::TkJetWordCollection); - // Read inputs - const TrackerTopology &tTopo = iSetup.getData(tTopoToken_); + // L1 tracks edm::Handle TTTrackHandle; iEvent.getByToken(trackToken_, TTTrackHandle); - edm::Handle PVtx; - iEvent.getByToken(PVtxToken_, PVtx); - float PVz = (PVtx->at(0)).z0(); - L1TrkPtrs_.clear(); - tdtrk_.clear(); // track selection for (unsigned int this_l1track = 0; this_l1track < TTTrackHandle->size(); this_l1track++) { edm::Ptr trkPtr(TTTrackHandle, this_l1track); - float trk_pt = trkPtr->momentum().perp(); - int trk_nstubs = (int)trkPtr->getStubRefs().size(); - float trk_chi2dof = trkPtr->chi2Red(); - float trk_bendchi2 = trkPtr->stubPtConsistency(); - int trk_nPS = 
0; - for (int istub = 0; istub < trk_nstubs; istub++) { - DetId detId(trkPtr->getStubRefs().at(istub)->getDetId()); - if (detId.det() == DetId::Detector::Tracker) { - if ((detId.subdetId() == StripSubdetector::TOB && tTopo.tobLayer(detId) <= 3) || - (detId.subdetId() == StripSubdetector::TID && tTopo.tidRing(detId) <= 9)) - trk_nPS++; - } - } - // selection tracks - supposed to happen on seperate module (kept for legacy/debug reasons) - if (trk_nPS < trkNPSStubMin_) - continue; - if (!TrackQualitySelection(trk_nstubs, - trk_chi2dof, - trk_bendchi2, - nStubs4PromptBend_, - nStubs5PromptBend_, - nStubs4PromptChi2_, - nStubs5PromptChi2_, - nStubs4DisplacedBend_, - nStubs5DisplacedBend_, - nStubs4DisplacedChi2_, - nStubs5DisplacedChi2_, - displaced_)) - continue; - if (std::abs(PVz - trkPtr->z0()) > dzPVTrk_ && dzPVTrk_ > 0) - continue; - if (std::abs(trkPtr->z0()) > trkZMax_) - continue; - if (std::abs(trkPtr->momentum().eta()) > trkEtaMax_) - continue; - if (trk_pt < trkPtMin_) - continue; L1TrkPtrs_.push_back(trkPtr); } @@ -256,22 +186,20 @@ void L1TrackJetEmulatorProducer::produce(Event &iEvent, const EventSetup &iSetup } } + //Begin Firmware-style clustering // logic: loop over z bins find tracks in this bin and arrange them in a 2D eta-phi matrix for (unsigned int zbin = 0; zbin < zmins.size(); ++zbin) { // initialize matrices for every z bin z0_intern zmin = zmins[zbin]; z0_intern zmax = zmaxs[zbin]; + TrackJetEmulationEtaPhiBin epbins[phiBins_][etaBins_]; + std::copy(&epbins_default[0][0], &epbins_default[0][0] + phiBins_ * etaBins_, &epbins[0][0]); - //clear containers L1clusters.clear(); L2clusters.clear(); - - // fill grid for (unsigned int k = 0; k < L1TrkPtrs_.size(); ++k) { - //// conversions - //-z0 z0_intern trkZ = L1TrkPtrs_[k]->getZ0Word(); if (zmax < trkZ) @@ -281,75 +209,40 @@ void L1TrackJetEmulatorProducer::produce(Event &iEvent, const EventSetup &iSetup if (zbin == 0 && zmin == trkZ) continue; - //-pt + // Pt ap_uint ptEmulationBits = 
L1TrkPtrs_[k]->getRinvWord(); pt_intern trkpt; trkpt.V = ptEmulationBits.range(); - //-eta - TTTrack_TrackWord::tanl_t etaEmulationBits = L1TrkPtrs_[k]->getTanlWord(); - glbeta_intern trketa; - trketa.V = etaEmulationBits.range(); - - //-phi - int Sector = L1TrkPtrs_[k]->phiSector(); - double sector_phi_value = 0; - if (Sector < 5) { - sector_phi_value = 2.0 * M_PI * Sector / 9.0; - } else { - sector_phi_value = (-1.0 * M_PI + M_PI / 9.0 + (Sector - 5) * 2.0 * M_PI / 9.0); - } - - glbphi_intern trkphiSector = DoubleToBit(sector_phi_value, - TTTrack_TrackWord::TrackBitWidths::kPhiSize + kExtraGlobalPhiBit, - TTTrack_TrackWord::stepPhi0); - glbphi_intern local_phi = 0; - local_phi.V = L1TrkPtrs_[k]->getPhiWord(); - glbphi_intern local_phi2 = - DoubleToBit(BitToDouble(local_phi, TTTrack_TrackWord::TrackBitWidths::kPhiSize, TTTrack_TrackWord::stepPhi0), - TTTrack_TrackWord::TrackBitWidths::kPhiSize + kExtraGlobalPhiBit, - TTTrack_TrackWord::stepPhi0); - glbphi_intern trkphi = local_phi2 + trkphiSector; - - //-d0 + // d0 d0_intern abs_trkD0 = L1TrkPtrs_[k]->getD0Word(); - //-nstub + // nstubs int trk_nstubs = (int)L1TrkPtrs_[k]->getStubRefs().size(); - // now fill the 2D grid with tracks - for (int i = 0; i < phiBins_; ++i) { - for (int j = 0; j < etaBins_; ++j) { - glbeta_intern eta_min = epbins[i][j].eta - etaStep_ / 2; //eta min - glbeta_intern eta_max = epbins[i][j].eta + etaStep_ / 2; //eta max - glbphi_intern phi_min = epbins[i][j].phi - phiStep_ / 2; //phi min - glbphi_intern phi_max = epbins[i][j].phi + phiStep_ / 2; //phi max - if ((trketa < eta_min) && j != 0) - continue; - if ((trketa > eta_max) && j != etaBins_ - 1) - continue; - if ((trkphi < phi_min) && i != 0) - continue; - if ((trkphi > phi_max) && i != phiBins_ - 1) - continue; - - if (trkpt < pt_intern(trkPtMax_)) - epbins[i][j].pTtot += trkpt; - else - epbins[i][j].pTtot += pt_intern(trkPtMax_); - if ((abs_trkD0 > - DoubleToBit(d0CutNStubs5_, TTTrack_TrackWord::TrackBitWidths::kD0Size, 
TTTrack_TrackWord::stepD0) && - trk_nstubs >= 5 && d0CutNStubs5_ >= 0) || - (abs_trkD0 > - DoubleToBit(d0CutNStubs4_, TTTrack_TrackWord::TrackBitWidths::kD0Size, TTTrack_TrackWord::stepD0) && - trk_nstubs == 4 && d0CutNStubs4_ >= 0)) - epbins[i][j].nxtracks += 1; - - epbins[i][j].trackidx.push_back(k); - ++epbins[i][j].ntracks; - } // for each etabin - } // for each phibin - } //end loop over tracks + // Phi bin + int i = phi_bin_firmwareStyle(L1TrkPtrs_[k]->phiSector(), + L1TrkPtrs_[k]->getPhiWord()); //Function defined in L1TrackJetClustering.h + + // Eta bin + int j = eta_bin_firmwareStyle(L1TrkPtrs_[k]->getTanlWord()); //Function defined in L1TrackJetClustering.h + + if (trkpt < pt_intern(trkPtMax_)) + epbins[i][j].pTtot += trkpt; + else + epbins[i][j].pTtot += pt_intern(trkPtMax_); + if ((abs_trkD0 > + DoubleToBit(d0CutNStubs5_, TTTrack_TrackWord::TrackBitWidths::kD0Size, TTTrack_TrackWord::stepD0) && + trk_nstubs >= 5 && d0CutNStubs5_ >= 0) || + (abs_trkD0 > + DoubleToBit(d0CutNStubs4_, TTTrack_TrackWord::TrackBitWidths::kD0Size, TTTrack_TrackWord::stepD0) && + trk_nstubs == 4 && d0CutNStubs4_ >= 0)) + epbins[i][j].nxtracks += 1; + + epbins[i][j].trackidx.push_back(k); + ++epbins[i][j].ntracks; + } + //End Firmware style clustering // first layer clustering - in eta using grid for (int phibin = 0; phibin < phiBins_; ++phibin) { @@ -385,9 +278,6 @@ void L1TrackJetEmulatorProducer::produce(Event &iEvent, const EventSetup &iSetup vector> L1TrackAssocJet; for (unsigned int j = 0; j < mzb.clusters.size(); ++j) { - if (mzb.clusters[j].pTtot < pt_intern(trkPtMin_)) - continue; - l1t::TkJetWord::glbeta_t jetEta = DoubleToBit(double(mzb.clusters[j].eta), TkJetWord::TkJetBitWidths::kGlbEtaSize, TkJetWord::MAX_ETA / (1 << TkJetWord::TkJetBitWidths::kGlbEtaSize)); @@ -402,7 +292,7 @@ void L1TrackJetEmulatorProducer::produce(Event &iEvent, const EventSetup &iSetup l1t::TkJetWord::dispflag_t dispflag = 0; l1t::TkJetWord::tkjetunassigned_t unassigned = 0; - if 
(total_disptracks > nDisplacedTracks_ || total_disptracks == nDisplacedTracks_) + if (total_disptracks >= nDisplacedTracks_) dispflag = 1; L1TrackAssocJet.clear(); for (unsigned int itrk = 0; itrk < mzb.clusters[j].trackidx.size(); itrk++) @@ -425,17 +315,9 @@ void L1TrackJetEmulatorProducer::fillDescriptions(ConfigurationDescriptions &des // Please change this to state exactly what you do use, even if it is no parameters ParameterSetDescription desc; desc.add("L1TrackInputTag", edm::InputTag("l1tTTTracksFromTrackletEmulation", "Level1TTTracks")); - desc.add("L1PVertexInputTag", edm::InputTag("l1tVertexFinderEmulator", "L1VerticesEmulation")); - desc.add("MaxDzTrackPV", 1.0); desc.add("trk_zMax", 15.0); desc.add("trk_ptMax", 200.0); - desc.add("trk_ptMin", 3.0); desc.add("trk_etaMax", 2.4); - desc.add("nStubs4PromptChi2", 5.0); - desc.add("nStubs4PromptBend", 1.7); - desc.add("nStubs5PromptChi2", 2.75); - desc.add("nStubs5PromptBend", 3.5); - desc.add("trk_nPSStubMin", -1); desc.add("minTrkJetpT", -1.0); desc.add("etaBins", 24); desc.add("phiBins", 27); @@ -447,10 +329,6 @@ void L1TrackJetEmulatorProducer::fillDescriptions(ConfigurationDescriptions &des desc.add("highpTJetMinTrackMultiplicity", 3); desc.add("highpTJetThreshold", 100.0); desc.add("displaced", false); - desc.add("nStubs4DisplacedChi2", 5.0); - desc.add("nStubs4DisplacedBend", 1.7); - desc.add("nStubs5DisplacedChi2", 2.75); - desc.add("nStubs5DisplacedBend", 3.5); desc.add("nDisplacedTracks", 2); descriptions.add("l1tTrackJetsEmulator", desc); } diff --git a/L1Trigger/L1TTrackMatch/plugins/L1TrackJetProducer.cc b/L1Trigger/L1TTrackMatch/plugins/L1TrackJetProducer.cc index 80da20559d827..9b329eed1f6f6 100644 --- a/L1Trigger/L1TTrackMatch/plugins/L1TrackJetProducer.cc +++ b/L1Trigger/L1TTrackMatch/plugins/L1TrackJetProducer.cc @@ -2,9 +2,10 @@ // // Rewritting/improvements: George Karathanasis, // georgios.karathanasis@cern.ch, CU Boulder +// Claire Savard (claire.savard@colorado.edu) // // Created: 
Wed, 01 Aug 2018 14:01:41 GMT -// Latest update: Nov 2022 (by GK) +// Latest update: Nov 2023 (by CS) // // Track jets are clustered in a two-layer process, first by clustering in phi, // then by clustering in eta. The code proceeds as following: putting all tracks @@ -57,16 +58,9 @@ class L1TrackJetProducer : public stream::EDProducer<> { // ----------member data --------------------------- vector> L1TrkPtrs_; - vector tdtrk_; const float trkZMax_; const float trkPtMax_; - const float trkPtMin_; const float trkEtaMax_; - const float nStubs4PromptChi2_; - const float nStubs5PromptChi2_; - const float nStubs4PromptBend_; - const float nStubs5PromptBend_; - const int trkNPSStubMin_; const int lowpTJetMinTrackMultiplicity_; const float lowpTJetThreshold_; const int highpTJetMinTrackMultiplicity_; @@ -81,28 +75,15 @@ class L1TrackJetProducer : public stream::EDProducer<> { const bool displaced_; const float d0CutNStubs4_; const float d0CutNStubs5_; - const float nStubs4DisplacedChi2_; - const float nStubs5DisplacedChi2_; - const float nStubs4DisplacedBend_; - const float nStubs5DisplacedBend_; const int nDisplacedTracks_; - const float dzPVTrk_; - edm::ESGetToken tTopoToken_; const EDGetTokenT trackToken_; - const EDGetTokenT PVtxToken_; }; L1TrackJetProducer::L1TrackJetProducer(const ParameterSet &iConfig) : trkZMax_(iConfig.getParameter("trk_zMax")), trkPtMax_(iConfig.getParameter("trk_ptMax")), - trkPtMin_(iConfig.getParameter("trk_ptMin")), trkEtaMax_(iConfig.getParameter("trk_etaMax")), - nStubs4PromptChi2_(iConfig.getParameter("nStubs4PromptChi2")), - nStubs5PromptChi2_(iConfig.getParameter("nStubs5PromptChi2")), - nStubs4PromptBend_(iConfig.getParameter("nStubs4PromptBend")), - nStubs5PromptBend_(iConfig.getParameter("nStubs5PromptBend")), - trkNPSStubMin_(iConfig.getParameter("trk_nPSStubMin")), lowpTJetMinTrackMultiplicity_(iConfig.getParameter("lowpTJetMinTrackMultiplicity")), lowpTJetThreshold_(iConfig.getParameter("lowpTJetThreshold")), 
highpTJetMinTrackMultiplicity_(iConfig.getParameter("highpTJetMinTrackMultiplicity")), @@ -114,15 +95,8 @@ L1TrackJetProducer::L1TrackJetProducer(const ParameterSet &iConfig) displaced_(iConfig.getParameter("displaced")), d0CutNStubs4_(iConfig.getParameter("d0_cutNStubs4")), d0CutNStubs5_(iConfig.getParameter("d0_cutNStubs5")), - nStubs4DisplacedChi2_(iConfig.getParameter("nStubs4DisplacedChi2")), - nStubs5DisplacedChi2_(iConfig.getParameter("nStubs5DisplacedChi2")), - nStubs4DisplacedBend_(iConfig.getParameter("nStubs4DisplacedBend")), - nStubs5DisplacedBend_(iConfig.getParameter("nStubs5DisplacedBend")), nDisplacedTracks_(iConfig.getParameter("nDisplacedTracks")), - dzPVTrk_(iConfig.getParameter("MaxDzTrackPV")), - tTopoToken_(esConsumes(edm::ESInputTag("", ""))), - trackToken_(consumes(iConfig.getParameter("L1TrackInputTag"))), - PVtxToken_(consumes(iConfig.getParameter("L1PVertexInputTag"))) { + trackToken_(consumes(iConfig.getParameter("L1TrackInputTag"))) { zStep_ = 2.0 * trkZMax_ / (zBins_ + 1); // added +1 in denom etaStep_ = 2.0 * trkEtaMax_ / etaBins_; //etaStep is the width of an etabin phiStep_ = 2 * M_PI / phiBins_; ////phiStep is the width of a phibin @@ -134,78 +108,26 @@ L1TrackJetProducer::L1TrackJetProducer(const ParameterSet &iConfig) } void L1TrackJetProducer::produce(Event &iEvent, const EventSetup &iSetup) { - unique_ptr L1L1TrackJetProducer(new TkJetCollection); - - // Read inputs - const TrackerTopology &tTopo = iSetup.getData(tTopoToken_); + unique_ptr L1TrackJetProducer(new TkJetCollection); + // L1 tracks edm::Handle TTTrackHandle; iEvent.getByToken(trackToken_, TTTrackHandle); - edm::Handle PVtx; - iEvent.getByToken(PVtxToken_, PVtx); - float PVz = (PVtx->at(0)).z0(); - L1TrkPtrs_.clear(); - tdtrk_.clear(); // track selection for (unsigned int this_l1track = 0; this_l1track < TTTrackHandle->size(); this_l1track++) { edm::Ptr trkPtr(TTTrackHandle, this_l1track); - float trk_pt = trkPtr->momentum().perp(); - int trk_nstubs = 
(int)trkPtr->getStubRefs().size(); - float trk_chi2dof = trkPtr->chi2Red(); - float trk_d0 = trkPtr->d0(); - float trk_bendchi2 = trkPtr->stubPtConsistency(); - - int trk_nPS = 0; - for (int istub = 0; istub < trk_nstubs; istub++) { // loop over the stubs - DetId detId(trkPtr->getStubRefs().at(istub)->getDetId()); - if (detId.det() == DetId::Detector::Tracker) { - if ((detId.subdetId() == StripSubdetector::TOB && tTopo.tobLayer(detId) <= 3) || - (detId.subdetId() == StripSubdetector::TID && tTopo.tidRing(detId) <= 9)) - trk_nPS++; - } - } - // select tracks - if (trk_nPS < trkNPSStubMin_) - continue; - if (!TrackQualitySelection(trk_nstubs, - trk_chi2dof, - trk_bendchi2, - nStubs4PromptBend_, - nStubs5PromptBend_, - nStubs4PromptChi2_, - nStubs5PromptChi2_, - nStubs4DisplacedBend_, - nStubs5DisplacedBend_, - nStubs4DisplacedChi2_, - nStubs5DisplacedChi2_, - displaced_)) - continue; - if (std::abs(PVz - trkPtr->z0()) > dzPVTrk_ && dzPVTrk_ > 0) - continue; - if (std::abs(trkPtr->z0()) > trkZMax_) - continue; - if (std::abs(trkPtr->momentum().eta()) > trkEtaMax_) - continue; - if (trk_pt < trkPtMin_) - continue; L1TrkPtrs_.push_back(trkPtr); - - if ((std::abs(trk_d0) > d0CutNStubs5_ && trk_nstubs >= 5 && d0CutNStubs5_ >= 0) || - (trk_nstubs == 4 && std::abs(trk_d0) > d0CutNStubs4_ && d0CutNStubs4_ >= 0)) - tdtrk_.push_back(1); //displaced track - else - tdtrk_.push_back(0); // not displaced track } // if no tracks pass selection return empty containers if (L1TrkPtrs_.empty()) { if (displaced_) - iEvent.put(std::move(L1L1TrackJetProducer), "L1TrackJetsExtended"); + iEvent.put(std::move(L1TrackJetProducer), "L1TrackJetsExtended"); else - iEvent.put(std::move(L1L1TrackJetProducer), "L1TrackJets"); + iEvent.put(std::move(L1TrackJetProducer), "L1TrackJets"); return; } @@ -273,6 +195,8 @@ void L1TrackJetProducer::produce(Event &iEvent, const EventSetup &iSetup) { float trkpt = L1TrkPtrs_[k]->momentum().perp(); float trketa = L1TrkPtrs_[k]->momentum().eta(); float trkphi = 
L1TrkPtrs_[k]->momentum().phi(); + float trkd0 = L1TrkPtrs_[k]->d0(); + int trknstubs = (int)L1TrkPtrs_[k]->getStubRefs().size(); for (int i = 0; i < phiBins_; ++i) { for (int j = 0; j < etaBins_; ++j) { float eta_min = epbins[i][j].eta - etaStep_ / 2.0; //eta min @@ -287,7 +211,9 @@ void L1TrackJetProducer::produce(Event &iEvent, const EventSetup &iSetup) { epbins[i][j].pTtot += trkpt; else epbins[i][j].pTtot += trkPtMax_; - epbins[i][j].nxtracks += tdtrk_[k]; + if ((std::abs(trkd0) > d0CutNStubs5_ && trknstubs >= 5 && d0CutNStubs5_ >= 0) || + (trknstubs == 4 && std::abs(trkd0) > d0CutNStubs4_ && d0CutNStubs4_ >= 0)) + epbins[i][j].nxtracks += 1; epbins[i][j].trackidx.push_back(k); ++epbins[i][j].ntracks; } // for each etabin @@ -335,7 +261,7 @@ void L1TrackJetProducer::produce(Event &iEvent, const EventSetup &iSetup) { float jetPz = jetPt * sinh(jetEta); float jetP = jetPt * cosh(jetEta); int totalDisptrk = mzb.clusters[j].nxtracks; - bool isDispJet = (totalDisptrk > nDisplacedTracks_ || totalDisptrk == nDisplacedTracks_); + bool isDispJet = (totalDisptrk >= nDisplacedTracks_); math::XYZTLorentzVector jetP4(jetPx, jetPy, jetPz, jetP); L1TrackAssocJet.clear(); @@ -344,31 +270,23 @@ void L1TrackJetProducer::produce(Event &iEvent, const EventSetup &iSetup) { TkJet trkJet(jetP4, L1TrackAssocJet, mzb.zbincenter, mzb.clusters[j].ntracks, 0, totalDisptrk, 0, isDispJet); - L1L1TrackJetProducer->push_back(trkJet); + L1TrackJetProducer->push_back(trkJet); } - std::sort( - L1L1TrackJetProducer->begin(), L1L1TrackJetProducer->end(), [](auto &a, auto &b) { return a.pt() > b.pt(); }); + std::sort(L1TrackJetProducer->begin(), L1TrackJetProducer->end(), [](auto &a, auto &b) { return a.pt() > b.pt(); }); if (displaced_) - iEvent.put(std::move(L1L1TrackJetProducer), "L1TrackJetsExtended"); + iEvent.put(std::move(L1TrackJetProducer), "L1TrackJetsExtended"); else - iEvent.put(std::move(L1L1TrackJetProducer), "L1TrackJets"); + iEvent.put(std::move(L1TrackJetProducer), "L1TrackJets"); 
} void L1TrackJetProducer::fillDescriptions(ConfigurationDescriptions &descriptions) { ParameterSetDescription desc; - desc.add("L1TrackInputTag", edm::InputTag("l1tTTTracksFromTrackletEmulation", "Level1TTTracks")); - desc.add("L1PVertexInputTag", edm::InputTag("l1tVertexFinderEmulator", "L1VerticesEmulation")); - desc.add("MaxDzTrackPV", 1.0); + desc.add( + "L1TrackInputTag", edm::InputTag("l1tTrackVertexAssociationProducerForJets", "Level1TTTracksSelectedAssociated")); desc.add("trk_zMax", 15.0); desc.add("trk_ptMax", 200.0); - desc.add("trk_ptMin", 3.0); desc.add("trk_etaMax", 2.4); - desc.add("nStubs4PromptChi2", 5.0); - desc.add("nStubs4PromptBend", 1.7); - desc.add("nStubs5PromptChi2", 2.75); - desc.add("nStubs5PromptBend", 3.5); - desc.add("trk_nPSStubMin", -1); desc.add("minTrkJetpT", -1.0); desc.add("etaBins", 24); desc.add("phiBins", 27); @@ -380,10 +298,6 @@ void L1TrackJetProducer::fillDescriptions(ConfigurationDescriptions &description desc.add("highpTJetMinTrackMultiplicity", 3); desc.add("highpTJetThreshold", 100.0); desc.add("displaced", false); - desc.add("nStubs4DisplacedChi2", 5.0); - desc.add("nStubs4DisplacedBend", 1.7); - desc.add("nStubs5DisplacedChi2", 2.75); - desc.add("nStubs5DisplacedBend", 3.5); desc.add("nDisplacedTracks", 2); descriptions.add("l1tTrackJets", desc); } diff --git a/L1Trigger/L1TTrackMatch/plugins/L1TrackSelectionProducer.cc b/L1Trigger/L1TTrackMatch/plugins/L1TrackSelectionProducer.cc index aa8fcc97752f8..3c71f53073875 100644 --- a/L1Trigger/L1TTrackMatch/plugins/L1TrackSelectionProducer.cc +++ b/L1Trigger/L1TTrackMatch/plugins/L1TrackSelectionProducer.cc @@ -19,6 +19,7 @@ // Original Author: Alexx Perloff // Created: Thu, 16 Dec 2021 19:02:50 GMT // +// Updates: Claire Savard (claire.savard@colorado.edu), Nov. 
2023 // // system include files @@ -201,6 +202,26 @@ class L1TrackSelectionProducer : public edm::global::EDProducer<> { double nPSStubsMin_; const TrackerTopology& tTopo_; }; + struct TTTrackPromptMVAMinSelector { + TTTrackPromptMVAMinSelector(double promptMVAMin) : promptMVAMin_(promptMVAMin) {} + TTTrackPromptMVAMinSelector(const edm::ParameterSet& cfg) + : promptMVAMin_(cfg.template getParameter("promptMVAMin")) {} + bool operator()(const L1Track& t) const { return t.trkMVA1() >= promptMVAMin_; } + + private: + double promptMVAMin_; + }; + struct TTTrackWordPromptMVAMinSelector { + TTTrackWordPromptMVAMinSelector(double promptMVAMin) : promptMVAMin_(promptMVAMin) {} + TTTrackWordPromptMVAMinSelector(const edm::ParameterSet& cfg) + : promptMVAMin_(cfg.template getParameter("promptMVAMin")) {} + bool operator()(const L1Track& t) const { + return t.trkMVA1() >= promptMVAMin_; + } //change when mva bins in word are set + + private: + double promptMVAMin_; + }; struct TTTrackBendChi2MaxSelector { TTTrackBendChi2MaxSelector(double bendChi2Max) : bendChi2Max_(bendChi2Max) {} TTTrackBendChi2MaxSelector(const edm::ParameterSet& cfg) @@ -255,6 +276,96 @@ class L1TrackSelectionProducer : public edm::global::EDProducer<> { private: double reducedChi2RPhiMax_; }; + struct TTTrackChi2RZMaxNstubSelector { + TTTrackChi2RZMaxNstubSelector(double reducedChi2RZMaxNstub4, double reducedChi2RZMaxNstub5) + : reducedChi2RZMaxNstub4_(reducedChi2RZMaxNstub4), reducedChi2RZMaxNstub5_(reducedChi2RZMaxNstub5) {} + TTTrackChi2RZMaxNstubSelector(const edm::ParameterSet& cfg) + : reducedChi2RZMaxNstub4_(cfg.template getParameter("reducedChi2RZMaxNstub4")), + reducedChi2RZMaxNstub5_(cfg.template getParameter("reducedChi2RZMaxNstub5")) {} + bool operator()(const L1Track& t) const { + return (((t.chi2ZRed() < reducedChi2RZMaxNstub4_) && (t.getStubRefs().size() == 4)) || + ((t.chi2ZRed() < reducedChi2RZMaxNstub5_) && (t.getStubRefs().size() > 4))); + } + + private: + double 
reducedChi2RZMaxNstub4_; + double reducedChi2RZMaxNstub5_; + }; + struct TTTrackWordChi2RZMaxNstubSelector { + TTTrackWordChi2RZMaxNstubSelector(double reducedChi2RZMaxNstub4, double reducedChi2RZMaxNstub5) + : reducedChi2RZMaxNstub4_(reducedChi2RZMaxNstub4), reducedChi2RZMaxNstub5_(reducedChi2RZMaxNstub5) {} + TTTrackWordChi2RZMaxNstubSelector(const edm::ParameterSet& cfg) + : reducedChi2RZMaxNstub4_(cfg.template getParameter("reducedChi2RZMaxNstub4")), + reducedChi2RZMaxNstub5_(cfg.template getParameter("reducedChi2RZMaxNstub5")) {} + bool operator()(const L1Track& t) const { + return (((t.getChi2RZ() < reducedChi2RZMaxNstub4_) && (t.getNStubs() == 4)) || + ((t.getChi2RZ() < reducedChi2RZMaxNstub5_) && (t.getNStubs() > 4))); + } + + private: + double reducedChi2RZMaxNstub4_; + double reducedChi2RZMaxNstub5_; + }; + struct TTTrackChi2RPhiMaxNstubSelector { + TTTrackChi2RPhiMaxNstubSelector(double reducedChi2RPhiMaxNstub4, double reducedChi2RPhiMaxNstub5) + : reducedChi2RPhiMaxNstub4_(reducedChi2RPhiMaxNstub4), reducedChi2RPhiMaxNstub5_(reducedChi2RPhiMaxNstub5) {} + TTTrackChi2RPhiMaxNstubSelector(const edm::ParameterSet& cfg) + : reducedChi2RPhiMaxNstub4_(cfg.template getParameter("reducedChi2RPhiMaxNstub4")), + reducedChi2RPhiMaxNstub5_(cfg.template getParameter("reducedChi2RPhiMaxNstub5")) {} + bool operator()(const L1Track& t) const { + return (((t.chi2XYRed() < reducedChi2RPhiMaxNstub4_) && (t.getStubRefs().size() == 4)) || + ((t.chi2XYRed() < reducedChi2RPhiMaxNstub5_) && (t.getStubRefs().size() > 4))); + } + + private: + double reducedChi2RPhiMaxNstub4_; + double reducedChi2RPhiMaxNstub5_; + }; + struct TTTrackWordChi2RPhiMaxNstubSelector { // using simulated chi2 since not implemented in track word, updates needed + TTTrackWordChi2RPhiMaxNstubSelector(double reducedChi2RPhiMaxNstub4, double reducedChi2RPhiMaxNstub5) + : reducedChi2RPhiMaxNstub4_(reducedChi2RPhiMaxNstub4), reducedChi2RPhiMaxNstub5_(reducedChi2RPhiMaxNstub5) {} + 
TTTrackWordChi2RPhiMaxNstubSelector(const edm::ParameterSet& cfg) + : reducedChi2RPhiMaxNstub4_(cfg.template getParameter("reducedChi2RPhiMaxNstub4")), + reducedChi2RPhiMaxNstub5_(cfg.template getParameter("reducedChi2RPhiMaxNstub5")) {} + bool operator()(const L1Track& t) const { + return (((t.getChi2RPhi() < reducedChi2RPhiMaxNstub4_) && (t.getNStubs() == 4)) || + ((t.getChi2RPhi() < reducedChi2RPhiMaxNstub5_) && (t.getNStubs() > 4))); + } + + private: + double reducedChi2RPhiMaxNstub4_; + double reducedChi2RPhiMaxNstub5_; + }; + struct TTTrackBendChi2MaxNstubSelector { + TTTrackBendChi2MaxNstubSelector(double reducedBendChi2MaxNstub4, double reducedBendChi2MaxNstub5) + : reducedBendChi2MaxNstub4_(reducedBendChi2MaxNstub4), reducedBendChi2MaxNstub5_(reducedBendChi2MaxNstub5) {} + TTTrackBendChi2MaxNstubSelector(const edm::ParameterSet& cfg) + : reducedBendChi2MaxNstub4_(cfg.template getParameter("reducedBendChi2MaxNstub4")), + reducedBendChi2MaxNstub5_(cfg.template getParameter("reducedBendChi2MaxNstub5")) {} + bool operator()(const L1Track& t) const { + return (((t.stubPtConsistency() < reducedBendChi2MaxNstub4_) && (t.getStubRefs().size() == 4)) || + ((t.stubPtConsistency() < reducedBendChi2MaxNstub5_) && (t.getStubRefs().size() > 4))); + } + + private: + double reducedBendChi2MaxNstub4_; + double reducedBendChi2MaxNstub5_; + }; + struct TTTrackWordBendChi2MaxNstubSelector { + TTTrackWordBendChi2MaxNstubSelector(double reducedBendChi2MaxNstub4, double reducedBendChi2MaxNstub5) + : reducedBendChi2MaxNstub4_(reducedBendChi2MaxNstub4), reducedBendChi2MaxNstub5_(reducedBendChi2MaxNstub5) {} + TTTrackWordBendChi2MaxNstubSelector(const edm::ParameterSet& cfg) + : reducedBendChi2MaxNstub4_(cfg.template getParameter("reducedBendChi2MaxNstub4")), + reducedBendChi2MaxNstub5_(cfg.template getParameter("reducedBendChi2MaxNstub5")) {} + bool operator()(const L1Track& t) const { + return (((t.getBendChi2() < reducedBendChi2MaxNstub4_) && (t.getNStubs() == 4)) || + 
((t.getBendChi2() < reducedBendChi2MaxNstub5_) && (t.getNStubs() > 4))); + } + + private: + double reducedBendChi2MaxNstub4_; + double reducedBendChi2MaxNstub5_; + }; typedef AndSelector TTTrackPtMinEtaMaxZ0MaxNStubsMinSelector; @@ -267,13 +378,21 @@ class L1TrackSelectionProducer : public edm::global::EDProducer<> { TTTrackBendChi2Chi2RZChi2RPhiMaxSelector; typedef AndSelector TTTrackWordBendChi2Chi2RZChi2RPhiMaxSelector; + typedef AndSelector + TTTrackChi2MaxNstubSelector; + typedef AndSelector + TTTrackWordChi2MaxNstubSelector; // ----------member data --------------------------- const edm::EDGetTokenT l1TracksToken_; edm::ESGetToken tTopoToken_; const std::string outputCollectionName_; const edm::ParameterSet cutSet_; - const double ptMin_, absEtaMax_, absZ0Max_, bendChi2Max_, reducedChi2RZMax_, reducedChi2RPhiMax_; + const double ptMin_, absEtaMax_, absZ0Max_, promptMVAMin_, bendChi2Max_, reducedChi2RZMax_, reducedChi2RPhiMax_; + const double reducedChi2RZMaxNstub4_, reducedChi2RZMaxNstub5_, reducedChi2RPhiMaxNstub4_, reducedChi2RPhiMaxNstub5_, + reducedBendChi2MaxNstub4_, reducedBendChi2MaxNstub5_; const int nStubsMin_, nPSStubsMin_; bool processSimulatedTracks_, processEmulatedTracks_; int debug_; @@ -291,9 +410,16 @@ L1TrackSelectionProducer::L1TrackSelectionProducer(const edm::ParameterSet& iCon ptMin_(cutSet_.getParameter("ptMin")), absEtaMax_(cutSet_.getParameter("absEtaMax")), absZ0Max_(cutSet_.getParameter("absZ0Max")), + promptMVAMin_(cutSet_.getParameter("promptMVAMin")), bendChi2Max_(cutSet_.getParameter("reducedBendChi2Max")), reducedChi2RZMax_(cutSet_.getParameter("reducedChi2RZMax")), reducedChi2RPhiMax_(cutSet_.getParameter("reducedChi2RPhiMax")), + reducedChi2RZMaxNstub4_(cutSet_.getParameter("reducedChi2RZMaxNstub4")), + reducedChi2RZMaxNstub5_(cutSet_.getParameter("reducedChi2RZMaxNstub5")), + reducedChi2RPhiMaxNstub4_(cutSet_.getParameter("reducedChi2RPhiMaxNstub4")), + 
reducedChi2RPhiMaxNstub5_(cutSet_.getParameter("reducedChi2RPhiMaxNstub5")), + reducedBendChi2MaxNstub4_(cutSet_.getParameter("reducedBendChi2MaxNstub4")), + reducedBendChi2MaxNstub5_(cutSet_.getParameter("reducedBendChi2MaxNstub5")), nStubsMin_(cutSet_.getParameter("nStubsMin")), nPSStubsMin_(cutSet_.getParameter("nPSStubsMin")), processSimulatedTracks_(iConfig.getParameter("processSimulatedTracks")), @@ -417,17 +543,26 @@ void L1TrackSelectionProducer::produce(edm::StreamID, edm::Event& iEvent, const TTTrackBendChi2Chi2RZChi2RPhiMaxSelector chi2Sel(bendChi2Max_, reducedChi2RZMax_, reducedChi2RPhiMax_); TTTrackWordBendChi2Chi2RZChi2RPhiMaxSelector chi2SelEmu(bendChi2Max_, reducedChi2RZMax_, reducedChi2RPhiMax_); TTTrackNPSStubsMinSelector nPSStubsSel(nPSStubsMin_, tTopo); + TTTrackPromptMVAMinSelector mvaSel(promptMVAMin_); + TTTrackWordPromptMVAMinSelector mvaSelEmu(promptMVAMin_); + TTTrackChi2MaxNstubSelector chi2NstubSel({reducedChi2RZMaxNstub4_, reducedChi2RZMaxNstub5_}, + {reducedChi2RPhiMaxNstub4_, reducedChi2RPhiMaxNstub5_}, + {reducedBendChi2MaxNstub4_, reducedBendChi2MaxNstub5_}); + TTTrackWordChi2MaxNstubSelector chi2NstubSelEmu({reducedChi2RZMaxNstub4_, reducedChi2RZMaxNstub5_}, + {reducedChi2RPhiMaxNstub4_, reducedChi2RPhiMaxNstub5_}, + {reducedBendChi2MaxNstub4_, reducedBendChi2MaxNstub5_}); for (size_t i = 0; i < nOutputApproximate; i++) { const auto& track = l1TracksHandle->at(i); // Select tracks based on the floating point TTTrack - if (processSimulatedTracks_ && kinSel(track) && nPSStubsSel(track) && chi2Sel(track)) { + if (processSimulatedTracks_ && kinSel(track) && nPSStubsSel(track) && chi2Sel(track) && mvaSel(track) && + chi2NstubSel(track)) { vTTTrackOutput->push_back(TTTrackRef(l1TracksHandle, i)); } // Select tracks based on the bitwise accurate TTTrack_TrackWord - if (processEmulatedTracks_ && kinSelEmu(track) && chi2SelEmu(track)) { + if (processEmulatedTracks_ && kinSelEmu(track) && chi2SelEmu(track) && mvaSelEmu(track) && 
chi2NstubSelEmu(track)) { vTTTrackEmulationOutput->push_back(TTTrackRef(l1TracksHandle, i)); } } @@ -460,9 +595,22 @@ void L1TrackSelectionProducer::fillDescriptions(edm::ConfigurationDescriptions& descCutSet.add("nPSStubsMin", 0) ->setComment("number of stubs in the PS Modules must be greater than or equal to this value"); + descCutSet.add("promptMVAMin", -1.0)->setComment("MVA must be greater than this value"); descCutSet.add("reducedBendChi2Max", 2.25)->setComment("bend chi2 must be less than this value"); descCutSet.add("reducedChi2RZMax", 5.0)->setComment("chi2rz/dof must be less than this value"); descCutSet.add("reducedChi2RPhiMax", 20.0)->setComment("chi2rphi/dof must be less than this value"); + descCutSet.add("reducedChi2RZMaxNstub4", 999.9) + ->setComment("chi2rz/dof must be less than this value in nstub==4"); + descCutSet.add("reducedChi2RZMaxNstub5", 999.9) + ->setComment("chi2rz/dof must be less than this value in nstub>4"); + descCutSet.add("reducedChi2RPhiMaxNstub4", 999.9) + ->setComment("chi2rphi/dof must be less than this value in nstub==4"); + descCutSet.add("reducedChi2RPhiMaxNstub5", 999.9) + ->setComment("chi2rphi/dof must be less than this value in nstub>4"); + descCutSet.add("reducedBendChi2MaxNstub4", 999.9) + ->setComment("bend chi2 must be less than this value in nstub==4"); + descCutSet.add("reducedBendChi2MaxNstub5", 999.9) + ->setComment("bend chi2 must be less than this value in nstub>4"); desc.add("cutSet", descCutSet); } diff --git a/L1Trigger/L1TTrackMatch/plugins/L1TruthTrackFastJetProducer.cc b/L1Trigger/L1TTrackMatch/plugins/L1TruthTrackFastJetProducer.cc new file mode 100644 index 0000000000000..6b8a1b2cbbd80 --- /dev/null +++ b/L1Trigger/L1TTrackMatch/plugins/L1TruthTrackFastJetProducer.cc @@ -0,0 +1,220 @@ +/////////////////////////////////////////////////////////////////////////// +// // +// Producer of TruTrkFastJet, // +// Cluster L1 tracks with truth info using fastjet // +// // +// Created by: Claire Savard (Oct. 
2023) // +// // +/////////////////////////////////////////////////////////////////////////// + +// system include files +#include + +// user include files +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "DataFormats/Common/interface/Handle.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "DataFormats/Math/interface/LorentzVector.h" + +// L1 objects +#include "DataFormats/L1TrackTrigger/interface/TTTypes.h" +#include "DataFormats/L1TCorrelator/interface/TkJet.h" +#include "DataFormats/L1TCorrelator/interface/TkJetFwd.h" +#include "DataFormats/L1Trigger/interface/Vertex.h" + +// MC +#include "SimTracker/TrackTriggerAssociation/interface/TTTrackAssociationMap.h" +#include "SimDataFormats/TrackingAnalysis/interface/TrackingParticle.h" + +// geometry +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" + +#include + +#include +#include "TMath.h" +#include "TH1.h" + +using namespace l1t; +using namespace edm; +using namespace std; + +////////////////////////////// +// // +// CLASS DEFINITION // +// // +////////////////////////////// + +class L1TruthTrackFastJetProducer : public edm::stream::EDProducer<> { +public: + typedef TTTrack L1TTTrackType; + typedef std::vector L1TTTrackCollectionType; + + explicit L1TruthTrackFastJetProducer(const edm::ParameterSet&); + ~L1TruthTrackFastJetProducer() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void 
produce(edm::Event&, const edm::EventSetup&) override; + + // track selection criteria + const float trkZMax_; // in [cm] + const float trkPtMin_; // in [GeV] + const float trkEtaMax_; // in [rad] + const int trkNStubMin_; // minimum number of stubs + const int trkNPSStubMin_; // minimum number of PS stubs + const double coneSize_; // Use anti-kt with this cone size + const bool displaced_; //use prompt/displaced tracks + + const edm::EDGetTokenT > > trackToken_; + edm::ESGetToken tTopoToken_; + const edm::EDGetTokenT > ttTrackMCTruthToken_; +}; + +// constructor +L1TruthTrackFastJetProducer::L1TruthTrackFastJetProducer(const edm::ParameterSet& iConfig) + : trkZMax_((float)iConfig.getParameter("trk_zMax")), + trkPtMin_((float)iConfig.getParameter("trk_ptMin")), + trkEtaMax_((float)iConfig.getParameter("trk_etaMax")), + trkNStubMin_((int)iConfig.getParameter("trk_nStubMin")), + trkNPSStubMin_((int)iConfig.getParameter("trk_nPSStubMin")), + coneSize_((float)iConfig.getParameter("coneSize")), + displaced_(iConfig.getParameter("displaced")), + trackToken_(consumes > >( + iConfig.getParameter("L1TrackInputTag"))), + tTopoToken_(esConsumes(edm::ESInputTag("", ""))), + ttTrackMCTruthToken_(consumes >( + iConfig.getParameter("MCTruthTrackInputTag"))) { + if (displaced_) + produces("L1TruthTrackFastJetsExtended"); + else + produces("L1TruthTrackFastJets"); +} + +// producer +void L1TruthTrackFastJetProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + std::unique_ptr L1TrackFastJets(new TkJetCollection); + + // L1 tracks + edm::Handle > > TTTrackHandle; + iEvent.getByToken(trackToken_, TTTrackHandle); + std::vector >::const_iterator iterL1Track; + + // MC truth + edm::Handle > MCTruthTTTrackHandle; + iEvent.getByToken(ttTrackMCTruthToken_, MCTruthTTTrackHandle); + + // Tracker Topology + const TrackerTopology& tTopo = iSetup.getData(tTopoToken_); + + fastjet::JetDefinition jet_def(fastjet::antikt_algorithm, coneSize_); + std::vector JetInputs; + + 
unsigned int this_l1track = 0; + for (iterL1Track = TTTrackHandle->begin(); iterL1Track != TTTrackHandle->end(); iterL1Track++) { + edm::Ptr > l1track_ptr(TTTrackHandle, this_l1track); + this_l1track++; + std::vector >, TTStub > > + theStubs = iterL1Track->getStubRefs(); + + // standard quality cuts + if (std::abs(iterL1Track->z0()) > trkZMax_) + continue; + if (std::abs(iterL1Track->momentum().eta()) > trkEtaMax_) + continue; + if (iterL1Track->momentum().perp() < trkPtMin_) + continue; + int trk_nstub = (int)theStubs.size(); + if (trk_nstub < trkNStubMin_) + continue; + + int trk_nPS = 0; + for (int istub = 0; istub < trk_nstub; istub++) { + DetId detId(theStubs.at(istub)->getDetId()); + bool tmp_isPS = false; + if (detId.det() == DetId::Detector::Tracker) { + if (detId.subdetId() == StripSubdetector::TOB && tTopo.tobLayer(detId) <= 3) + tmp_isPS = true; + else if (detId.subdetId() == StripSubdetector::TID && tTopo.tidRing(detId) <= 9) + tmp_isPS = true; + } + if (tmp_isPS) + trk_nPS++; + } + if (trk_nPS < trkNPSStubMin_) + continue; + + // check that trk is real and from hard interaction + edm::Ptr my_tp = MCTruthTTTrackHandle->findTrackingParticlePtr(l1track_ptr); + if (my_tp.isNull()) // there is no tp match so the track is fake + continue; + int tp_eventid = my_tp->eventId().event(); + if (tp_eventid > 0) // matched tp is from pileup + continue; + + fastjet::PseudoJet psuedoJet(iterL1Track->momentum().x(), + iterL1Track->momentum().y(), + iterL1Track->momentum().z(), + iterL1Track->momentum().mag()); + JetInputs.push_back(psuedoJet); // input tracks for clustering + JetInputs.back().set_user_index(this_l1track - 1); // save track index in the collection + } // end loop over tracks + + fastjet::ClusterSequence cs(JetInputs, jet_def); // define the output jet collection + std::vector JetOutputs = + fastjet::sorted_by_pt(cs.inclusive_jets(0)); // output jet collection, pT-ordered + + for (unsigned int ijet = 0; ijet < JetOutputs.size(); ++ijet) { + 
math::XYZTLorentzVector jetP4( + JetOutputs[ijet].px(), JetOutputs[ijet].py(), JetOutputs[ijet].pz(), JetOutputs[ijet].modp()); + float sumpt = 0; + float avgZ = 0; + std::vector > L1TrackPtrs; + std::vector fjConstituents = fastjet::sorted_by_pt(cs.constituents(JetOutputs[ijet])); + + for (unsigned int i = 0; i < fjConstituents.size(); ++i) { + auto index = fjConstituents[i].user_index(); + edm::Ptr trkPtr(TTTrackHandle, index); + L1TrackPtrs.push_back(trkPtr); // L1Tracks in the jet + sumpt = sumpt + trkPtr->momentum().perp(); + avgZ = avgZ + trkPtr->momentum().perp() * trkPtr->z0(); + } + avgZ = avgZ / sumpt; + edm::Ref jetRef; + TkJet trkJet(jetP4, jetRef, L1TrackPtrs, avgZ); + L1TrackFastJets->push_back(trkJet); + } //end loop over Jet Outputs + + if (displaced_) + iEvent.put(std::move(L1TrackFastJets), "L1TruthTrackFastJetsExtended"); + else + iEvent.put(std::move(L1TrackFastJets), "L1TruthTrackFastJets"); +} + +void L1TruthTrackFastJetProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // The following says we do not know what parameters are allowed so do no validation + edm::ParameterSetDescription desc; + desc.add("L1TrackInputTag", edm::InputTag("l1tTTTracksFromTrackletEmulation", "Level1TTTracks")); + desc.add("MCTruthTrackInputTag", edm::InputTag("TTTrackAssociatorFromPixelDigis", "Level1TTTracks")); + desc.add("trk_zMax", 15.); + desc.add("trk_ptMin", 2.0); + desc.add("trk_etaMax", 2.4); + desc.add("trk_nStubMin", 4); + desc.add("trk_nPSStubMin", -1); + desc.add("coneSize", 0.4); + desc.add("displaced", false); + descriptions.add("l1tTruthTrackFastJets", desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(L1TruthTrackFastJetProducer); diff --git a/L1Trigger/L1TTrackMatch/plugins/TPFastJetProducer.cc b/L1Trigger/L1TTrackMatch/plugins/TPFastJetProducer.cc new file mode 100644 index 0000000000000..c30e1e0aee0b9 --- /dev/null +++ b/L1Trigger/L1TTrackMatch/plugins/TPFastJetProducer.cc @@ -0,0 +1,216 @@ 
+/////////////////////////////////////////////////////////////////////////// +// // +// Producer of TPFastJets, // +// Cluster tracking particles using fastjet // +// // +// Created by: Claire Savard (Oct. 2023) // +// // +/////////////////////////////////////////////////////////////////////////// + +// system include files +#include + +// user include files +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "DataFormats/Common/interface/Handle.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "DataFormats/Math/interface/LorentzVector.h" + +// L1 objects +#include "DataFormats/L1TrackTrigger/interface/TTTypes.h" +#include "DataFormats/L1TCorrelator/interface/TkJet.h" +#include "DataFormats/L1TCorrelator/interface/TkJetFwd.h" +#include "DataFormats/L1Trigger/interface/Vertex.h" +#include "DataFormats/L1TrackTrigger/interface/TTStub.h" + +// truth object +#include "SimDataFormats/TrackingAnalysis/interface/TrackingParticle.h" +#include "SimTracker/TrackTriggerAssociation/interface/TTStubAssociationMap.h" + +// geometry +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" + +#include + +#include +#include "TMath.h" +#include "TH1.h" + +using namespace l1t; +using namespace edm; +using namespace std; + +////////////////////////////// +// // +// CLASS DEFINITION // +// // +////////////////////////////// + +class TPFastJetProducer : public edm::stream::EDProducer<> { +public: + explicit TPFastJetProducer(const 
edm::ParameterSet&); + ~TPFastJetProducer() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void produce(edm::Event&, const edm::EventSetup&) override; + + // track selection criteria + const float tpPtMin_; + const float tpEtaMax_; + const float tpZMax_; + const int tpNStubMin_; + const int tpNStubLayerMin_; + const float coneSize_; // Use anti-kt with this cone size + + edm::EDGetTokenT> trackingParticleToken_; + edm::EDGetTokenT> ttStubMCTruthToken_; + edm::ESGetToken tTopoToken_; +}; + +// constructor +TPFastJetProducer::TPFastJetProducer(const edm::ParameterSet& iConfig) + : tpPtMin_((float)iConfig.getParameter("tp_ptMin")), + tpEtaMax_((float)iConfig.getParameter("tp_etaMax")), + tpZMax_((float)iConfig.getParameter("tp_zMax")), + tpNStubMin_((int)iConfig.getParameter("tp_nStubMin")), + tpNStubLayerMin_((int)iConfig.getParameter("tp_nStubLayerMin")), + coneSize_((float)iConfig.getParameter("coneSize")), + trackingParticleToken_( + consumes>(iConfig.getParameter("TrackingParticleInputTag"))), + ttStubMCTruthToken_(consumes>( + iConfig.getParameter("MCTruthStubInputTag"))), + tTopoToken_(esConsumes(edm::ESInputTag("", ""))) { + produces("TPFastJets"); +} + +// producer +void TPFastJetProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { + std::unique_ptr TPFastJets(new TkJetCollection); + + // Tracking particles + edm::Handle> TrackingParticleHandle; + iEvent.getByToken(trackingParticleToken_, TrackingParticleHandle); + std::vector::const_iterator iterTP; + + // MC truth association maps + edm::Handle> MCTruthTTStubHandle; + iEvent.getByToken(ttStubMCTruthToken_, MCTruthTTStubHandle); + + // Tracker Topology + const TrackerTopology& tTopo = iSetup.getData(tTopoToken_); + + fastjet::JetDefinition jet_def(fastjet::antikt_algorithm, coneSize_); + std::vector JetInputs; + + // loop over tps + unsigned int this_tp = 0; + for (iterTP = TrackingParticleHandle->begin(); iterTP != 
TrackingParticleHandle->end(); iterTP++) { + edm::Ptr tp_ptr(TrackingParticleHandle, this_tp); + this_tp++; + + std::vector>, TTStub>> + theStubRefs = MCTruthTTStubHandle->findTTStubRefs(tp_ptr); + int nStubTP = (int)theStubRefs.size(); + + // how many layers/disks have stubs? + int hasStubInLayer[11] = {0}; + for (auto& theStubRef : theStubRefs) { + DetId detid(theStubRef->getDetId()); + + int layer = -1; + if (detid.subdetId() == StripSubdetector::TOB) { + layer = static_cast(tTopo.layer(detid)) - 1; //fill in array as entries 0-5 + } else if (detid.subdetId() == StripSubdetector::TID) { + layer = static_cast(tTopo.layer(detid)) + 5; //fill in array as entries 6-10 + } + + //treat genuine stubs separately (==2 is genuine, ==1 is not) + if (MCTruthTTStubHandle->findTrackingParticlePtr(theStubRef).isNull() && hasStubInLayer[layer] < 2) + hasStubInLayer[layer] = 1; + else + hasStubInLayer[layer] = 2; + } + + int nStubLayerTP = 0; + for (int isum : hasStubInLayer) { + if (isum >= 1) + nStubLayerTP += 1; + } + + // tp quality cuts to match L1 tracks + if (iterTP->pt() < tpPtMin_) + continue; + if (fabs(iterTP->eta()) > tpEtaMax_) + continue; + if (nStubTP < tpNStubMin_) + continue; + if (nStubLayerTP < tpNStubLayerMin_) + continue; + if (fabs(iterTP->z0()) > tpZMax_) + continue; + if (iterTP->charge() == 0.) 
// extra check that all tps are charged + continue; + if (iterTP->eventId().event() > 0) // only select hard interaction tps + continue; + + fastjet::PseudoJet psuedoJet(iterTP->px(), iterTP->py(), iterTP->pz(), iterTP->energy()); + JetInputs.push_back(psuedoJet); // input tps for clustering + JetInputs.back().set_user_index(this_tp - 1); // save tp index in the collection + } // end loop over tps + + fastjet::ClusterSequence cs(JetInputs, jet_def); // define the output jet collection + std::vector JetOutputs = + fastjet::sorted_by_pt(cs.inclusive_jets(0)); // output jet collection, pT-ordered + + for (unsigned int ijet = 0; ijet < JetOutputs.size(); ++ijet) { + math::XYZTLorentzVector jetP4( + JetOutputs[ijet].px(), JetOutputs[ijet].py(), JetOutputs[ijet].pz(), JetOutputs[ijet].modp()); + float sumpt = 0; + float avgZ = 0; + std::vector> tpPtrs; + std::vector fjConstituents = fastjet::sorted_by_pt(cs.constituents(JetOutputs[ijet])); + + for (unsigned int i = 0; i < fjConstituents.size(); ++i) { + auto index = fjConstituents[i].user_index(); + edm::Ptr tpPtr(TrackingParticleHandle, index); + tpPtrs.push_back(tpPtr); // tracking particles in the jet + sumpt = sumpt + tpPtr->pt(); + avgZ = avgZ + tpPtr->pt() * tpPtr->z0(); + } + avgZ = avgZ / sumpt; + edm::Ref jetRef; + std::vector>> dummyL1TrackPtrs; // can't create TkJet with tp references + TkJet tpJet(jetP4, dummyL1TrackPtrs, avgZ, fjConstituents.size(), 0, 0, 0, false); + TPFastJets->push_back(tpJet); + } //end loop over Jet Outputs + + iEvent.put(std::move(TPFastJets), "TPFastJets"); +} + +void TPFastJetProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + // The following says we do not know what parameters are allowed so do no validation + edm::ParameterSetDescription desc; + desc.add("TrackingParticleInputTag", edm::InputTag("mix", "MergedTrackTruth")); + desc.add("MCTruthStubInputTag", edm::InputTag("TTStubAssociatorFromPixelDigis", "StubAccepted")); + desc.add("tp_ptMin", 2.0); + 
desc.add("tp_etaMax", 2.4); + desc.add("tp_zMax", 15.); + desc.add("tp_nStubMin", 4); + desc.add("tp_nStubLayerMin", 4); + desc.add("coneSize", 0.4); + descriptions.add("tpFastJets", desc); +} + +//define this as a plug-in +DEFINE_FWK_MODULE(TPFastJetProducer); diff --git a/L1Trigger/L1TTrackMatch/python/l1tTrackFastJets_cfi.py b/L1Trigger/L1TTrackMatch/python/l1tTrackFastJets_cfi.py index 183244e7d0ad1..91cb409c84959 100644 --- a/L1Trigger/L1TTrackMatch/python/l1tTrackFastJets_cfi.py +++ b/L1Trigger/L1TTrackMatch/python/l1tTrackFastJets_cfi.py @@ -1,37 +1,13 @@ import FWCore.ParameterSet.Config as cms l1tTrackFastJets = cms.EDProducer("L1TrackFastJetProducer", - L1TrackInputTag = cms.InputTag("l1tTTTracksFromTrackletEmulation", "Level1TTTracks"), - L1PrimaryVertexTag=cms.InputTag("l1tVertexFinder", "L1Vertices"), - trk_zMax = cms.double(15.), # max track z0 [cm] - trk_chi2dofMax = cms.double(10.), # max track chi2/dof - trk_bendChi2Max = cms.double(2.2),# max bendChi2 cut - trk_ptMin = cms.double(2.0), # minimum track pt [GeV] - trk_etaMax = cms.double(2.5), # maximum track eta - trk_nStubMin = cms.int32(4), # minimum number of stubs in track - trk_nPSStubMin = cms.int32(-1), # minimum number of PS stubs in track - deltaZ0Cut=cms.double(0.5), # cluster tracks within |dz| 20 - trk_ptTightChi2 = cms.double(20.0), - trk_chi2dofTightChi2 = cms.double(5.0), + L1TrackInputTag = cms.InputTag("l1tTrackVertexAssociationProducerForJets", "Level1TTTracksSelectedAssociated"), coneSize=cms.double(0.4), #cone size for anti-kt fast jet displaced = cms.bool(False) # use prompt/displaced tracks ) l1tTrackFastJetsExtended = cms.EDProducer("L1TrackFastJetProducer", - L1TrackInputTag = cms.InputTag("l1tTTTracksFromExtendedTrackletEmulation", "Level1TTTracks"), - L1PrimaryVertexTag=cms.InputTag("l1tVertexFinder", "L1Vertices"), - trk_zMax = cms.double(15.), # max track z0 [cm] - trk_chi2dofMax = cms.double(40.), # max track chi2 for extended tracks - trk_bendChi2Max = 
cms.double(2.4),#Bendchi2 cut for extended tracks - trk_ptMin = cms.double(3.0), # minimum track pt [GeV] - trk_etaMax = cms.double(2.5), # maximum track eta - trk_nStubMin = cms.int32(4), # minimum number of stubs on track - trk_nPSStubMin = cms.int32(-1), # minimum number of stubs in PS modules on track - deltaZ0Cut=cms.double(3.0), #cluster tracks within |dz| 20 - trk_ptTightChi2 = cms.double(20.0), - trk_chi2dofTightChi2 = cms.double(5.0), + L1TrackInputTag = cms.InputTag("l1tTrackVertexAssociationProducerExtendedForJets", "Level1TTTracksExtendedSelectedAssociated"), coneSize=cms.double(0.4), #cone size for anti-kt fast jet displaced = cms.bool(True) # use prompt/displaced tracks ) diff --git a/L1Trigger/L1TTrackMatch/python/l1tTrackJetsEmulation_cfi.py b/L1Trigger/L1TTrackMatch/python/l1tTrackJetsEmulation_cfi.py index 9a227295a7257..f48ff4faaa28a 100644 --- a/L1Trigger/L1TTrackMatch/python/l1tTrackJetsEmulation_cfi.py +++ b/L1Trigger/L1TTrackMatch/python/l1tTrackJetsEmulation_cfi.py @@ -2,17 +2,9 @@ l1tTrackJetsEmulation = cms.EDProducer('L1TrackJetEmulatorProducer', L1TrackInputTag= cms.InputTag("l1tTrackVertexAssociationProducerForJets", "Level1TTTracksSelectedAssociatedEmulation"), - L1PVertexInputTag=cms.InputTag("l1tVertexFinderEmulator","L1VerticesEmulation"), - MaxDzTrackPV = cms.double(1.0), trk_zMax = cms.double (15.) 
, # maximum track z trk_ptMax = cms.double(200.), # maximumum track pT before saturation [GeV] - trk_ptMin = cms.double(2.0), # minimum track pt [GeV] trk_etaMax = cms.double(2.4), # maximum track eta - nStubs4PromptChi2=cms.double(10.0), #Prompt track quality flags for loose/tight - nStubs4PromptBend=cms.double(2.2), - nStubs5PromptChi2=cms.double(10.0), - nStubs5PromptBend=cms.double(2.2), - trk_nPSStubMin=cms.int32(-1), # minimum PS stubs, -1 means no cut minTrkJetpT=cms.double(-1.), # minimum track pt to be considered for track jet etaBins=cms.int32(24), phiBins=cms.int32(27), @@ -24,24 +16,14 @@ highpTJetMinTrackMultiplicity=cms.int32(3), highpTJetThreshold=cms.double(100.), displaced=cms.bool(False), #Flag for displaced tracks - nStubs4DisplacedChi2=cms.double(5.0), #Displaced track quality flags for loose/tight - nStubs4DisplacedBend=cms.double(1.7), - nStubs5DisplacedChi2=cms.double(2.75), - nStubs5DisplacedBend=cms.double(3.5), nDisplacedTracks=cms.int32(2) #Number of displaced tracks required per jet ) l1tTrackJetsExtendedEmulation = l1tTrackJetsEmulation.clone( L1TrackInputTag= cms.InputTag("l1tTrackVertexAssociationProducerExtendedForJets", "Level1TTTracksExtendedSelectedAssociatedEmulation"), - L1PVertexInputTag=cms.InputTag("l1tVertexFinderEmulator", "L1VerticesEmulation"), minTrkJetpT= 5.0, # minimum track pt to be considered for track jet - MaxDzTrackPV = 5.0, d0_cutNStubs4= -1, # -1 excludes nstub=4 from disp tag d0_cutNStubs5= 0.22, displaced= True, #Flag for displaced tracks - nStubs4DisplacedChi2= 3.3, #Disp tracks selection [trkcut excluded minTrkJetpT = 5., # min track jet pt to be considered for most energetic zbin finding d0_cutNStubs5 = 0.22, # -1 excludes nstub>4 from disp tag process displaced = True, #Flag for displaced tracks - nStubs4DisplacedChi2 = 3.3, #Disp tracks selection [trk4 must be less than this value + reducedChi2RPhiMaxNstub4 = cms.double(999.9), # chi2rphi/dof with nstub==4 must be less than this value + 
reducedChi2RPhiMaxNstub5 = cms.double(999.9), # chi2rphi/dof with nstub>4 must be less than this value + reducedBendChi2MaxNstub4 = cms.double(999.9), # bend chi2 with nstub==4 must be less than this value + reducedBendChi2MaxNstub5 = cms.double(999.9), # bend chi2 with nstub>4 must be less than this value ), processSimulatedTracks = cms.bool(True), # return selected tracks after cutting on the floating point values processEmulatedTracks = cms.bool(True), # return selected tracks after cutting on the bitwise emulated values @@ -29,9 +36,16 @@ nStubsMin = 4, # number of stubs must be greater than or equal to this value nPSStubsMin = 0, # the number of stubs in the PS Modules must be greater than or equal to this value + promptMVAMin = -1.0, # MVA must be greater than this value reducedBendChi2Max = 2.4, # bend chi2 must be less than this value reducedChi2RZMax = 10.0, # chi2rz/dof must be less than this value reducedChi2RPhiMax = 40.0, # chi2rphi/dof must be less than this value + reducedChi2RZMaxNstub4 = cms.double(999.9), # chi2rz/dof with nstub==4 must be less than this value + reducedChi2RZMaxNstub5 = cms.double(999.9), # chi2rz/dof with nstub>4 must be less than this value + reducedChi2RPhiMaxNstub4 = cms.double(999.9), # chi2rphi/dof with nstub==4 must be less than this value + reducedChi2RPhiMaxNstub5 = cms.double(999.9), # chi2rphi/dof with nstub>4 must be less than this value + reducedBendChi2MaxNstub4 = 999.9, # bend chi2 with nstub==4 must be less than this value + reducedBendChi2MaxNstub5 = 999.9, # bend chi2 with nstub>4 must be less than this value ), processSimulatedTracks = cms.bool(True), # return selected tracks after cutting on the floating point values processEmulatedTracks = cms.bool(True), # return selected tracks after cutting on the bitwise emulated values @@ -39,29 +53,43 @@ l1tTrackSelectionProducerForJets = l1tTrackSelectionProducer.clone( cutSet = dict( - ptMin = 0.0, # pt must be greater than this value, [GeV] - absEtaMax = 999.9, # 
absolute value of eta must be less than this value - absZ0Max = 999.9, # z0 must be less than this value, [cm] - nStubsMin = 0, # number of stubs must be greater than or equal to this value + ptMin = 2.0, # pt must be greater than this value, [GeV] + absEtaMax = 2.4, # absolute value of eta must be less than this value + absZ0Max = 15.0, # z0 must be less than this value, [cm] + nStubsMin = 4, # number of stubs must be greater than or equal to this value nPSStubsMin = 0, # the number of stubs in the PS Modules must be greater than or equal to this value + promptMVAMin = 0.1, # MVA must be greater than this value reducedBendChi2Max = 999.9, # bend chi2 must be less than this value reducedChi2RZMax = 999.9, # chi2rz/dof must be less than this value reducedChi2RPhiMax = 999.9, # chi2rphi/dof must be less than this value + reducedChi2RZMaxNstub4 = cms.double(999.9), # chi2rz/dof with nstub==4 must be less than this value + reducedChi2RZMaxNstub5 = cms.double(999.9), # chi2rz/dof with nstub>4 must be less than this value + reducedChi2RPhiMaxNstub4 = cms.double(999.9), # chi2rphi/dof with nstub==4 must be less than this value + reducedChi2RPhiMaxNstub5 = cms.double(999.9), # chi2rphi/dof with nstub>4 must be less than this value + reducedBendChi2MaxNstub4 = 999.9, # bend chi2 with nstub==4 must be less than this value + reducedBendChi2MaxNstub5 = 999.9, # bend chi2 with nstub>4 must be less than this value ), ) l1tTrackSelectionProducerExtendedForJets = l1tTrackSelectionProducerExtended.clone( cutSet = dict( - ptMin = 0.0, # pt must be greater than this value, [GeV] - absEtaMax = 999.9, # absolute value of eta must be less than this value - absZ0Max = 999.9, # z0 must be less than this value, [cm] - nStubsMin = 0, # number of stubs must be greater than or equal to this value + ptMin = 2.0, # pt must be greater than this value, [GeV] + absEtaMax = 2.4, # absolute value of eta must be less than this value + absZ0Max = 15.0, # z0 must be less than this value, [cm] + 
nStubsMin = 4, # number of stubs must be greater than or equal to this value nPSStubsMin = 0, # the number of stubs in the PS Modules must be greater than or equal to this value + promptMVAMin = -1.0, # MVA must be greater than this value reducedBendChi2Max = 999.9, # bend chi2 must be less than this value reducedChi2RZMax = 999.9, # chi2rz/dof must be less than this value reducedChi2RPhiMax = 999.9, # chi2rphi/dof must be less than this value + reducedChi2RZMaxNstub4 = cms.double(5.0), # chi2rz/dof with nstub==4 must be less than this value + reducedChi2RZMaxNstub5 = cms.double(5.0), # chi2rz/dof with nstub>4 must be less than this value + reducedChi2RPhiMaxNstub4 = cms.double(6.0), # chi2rphi/dof with nstub==4 must be less than this value + reducedChi2RPhiMaxNstub5 = cms.double(35.0), # chi2rphi/dof with nstub>4 must be less than this value + reducedBendChi2MaxNstub4 = cms.double(2.25), # bend chi2 with nstub==4 must be less than this value + reducedBendChi2MaxNstub5 = cms.double(3.5), # bend chi2 with nstub>4 must be less than this value ), ) diff --git a/L1Trigger/L1TTrackMatch/python/l1tTrackVertexAssociationProducer_cfi.py b/L1Trigger/L1TTrackMatch/python/l1tTrackVertexAssociationProducer_cfi.py index 749b6bdc2825a..04f1cd320b3da 100644 --- a/L1Trigger/L1TTrackMatch/python/l1tTrackVertexAssociationProducer_cfi.py +++ b/L1Trigger/L1TTrackMatch/python/l1tTrackVertexAssociationProducer_cfi.py @@ -41,8 +41,8 @@ cutSet = cms.PSet( #deltaZMaxEtaBounds = cms.vdouble(0.0, absEtaMax.value), # these values define the bin boundaries in |eta| #deltaZMax = cms.vdouble(0.5), # delta z must be less than these values, there will be one less value here than in deltaZMaxEtaBounds, [cm] - deltaZMaxEtaBounds = cms.vdouble(0.0, 0.7, 1.0, 1.2, 1.6, 2.0, 2.4), # these values define the bin boundaries in |eta| - deltaZMax = cms.vdouble(999.0, 999.0, 999.0, 999.0, 999.0, 999.0), # delta z must be less than these values, there will be one less value here than in deltaZMaxEtaBounds, 
[cm] + deltaZMaxEtaBounds = cms.vdouble(0.0, 2.4), # these values define the bin boundaries in |eta| + deltaZMax = cms.vdouble(0.55), # delta z must be less than these values, there will be one less value here than in deltaZMaxEtaBounds, [cm] ), ) @@ -52,8 +52,8 @@ cutSet = cms.PSet( #deltaZMaxEtaBounds = cms.vdouble(0.0, absEtaMax.value), # these values define the bin boundaries in |eta| #deltaZMax = cms.vdouble(0.5), # delta z must be less than these values, there will be one less value here than in deltaZMaxEtaBounds, [cm] - deltaZMaxEtaBounds = cms.vdouble(0.0, 0.7, 1.0, 1.2, 1.6, 2.0, 2.4), # these values define the bin boundaries in |eta| - deltaZMax = cms.vdouble(999.0, 999.0, 999.0, 999.0, 999.0, 999.0), # delta z must be less than these values, there will be one less value here than in deltaZMaxEtaBounds, [cm] + deltaZMaxEtaBounds = cms.vdouble(0.0, 2.4), # these values define the bin boundaries in |eta| + deltaZMax = cms.vdouble(5.0), # delta z must be less than these values, there will be one less value here than in deltaZMaxEtaBounds, [cm] ), ) diff --git a/L1Trigger/L1TTrackMatch/python/l1tTrackerEmuHTMiss_cfi.py b/L1Trigger/L1TTrackMatch/python/l1tTrackerEmuHTMiss_cfi.py index cfbf78122e1a2..57399b909552a 100644 --- a/L1Trigger/L1TTrackMatch/python/l1tTrackerEmuHTMiss_cfi.py +++ b/L1Trigger/L1TTrackMatch/python/l1tTrackerEmuHTMiss_cfi.py @@ -4,9 +4,9 @@ L1TkJetEmulationInputTag = cms.InputTag("l1tTrackJetsEmulation", "L1TrackJets"), L1MHTCollectionName = cms.string("L1TrackerEmuHTMiss"), jet_maxEta = cms.double(2.4), - jet_minPt = cms.double(5.0), - jet_minNtracksLowPt = cms.int32(2), - jet_minNtracksHighPt = cms.int32(3), + jet_minPt = cms.double(3.0), + jet_minNtracksLowPt = cms.int32(0), + jet_minNtracksHighPt = cms.int32(0), debug = cms.bool(False), displaced = cms.bool(False) ) @@ -15,9 +15,9 @@ L1TkJetEmulationInputTag = cms.InputTag("l1tTrackJetsExtendedEmulation", "L1TrackJetsExtended"), L1MHTCollectionName = 
cms.string("L1TrackerEmuHTMissExtended"), jet_maxEta = cms.double(2.4), - jet_minPt = cms.double(5.0), - jet_minNtracksLowPt = cms.int32(2), - jet_minNtracksHighPt = cms.int32(3), + jet_minPt = cms.double(3.0), + jet_minNtracksLowPt = cms.int32(0), + jet_minNtracksHighPt = cms.int32(0), debug = cms.bool(False), displaced = cms.bool(True) ) diff --git a/L1Trigger/L1TTrackMatch/python/l1tTrackerHTMiss_cfi.py b/L1Trigger/L1TTrackMatch/python/l1tTrackerHTMiss_cfi.py index 79fac31823c8b..cf3e3b6f2501a 100644 --- a/L1Trigger/L1TTrackMatch/python/l1tTrackerHTMiss_cfi.py +++ b/L1Trigger/L1TTrackMatch/python/l1tTrackerHTMiss_cfi.py @@ -22,9 +22,9 @@ L1TkJetInputTag = cms.InputTag("l1tTrackJets", "L1TrackJets"), L1VertexInputTag = cms.InputTag("l1tVertexFinder", "L1Vertices"), jet_maxEta = cms.double(2.4), - jet_minPt = cms.double(5.0), - jet_minNtracksLowPt=cms.int32(2), - jet_minNtracksHighPt=cms.int32(3), + jet_minPt = cms.double(3.0), + jet_minNtracksLowPt=cms.int32(0), + jet_minNtracksHighPt=cms.int32(0), jet_minJetEtLowPt=cms.double(50.0), # Track jet quality criteria jet_minJetEtHighPt=cms.double(100.0), useCaloJets = cms.bool(False), @@ -38,9 +38,9 @@ L1TkJetInputTag = cms.InputTag("l1tTrackJetsExtended", "L1TrackJetsExtended"), L1VertexInputTag = cms.InputTag("l1tVertexFinder", "L1Vertices"), jet_maxEta = cms.double(2.4), - jet_minPt = cms.double(5.0), - jet_minNtracksLowPt=cms.int32(2), - jet_minNtracksHighPt=cms.int32(3), + jet_minPt = cms.double(3.0), + jet_minNtracksLowPt=cms.int32(0), + jet_minNtracksHighPt=cms.int32(0), jet_minJetEtLowPt=cms.double(50.0), # Track jet quality criteria jet_minJetEtHighPt=cms.double(100.0), useCaloJets = cms.bool(False), diff --git a/L1Trigger/L1TTrackMatch/python/l1tTruthTrackFastJets_cfi.py b/L1Trigger/L1TTrackMatch/python/l1tTruthTrackFastJets_cfi.py new file mode 100644 index 0000000000000..7fdcdf327cb03 --- /dev/null +++ b/L1Trigger/L1TTrackMatch/python/l1tTruthTrackFastJets_cfi.py @@ -0,0 +1,25 @@ +import 
FWCore.ParameterSet.Config as cms + +l1tTruthTrackFastJets = cms.EDProducer("L1TruthTrackFastJetProducer", + L1TrackInputTag = cms.InputTag("l1tTTTracksFromTrackletEmulation", "Level1TTTracks"), + MCTruthTrackInputTag = cms.InputTag("TTTrackAssociatorFromPixelDigis", "Level1TTTracks"), + trk_zMax = cms.double(15.), # max track z0 [cm] + trk_ptMin = cms.double(2.0), # minimum track pt [GeV] + trk_etaMax = cms.double(2.4), # maximum track eta + trk_nStubMin = cms.int32(4), # minimum number of stubs in track + trk_nPSStubMin = cms.int32(-1), # minimum number of PS stubs in track + coneSize = cms.double(0.4), #cone size for anti-kt fast jet + displaced = cms.bool(False) # use prompt/displaced tracks +) + +l1tTruthTrackFastJetsExtended = cms.EDProducer("L1TruthTrackFastJetProducer", + L1TrackInputTag = cms.InputTag("l1tTTTracksFromExtendedTrackletEmulation", "Level1TTTracks"), + MCTruthTrackInputTag = cms.InputTag("TTTrackAssociatorFromPixelDigis", "Level1TTTracks"), + trk_zMax = cms.double(15.), # max track z0 [cm] + trk_ptMin = cms.double(3.0), # minimum track pt [GeV] + trk_etaMax = cms.double(2.5), # maximum track eta + trk_nStubMin = cms.int32(4), # minimum number of stubs on track + trk_nPSStubMin = cms.int32(-1), # minimum number of stubs in PS modules on track + coneSize=cms.double(0.4), #cone size for anti-kt fast jet + displaced = cms.bool(True) # use prompt/displaced tracks +) diff --git a/L1Trigger/L1TTrackMatch/python/tpFastJets_cfi.py b/L1Trigger/L1TTrackMatch/python/tpFastJets_cfi.py new file mode 100644 index 0000000000000..3e551704a8194 --- /dev/null +++ b/L1Trigger/L1TTrackMatch/python/tpFastJets_cfi.py @@ -0,0 +1,12 @@ +import FWCore.ParameterSet.Config as cms + +tpFastJets = cms.EDProducer("TPFastJetProducer", + TrackingParticleInputTag = cms.InputTag("mix", "MergedTrackTruth"), + MCTruthStubInputTag = cms.InputTag("TTStubAssociatorFromPixelDigis", "StubAccepted"), + tp_ptMin = cms.double(2.0), # minimum tp pt [GeV] + tp_etaMax = cms.double(2.4), # 
maximum tp eta + tp_zMax = cms.double(15.), # max tp z0 [cm] + tp_nStubMin = cms.int32(4), # minimum number of stubs + tp_nStubLayerMin = cms.int32(4), # minimum number of layers with stubs + coneSize=cms.double(0.4), # cone size for anti-kt fast jet +) diff --git a/L1Trigger/L1TZDC/plugins/L1TZDCProducer.cc b/L1Trigger/L1TZDC/plugins/L1TZDCProducer.cc index 9fc2f680861fe..8db6de8069b02 100644 --- a/L1Trigger/L1TZDC/plugins/L1TZDCProducer.cc +++ b/L1Trigger/L1TZDC/plugins/L1TZDCProducer.cc @@ -66,8 +66,6 @@ class L1TZDCProducer : public edm::stream::EDProducer<> { private: void produce(edm::Event&, const edm::EventSetup&) override; - void beginRun(edm::Run const&, edm::EventSetup const&) override; - // ----------member data --------------------------- // input tokens @@ -157,9 +155,6 @@ void L1TZDCProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) iEvent.emplace(etToken_, std::move(etsumsReduced)); } -// ------------ method called when starting to processes a run ------------ -void L1TZDCProducer::beginRun(edm::Run const& iRun, edm::EventSetup const& iSetup) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1TZDCProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; diff --git a/L1Trigger/Phase2L1GT/python/l1tGTProducer_cff.py b/L1Trigger/Phase2L1GT/python/l1tGTProducer_cff.py index 141fe62b8f5e5..26242b1495839 100644 --- a/L1Trigger/Phase2L1GT/python/l1tGTProducer_cff.py +++ b/L1Trigger/Phase2L1GT/python/l1tGTProducer_cff.py @@ -10,10 +10,10 @@ GMTSaPromptMuons = cms.InputTag("l1tSAMuonsGmt", "promptSAMuons"), GMTSaDisplacedMuons = cms.InputTag("l1tSAMuonsGmt", "displacedSAMuons"), GMTTkMuons = cms.InputTag("l1tTkMuonsGmtLowPtFix", "l1tTkMuonsGmtLowPtFix"), - CL2Jets = cms.InputTag("l1tSCPFL1PuppiCorrectedEmulator"), + CL2Jets = cms.InputTag("l1tSC4PFL1PuppiCorrectedEmulator"), CL2Electrons = cms.InputTag("l1tLayer2EG", 
"L1CtTkElectron"), CL2Photons = cms.InputTag("l1tLayer2EG", "L1CtTkEm"), CL2Taus = cms.InputTag("l1tNNTauProducerPuppi", "L1PFTausNN"), CL2EtSum = cms.InputTag("l1tMETPFProducer"), - CL2HtSum = cms.InputTag("l1tSCPFL1PuppiCorrectedEmulatorMHT") + CL2HtSum = cms.InputTag("l1tSC4PFL1PuppiCorrectedEmulatorMHT") ) diff --git a/L1Trigger/Phase2L1GT/python/l1tGTScales.py b/L1Trigger/Phase2L1GT/python/l1tGTScales.py index 350e71ce2476d..f22968d631913 100644 --- a/L1Trigger/Phase2L1GT/python/l1tGTScales.py +++ b/L1Trigger/Phase2L1GT/python/l1tGTScales.py @@ -1,4 +1,3 @@ -from libL1TriggerPhase2L1GT import L1GTScales as CppScales import FWCore.ParameterSet.Config as cms import math @@ -18,5 +17,3 @@ pos_chg=cms.int32(1), neg_chg=cms.int32(0) ) - -l1tGTScales = CppScales(*[param.value() for param in scale_parameter.parameters_().values()]) diff --git a/L1Trigger/Phase2L1GT/src/classes_def.xml b/L1Trigger/Phase2L1GT/src/classes_def.xml index 7b480eec42392..95664ce9398f5 100644 --- a/L1Trigger/Phase2L1GT/src/classes_def.xml +++ b/L1Trigger/Phase2L1GT/src/classes_def.xml @@ -1,5 +1,5 @@ - - - + + + diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/egamma/pftkegalgo_ref.h b/L1Trigger/Phase2L1ParticleFlow/interface/egamma/pftkegalgo_ref.h index e1d170b9a80f1..58695d7fdc7a6 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/egamma/pftkegalgo_ref.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/egamma/pftkegalgo_ref.h @@ -65,8 +65,8 @@ namespace l1ct { CompIDParameters(const edm::ParameterSet &); CompIDParameters(double bdtScore_loose_wp, double bdtScore_tight_wp, const std::string &model) : bdtScore_loose_wp(bdtScore_loose_wp), bdtScore_tight_wp(bdtScore_tight_wp), conifer_model(model) {} - const double bdtScore_loose_wp; // XGBOOST score - const double bdtScore_tight_wp; // XGBOOST score + const id_score_t bdtScore_loose_wp; // Conifer score/4 + const id_score_t bdtScore_tight_wp; // Conifer score/4 const std::string conifer_model; static edm::ParameterSetDescription 
getParameterSetDescription(); }; @@ -175,7 +175,7 @@ namespace l1ct { const std::vector &emcalo, const std::vector &track, std::vector &emCalo2tk, - std::vector &emCaloTkBdtScore) const; + std::vector &emCaloTkBdtScore) const; struct CompositeCandidate { unsigned int cluster_idx; @@ -183,10 +183,10 @@ namespace l1ct { double dpt; // For sorting }; - float compute_composite_score(CompositeCandidate &cand, - const std::vector &emcalo, - const std::vector &track, - const PFTkEGAlgoEmuConfig::CompIDParameters ¶ms) const; + id_score_t compute_composite_score(CompositeCandidate &cand, + const std::vector &emcalo, + const std::vector &track, + const PFTkEGAlgoEmuConfig::CompIDParameters ¶ms) const; //FIXME: still needed float deltaPhi(float phi1, float phi2) const; @@ -200,7 +200,7 @@ namespace l1ct { const std::vector &track, const std::vector &emCalo2emCalo, const std::vector &emCalo2tk, - const std::vector &emCaloTkBdtScore, + const std::vector &emCaloTkBdtScore, std::vector &egstas, std::vector &egobjs, std::vector &egeleobjs) const; @@ -214,7 +214,7 @@ namespace l1ct { const unsigned int hwQual, const pt_t ptCorr, const int tk_idx, - const float bdtScore, + const id_score_t bdtScore, const std::vector &components = {}) const; EGObjEmu &addEGStaToPF(std::vector &egobjs, @@ -233,7 +233,7 @@ namespace l1ct { const TkObjEmu &track, const unsigned int hwQual, const pt_t ptCorr, - const float bdtScore) const; + const id_score_t bdtScore) const; // FIXME: reimplemented from PFAlgoEmulatorBase template diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_bits.h b/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_bits.h index f1aa84e38104b..efe7b10086d20 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_bits.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_bits.h @@ -1,17 +1,19 @@ -#ifndef FIRMWARE_LINPUPPI_BITS_H -#define FIRMWARE_LINPUPPI_BITS_H +#ifndef L1Trigger_Phase2L1ParticleFlow_LINPUPPI_BITS_H +#define 
L1Trigger_Phase2L1ParticleFlow_LINPUPPI_BITS_H -#define LINPUPPI_ptLSB 0.25 -#define LINPUPPI_DR2LSB 1.9e-5 -#define LINPUPPI_dzLSB 0.05 -#define LINPUPPI_pt2LSB LINPUPPI_ptLSB* LINPUPPI_ptLSB -#define LINPUPPI_pt2DR2_scale LINPUPPI_ptLSB* LINPUPPI_ptLSB / LINPUPPI_DR2LSB +#include "DataFormats/L1TParticleFlow/interface/datatypes.h" -#define LINPUPPI_sum_bitShift 15 -#define LINPUPPI_x2_bits 6 // decimal bits the discriminator values -#define LINPUPPI_alpha_bits 5 // decimal bits of the alpha values -#define LINPUPPI_alphaSlope_bits 5 // decimal bits of the alphaSlope values -#define LINPUPPI_ptSlope_bits 6 // decimal bits of the ptSlope values -#define LINPUPPI_weight_bits 8 +namespace linpuppi { + typedef ap_ufixed<12, 6, AP_TRN, AP_SAT> sumTerm_t; + typedef ap_ufixed<16, 0, AP_RND, AP_SAT> dr2inv_t; + typedef ap_fixed<12, 7, AP_TRN, AP_SAT> x2_t; + typedef ap_ufixed<7, 2, AP_RND, AP_WRAP> alphaSlope_t; + typedef ap_fixed<12, 8, AP_RND, AP_WRAP> alpha_t; + typedef ap_ufixed<6, 0, AP_TRN, AP_WRAP> ptSlope_t; + + constexpr float DR2_LSB = l1ct::Scales::ETAPHI_LSB * l1ct::Scales::ETAPHI_LSB; + constexpr float PT2DR2_LSB = l1ct::Scales::INTPT_LSB * l1ct::Scales::INTPT_LSB / DR2_LSB; + constexpr int SUM_BITSHIFT = sumTerm_t::width - sumTerm_t::iwidth; +} // namespace linpuppi #endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_ref.h b/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_ref.h index 4591f5ef3dc4f..0e4b5035ecc13 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_ref.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/puppi/linpuppi_ref.h @@ -2,6 +2,7 @@ #define LINPUPPI_REF_H #include "DataFormats/L1TParticleFlow/interface/layer1_emulator.h" +#include "linpuppi_bits.h" #include @@ -217,7 +218,8 @@ namespace l1ct { bool fakePuppi_; // utility unsigned int find_ieta(const PFRegionEmu ®ion, eta_t eta) const; - std::pair sum2puppiPt_ref(uint64_t sum, pt_t pt, unsigned int ieta, bool isEM, int icand) const; + 
std::pair sum2puppiPt_ref( + linpuppi::sumTerm_t sum, pt_t pt, unsigned int ieta, bool isEM, int icand) const; std::pair sum2puppiPt_flt(float sum, float pt, unsigned int ieta, bool isEM, int icand) const; }; diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/buffered_folded_multififo_regionizer_ref.h b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/buffered_folded_multififo_regionizer_ref.h index 9e171a75cfd6a..8e446691aa2fe 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/buffered_folded_multififo_regionizer_ref.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/buffered_folded_multififo_regionizer_ref.h @@ -6,38 +6,6 @@ #include #include -namespace l1ct { - namespace multififo_regionizer { - template - inline bool local_eta_window(const T& t, const l1ct::glbeta_t& etaMin, const l1ct::glbeta_t& etaMax); - template <> - inline bool local_eta_window(const l1ct::TkObjEmu& t, - const l1ct::glbeta_t& etaMin, - const l1ct::glbeta_t& etaMax); - - template - class EtaBuffer { - public: - EtaBuffer() {} - EtaBuffer(unsigned int maxitems, const l1ct::glbeta_t& etaMin = 0, const l1ct::glbeta_t& etaMax = 0) - : size_(maxitems), iwrite_(0), iread_(0), etaMin_(etaMin), etaMax_(etaMax) {} - void maybe_push(const T& t); - void writeNewEvent() { - iwrite_ = 1 - iwrite_; - items_[iwrite_].clear(); - } - void readNewEvent() { iread_ = 1 - iread_; } - T pop(); - unsigned int writeSize() const { return items_[iwrite_].size(); } - unsigned int readSize() const { return items_[iread_].size(); } - - private: - unsigned int size_, iwrite_, iread_; - l1ct::glbeta_t etaMin_, etaMax_; - std::deque items_[2]; - }; - } // namespace multififo_regionizer -} // namespace l1ct namespace l1ct { class BufferedFoldedMultififoRegionizerEmulator : public FoldedMultififoRegionizerEmulator { public: @@ -88,9 +56,9 @@ namespace l1ct { } protected: - std::vector> tkBuffers_; - std::vector> caloBuffers_; - std::vector> muBuffers_; + std::vector> tkBuffers_; + 
std::vector> caloBuffers_; + std::vector> muBuffers_; void findEtaBounds_(const l1ct::PFRegionEmu& sec, const std::vector& reg, @@ -106,41 +74,4 @@ namespace l1ct { }; } // namespace l1ct -template -inline bool l1ct::multififo_regionizer::local_eta_window(const T& t, - const l1ct::glbeta_t& etaMin, - const l1ct::glbeta_t& etaMax) { - return (etaMin == etaMax) || (etaMin <= t.hwEta && t.hwEta <= etaMax); -} -template <> -inline bool l1ct::multififo_regionizer::local_eta_window(const l1ct::TkObjEmu& t, - const l1ct::glbeta_t& etaMin, - const l1ct::glbeta_t& etaMax) { - return (etaMin == etaMax) || (etaMin <= t.hwEta && t.hwEta <= etaMax) || - (etaMin <= t.hwVtxEta() && t.hwVtxEta() <= etaMax); -} -template -void l1ct::multififo_regionizer::EtaBuffer::maybe_push(const T& t) { - if ((t.hwPt != 0) && local_eta_window(t, etaMin_, etaMax_)) { - if (items_[iwrite_].size() < size_) { - items_[iwrite_].push_back(t); - } else { - // uncommenting the message below may be useful for debugging - //dbgCout() << "WARNING: sector buffer is full for " << typeid(T).name() << ", pt = " << t.intPt() - // << ", eta = " << t.intEta() << ", phi = " << t.intPhi() << "\n"; - } - } -} - -template -T l1ct::multififo_regionizer::EtaBuffer::pop() { - T ret; - ret.clear(); - if (!items_[iread_].empty()) { - ret = items_[iread_].front(); - items_[iread_].pop_front(); - } - return ret; -} - #endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/middle_buffer_multififo_regionizer_ref.h b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/middle_buffer_multififo_regionizer_ref.h new file mode 100644 index 0000000000000..b53d34f05a6f3 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/middle_buffer_multififo_regionizer_ref.h @@ -0,0 +1,125 @@ +#ifndef middle_buffer_multififo_regionizer_ref_h +#define middle_buffer_multififo_regionizer_ref_h + +#include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_ref.h" +#include 
"L1Trigger/Phase2L1ParticleFlow/interface/dbgPrintf.h" +#include +#include + +namespace l1ct { + class MiddleBufferMultififoRegionizerEmulator : public RegionizerEmulator { + public: + MiddleBufferMultififoRegionizerEmulator(unsigned int nclocks, + unsigned int nbuffers, + unsigned int etabufferDepth, + unsigned int ntklinks, + unsigned int nHCalLinks, + unsigned int nECalLinks, + unsigned int ntk, + unsigned int ncalo, + unsigned int nem, + unsigned int nmu, + bool streaming, + unsigned int outii, + unsigned int pauseii, + bool useAlsoVtxCoords); + // note: this one will work only in CMSSW + MiddleBufferMultififoRegionizerEmulator(const edm::ParameterSet& iConfig); + + ~MiddleBufferMultififoRegionizerEmulator() override; + + static edm::ParameterSetDescription getParameterSetDescription(); + + void initSectorsAndRegions(const RegionizerDecodedInputs& in, const std::vector& out) override; + + void run(const RegionizerDecodedInputs& in, std::vector& out) override; + + // link emulation from decoded inputs (for simulation) + void fillLinks(unsigned int iclock, + const RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid); + void fillLinks(unsigned int iclock, + const RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid); + void fillLinks(unsigned int iclock, + const RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid); + void fillLinks(unsigned int iclock, + const RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid); + template + void fillLinks(unsigned int iclock, const RegionizerDecodedInputs& in, std::vector& links) { + std::vector unused; + fillLinks(iclock, in, links, unused); + } + + void destream(int iclock, + const std::vector& tk_out, + const std::vector& em_out, + const std::vector& calo_out, + const std::vector& mu_out, + PFInputRegion& out); + + // clock-cycle emulation + bool step(bool newEvent, + const std::vector& links_tk, + const std::vector& links_hadCalo, + const 
std::vector& links_emCalo, + const std::vector& links_mu, + std::vector& out_tk, + std::vector& out_hadCalo, + std::vector& out_emCalo, + std::vector& out_mu, + bool /*unused*/); + + template + void toFirmware(const std::vector& emu, TFw fw[]) { + for (unsigned int i = 0, n = emu.size(); i < n; ++i) { + fw[i] = emu[i]; + } + } + + void reset(); + + protected: + const unsigned int NTK_SECTORS, NCALO_SECTORS; + const unsigned int NTK_LINKS, HCAL_LINKS, ECAL_LINKS, NMU_LINKS; + unsigned int nclocks_, nbuffers_, etabuffer_depth_, ntk_, ncalo_, nem_, nmu_, outii_, pauseii_, nregions_pre_, + nregions_post_; + bool streaming_; + bool init_; + unsigned int iclock_; + std::vector mergedRegions_, outputRegions_; + multififo_regionizer::Regionizer tkRegionizerPre_, tkRegionizerPost_; + multififo_regionizer::Regionizer hadCaloRegionizerPre_, hadCaloRegionizerPost_; + multififo_regionizer::Regionizer emCaloRegionizerPre_, emCaloRegionizerPost_; + multififo_regionizer::Regionizer muRegionizerPre_, muRegionizerPost_; + std::vector tkRoutes_, caloRoutes_, emCaloRoutes_, muRoutes_; + std::vector> tkBuffers_; + std::vector> hadCaloBuffers_; + std::vector> emCaloBuffers_; + std::vector> muBuffers_; + + template + void fillCaloLinks_(unsigned int iclock, + const std::vector>& in, + std::vector& links, + std::vector& valid); + + void fillSharedCaloLinks(unsigned int iclock, + const std::vector>& em_in, + const std::vector>& had_in, + std::vector& links, + std::vector& valid); + + void encode(const l1ct::EmCaloObjEmu& from, l1ct::HadCaloObjEmu& to); + void encode(const l1ct::HadCaloObjEmu& from, l1ct::HadCaloObjEmu& to); + void decode(l1ct::HadCaloObjEmu& had, l1ct::EmCaloObjEmu& em); + }; +} // namespace l1ct + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.h b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.h index 91d36a366a1ae..0d7adcf55fe30 100644 --- 
a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.h @@ -5,6 +5,7 @@ #include #include +#include #include namespace l1ct { @@ -76,6 +77,60 @@ namespace l1ct { T pop_queue_(std::vector& queue); }; + template + inline bool local_eta_phi_window(const T& t, + const l1ct::glbeta_t& etaMin, + const l1ct::glbeta_t& etaMax, + const l1ct::glbphi_t& phiMin, + const l1ct::glbphi_t& phiMax); + template <> + inline bool local_eta_phi_window(const l1ct::TkObjEmu& t, + const l1ct::glbeta_t& etaMin, + const l1ct::glbeta_t& etaMax, + const l1ct::glbphi_t& phiMin, + const l1ct::glbphi_t& phiMax); + + template + class EtaPhiBuffer { + public: + EtaPhiBuffer() {} + EtaPhiBuffer(unsigned int maxitems, + const l1ct::glbeta_t& etaMin = 0, + const l1ct::glbeta_t& etaMax = 0, + const l1ct::glbeta_t& etaShift = 0, + const l1ct::glbphi_t& phiMin = 0, + const l1ct::glbphi_t& phiMax = 0, + const l1ct::glbphi_t& phiShift = 0) + : size_(maxitems), + iwrite_(0), + iread_(0), + etaMin_(etaMin), + etaMax_(etaMax), + etaShift_(etaShift), + phiMin_(phiMin), + phiMax_(phiMax), + phiShift_(phiShift) {} + void maybe_push(const T& t); + void writeNewEvent() { + iwrite_ = 1 - iwrite_; + items_[iwrite_].clear(); + } + void readNewEvent() { iread_ = 1 - iread_; } + T pop(); + unsigned int writeSize() const { return items_[iwrite_].size(); } + unsigned int readSize() const { return items_[iread_].size(); } + unsigned int maxSize() const { return size_; } + void reset(); + + private: + unsigned int size_, iwrite_, iread_; + l1ct::glbeta_t etaMin_, etaMax_; + l1ct::glbeta_t etaShift_; + l1ct::glbphi_t phiMin_, phiMax_; + l1ct::glbphi_t phiShift_; + std::deque items_[2]; + }; + // forward decl for later template class RegionMux; @@ -183,4 +238,57 @@ namespace l1ct { } // namespace multififo_regionizer } // namespace l1ct +template +inline bool 
l1ct::multififo_regionizer::local_eta_phi_window(const T& t, + const l1ct::glbeta_t& etaMin, + const l1ct::glbeta_t& etaMax, + const l1ct::glbphi_t& phiMin, + const l1ct::glbphi_t& phiMax) { + return (etaMin == etaMax) || + (etaMin <= t.hwEta && t.hwEta <= etaMax && ((phiMin == phiMax) || (phiMin <= t.hwPhi && t.hwPhi <= phiMax))); +} +template <> +inline bool l1ct::multififo_regionizer::local_eta_phi_window(const l1ct::TkObjEmu& t, + const l1ct::glbeta_t& etaMin, + const l1ct::glbeta_t& etaMax, + const l1ct::glbphi_t& phiMin, + const l1ct::glbphi_t& phiMax) { + return (etaMin == etaMax) || + (etaMin <= t.hwEta && t.hwEta <= etaMax && ((phiMin == phiMax) || (phiMin <= t.hwPhi && t.hwPhi <= phiMax))) || + (etaMin <= t.hwVtxEta() && t.hwVtxEta() <= etaMax && + ((phiMin == phiMax) || (phiMin <= t.hwVtxPhi() && t.hwVtxPhi() <= phiMax))); +} +template +void l1ct::multififo_regionizer::EtaPhiBuffer::maybe_push(const T& t) { + if ((t.hwPt != 0) && local_eta_phi_window(t, etaMin_, etaMax_, phiMin_, phiMax_)) { + if (items_[iwrite_].size() < size_) { + items_[iwrite_].push_back(t); + items_[iwrite_].back().hwEta += etaShift_; + items_[iwrite_].back().hwPhi += phiShift_; + } else { + // uncommenting the message below may be useful for debugging + //dbgCout() << "WARNING: sector buffer is full for " << typeid(T).name() << ", pt = " << t.intPt() + // << ", eta = " << t.intEta() << ", phi = " << t.intPhi() << "\n"; + } + } +} + +template +T l1ct::multififo_regionizer::EtaPhiBuffer::pop() { + T ret; + ret.clear(); + if (!items_[iread_].empty()) { + ret = items_[iread_].front(); + items_[iread_].pop_front(); + } + return ret; +} +template +void l1ct::multififo_regionizer::EtaPhiBuffer::reset() { + iread_ = 0; + iwrite_ = 0; + items_[0].clear(); + items_[1].clear(); +} + #endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.icc b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.icc index 
66b0538ac38fe..a884ed2f91780 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.icc +++ b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.icc @@ -41,6 +41,11 @@ void l1ct::multififo_regionizer::maybe_push(const l1ct::TkObjEmu template void l1ct::multififo_regionizer::RegionBuffer::initFifos(unsigned int nfifos) { assert(nfifos_ == 0); + bool isGood = + (nfifos == 1 || nfifos == 2 || nfifos == 3 || nfifos == 4 || nfifos == 6 || nfifos == 8 || nfifos == 12); + if (!isGood) { + dbgCerr() << "Error, created regionizer for nfifos == " << nfifos << ", not supported." << std::endl; + } nfifos_ = nfifos; fifos_.resize(nfifos); unsigned int nmerged = nfifos; @@ -53,11 +58,6 @@ void l1ct::multififo_regionizer::RegionBuffer::initFifos(unsigned int nfifos) for (auto& t : queues_.back().second) t.clear(); } - bool isGood = - (nfifos == 1 || nfifos == 2 || nfifos == 3 || nfifos == 4 || nfifos == 6 || nfifos == 8 || nfifos == 12); - if (!isGood) { - dbgCerr() << "Error, created regionizer for nfifos == " << nfifos << ", not supported." 
<< std::endl; - } assert(isGood); } diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_ref.h b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_ref.h index d2b6bbce52b72..965f87dbadcdf 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_ref.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_ref.h @@ -34,6 +34,7 @@ namespace l1ct { enum class BarrelSetup { Full54, Full27, Central18, Central9, Phi18, Phi9 }; MultififoRegionizerEmulator(BarrelSetup barrelSetup, + unsigned int ntklinks, unsigned int nHCalLinks, unsigned int nECalLinks, unsigned int nclocks, diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/TauNNIdHW.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/TauNNIdHW.h index cee3fa5f2c11c..cf8302dcbf74c 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/TauNNIdHW.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/TauNNIdHW.h @@ -1,32 +1,24 @@ #ifndef L1Trigger_Phase2L1ParticleFlow_TAUNNIDHW_H_ #define L1Trigger_Phase2L1ParticleFlow_TAUNNIDHW_H_ -#include "DataFormats/L1TParticleFlow/interface/layer1_emulator.h" - #include #include -#include "ap_int.h" -#include "ap_fixed.h" + #include "L1Trigger/Phase2L1ParticleFlow/interface/taus/tau_parameters.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h" +#include "DataFormats/L1TParticleFlow/interface/layer1_emulator.h" #include "DataFormats/L1TParticleFlow/interface/PFCandidate.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_layer.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_activation.h" - -//hls-fpga-machine-learning insert weights -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w1.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b1.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h" 
-#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w3.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b3.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w4.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b4.h" - typedef ap_ufixed<16, 14> pt_t; typedef ap_fixed<10, 4> etaphi_t; +// Tau NN returns two values +struct Tau_NN_Result { + result_t nn_pt_correction; + result_t nn_id; +}; + namespace L1TauEmu { // Data types and constants used in the FPGA and FPGA-optimized functions //etaphi_base maps physical eta phi units onto bits @@ -148,8 +140,8 @@ class TauNNIdHW { void initialize(const std::string &iName, int iNParticles); void SetNNVectorVar(); input_t *NNVectorVar() { return NNvectorVar_.data(); } - result_t EvaluateNN(); - result_t compute(const l1t::PFCandidate &iSeed, std::vector &iParts); + Tau_NN_Result EvaluateNN(); + Tau_NN_Result compute(const l1t::PFCandidate &iSeed, std::vector &iParts); //void print(); std::string fInput_; diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h new file mode 100644 index 0000000000000..1f1a2f73dbb98 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h @@ -0,0 +1,70 @@ +#ifndef DEFINES_H_ +#define DEFINES_H_ + +#include "ap_fixed.h" +#include "ap_int.h" + +#include +#include + +// hls-fpga-machine-learning insert numbers +#define N_INPUT_1_1 80 +#define N_LAYER_2 25 +#define N_LAYER_2 25 +#define N_LAYER_5 25 +#define N_LAYER_5 25 +#define N_LAYER_8 15 +#define N_LAYER_8 15 +#define N_LAYER_11 15 +#define N_LAYER_11 15 +#define N_LAYER_14 10 +#define N_LAYER_14 10 +#define N_LAYER_17 1 +#define N_LAYER_17 1 +#define N_LAYER_20 1 + +// hls-fpga-machine-learning insert layer-precision +typedef ap_fixed<16, 6> input_t; +typedef ap_fixed<24, 12> input2_t; +typedef ap_fixed<16, 6> model_default_t; +typedef ap_fixed<16, 6> layer2_t; +typedef ap_fixed<9, 3> 
weight2_t; +typedef ap_fixed<9, 3> bias2_t; +typedef ap_uint<1> layer2_index; +typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer4_t; +typedef ap_fixed<18, 8> relu_1_table_t; +typedef ap_fixed<16, 6> layer5_t; +typedef ap_fixed<9, 3> weight5_t; +typedef ap_fixed<9, 3> bias5_t; +typedef ap_uint<1> layer5_index; +typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer7_t; +typedef ap_fixed<18, 8> relu_2_table_t; +typedef ap_fixed<16, 6> layer8_t; +typedef ap_fixed<9, 3> weight8_t; +typedef ap_fixed<9, 3> bias8_t; +typedef ap_uint<1> layer8_index; +typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer10_t; +typedef ap_fixed<18, 8> relu_3_table_t; +typedef ap_fixed<16, 6> layer11_t; +typedef ap_fixed<9, 3> weight11_t; +typedef ap_fixed<9, 3> bias11_t; +typedef ap_uint<1> layer11_index; +typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer13_t; +typedef ap_fixed<18, 8> relu_4_table_t; +typedef ap_fixed<16, 6> layer14_t; +typedef ap_fixed<9, 3> weight14_t; +typedef ap_fixed<9, 3> bias14_t; +typedef ap_uint<1> layer14_index; +typedef ap_ufixed<9, 0, AP_RND_CONV, AP_SAT> layer16_t; +typedef ap_fixed<18, 8> relu_5_table_t; +typedef ap_fixed<16, 6> layer17_t; +typedef ap_fixed<16, 7> weight17_t; +typedef ap_fixed<16, 7> bias17_t; +typedef ap_uint<1> layer17_index; +typedef ap_fixed<16, 6> result_t; +typedef ap_fixed<18, 8> jetID_output_table_t; +typedef ap_fixed<16, 7> weight20_t; +typedef ap_fixed<16, 7> bias20_t; +typedef ap_uint<1> layer20_index; + +#endif \ No newline at end of file diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_activation.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_activation.h new file mode 100644 index 0000000000000..e5413aedf5fc5 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_activation.h @@ -0,0 +1,94 @@ +#ifndef NNET_ACTIVATION_H_ +#define NNET_ACTIVATION_H_ + +#include +#include "ap_fixed.h" +#include "nnet_common.h" + +namespace nnet { + + struct activ_config { + // IO 
size + static const unsigned n_in = 10; + + // Internal info + static const unsigned table_size = 1024; + + // Resource reuse info + static const unsigned io_type = io_parallel; + static const unsigned reuse_factor = 1; + + // Internal data type definitions + typedef ap_fixed<18, 8> table_t; + }; + + // ************************************************* + // LINEAR Activation -- See Issue 53 + // ************************************************* + template + void linear(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { + for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) { + res[ii] = data[ii]; + } + } + + // ************************************************* + // RELU Activation + // ************************************************* + template + void relu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { + data_T datareg; + for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) { + datareg = data[ii]; + if (datareg > 0) + res[ii] = datareg; + else + res[ii] = 0; + } + } + + // ************************************************* + // Sigmoid Activation + // ************************************************* + template + inline out_T sigmoid_fcn_float(float input) { + return 1.0 / (1 + exp(-input)); + } + + template + void init_sigmoid_table(res_T table_out[N_TABLE]) { + // Default logistic sigmoid function: + // result = 1/(1+e^(-x)) + for (unsigned ii = 0; ii < N_TABLE; ii++) { + // First, convert from table index to X-value (signed 8-bit, range -8 to +8) + float in_val = 2 * 8.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE); + // Next, compute lookup table function + res_T real_val = sigmoid_fcn_float(in_val); + //std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl; + table_out[ii] = (res_T)real_val; + } + } + + template + void sigmoid(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { + // Initialize the lookup table + res_T sigmoid_table[CONFIG_T::table_size]; + init_sigmoid_table(sigmoid_table); + + // Index into 
the lookup table based on data + int data_round; + unsigned index; + for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) { + data_round = data[ii] * CONFIG_T::table_size / 16; + index = data_round + 8 * CONFIG_T::table_size / 16; + /*if (index < 0) + index = 0;*/ + if (index > CONFIG_T::table_size - 1) + index = CONFIG_T::table_size - 1; + res[ii] = (res_T)sigmoid_table[index]; + } + } + +} // namespace nnet + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_common.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_common.h new file mode 100644 index 0000000000000..8441cca4412c2 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_common.h @@ -0,0 +1,52 @@ +#ifndef NNET_COMMON_H_ +#define NNET_COMMON_H_ + +#include "ap_fixed.h" + +// This is a substitute for "ceil(n/(float)d)". +#define DIV_ROUNDUP(n, d) ((n + d - 1) / d) +#define MIN(n, d) (n > d ? d : n) +#define MAX(n, d) (n > d ? n : d) + +#define STRINGIFY(x) #x +#define EXPAND_STRING(x) STRINGIFY(x) + +namespace nnet { + + // Common type definitions + enum io_type { io_parallel = 0, io_stream }; + enum strategy { latency, resource }; + + template + class Op_add { + public: + T operator()(T a, T b) { return a + b; } + }; + + template + class Op_and { + public: + T operator()(T a, T b) { return a && b; } + }; + + template + class Op_or { + public: + T operator()(T a, T b) { return a || b; } + }; + + template + class Op_max { + public: + T operator()(T a, T b) { return a >= b ? a : b; } + }; + + template + class Op_min { + public: + T operator()(T a, T b) { return a <= b ? 
a : b; } + }; + +} // namespace nnet + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_dense.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_dense.h new file mode 100644 index 0000000000000..22edbcbf501bd --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_dense.h @@ -0,0 +1,72 @@ +#ifndef NNET_DENSE_H_ +#define NNET_DENSE_H_ + +#include "nnet_common.h" +#include "nnet_mult.h" +#include + +namespace nnet { + + struct dense_config { + // Internal data type definitions + typedef float bias_t; + typedef float weight_t; + typedef float accum_t; + + // Layer Sizes + static const unsigned n_in = 10; + static const unsigned n_out = 10; + + // Resource reuse info + int io_type = io_parallel; + int strategy = latency; + int reuse_factor = 1; + static const bool store_weights_in_bram = false; + int n_zeros = 0; + // partitioning arrays cyclically to go with roll factors? + // Product function to use + template + using product = nnet::product::mult; + }; + + template + void dense(data_T data[CONFIG_T::n_in], + res_T res[CONFIG_T::n_out], + typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], + typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { + data_T cache; + typename CONFIG_T::accum_t mult[CONFIG_T::n_in * CONFIG_T::n_out]; + typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; + + // Do the matrix-multiply + for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) { + cache = data[ii]; + for (unsigned jj = 0; jj < CONFIG_T::n_out; jj++) { + unsigned index = ii * CONFIG_T::n_out + jj; + mult[index] = CONFIG_T::template product::product(cache, weights[index]); + } + } + + // Initialize accumulator with input biases + for (unsigned iacc = 0; iacc < CONFIG_T::n_out; iacc++) { + acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; + } + + // Accumulate multiplication result + for (unsigned ii = 0; ii < CONFIG_T::n_in; ii++) { + for (unsigned jj = 0; jj < CONFIG_T::n_out; jj++) { + 
unsigned index = ii * CONFIG_T::n_out + jj; + acc[jj] += mult[index]; + } + } + + // Cast to "res_t" type + for (unsigned ires = 0; ires < CONFIG_T::n_out; ires++) { + // res[ires] = (res_T) (acc[ires]); + res[ires] = cast(acc[ires]); + } + } + +} // namespace nnet + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_mult.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_mult.h new file mode 100644 index 0000000000000..3e2ce5b84b080 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_mult.h @@ -0,0 +1,108 @@ +#ifndef NNET_MULT_H_ +#define NNET_MULT_H_ + +#include "nnet_common.h" +#include +#include + +namespace nnet { + + constexpr int ceillog2(int x) { return (x <= 2) ? 1 : 1 + ceillog2((x + 1) / 2); } + + namespace product { + + /* --- + * different methods to perform the product of input and weight, depending on the + * types of each. + * --- */ + + class Product {}; + + template + class both_binary : public Product { + public: + static x_T product(x_T a, w_T w) { return a == w; } + }; + + template + class weight_binary : public Product { + public: + static auto product(x_T a, w_T w) -> decltype(-a) { + if (w == 0) + return -a; + else + return a; + } + }; + + template + class data_binary : public Product { + public: + static auto product(x_T a, w_T w) -> decltype(-w) { + if (a == 0) + return -w; + else + return w; + } + }; + + template + class weight_ternary : public Product { + public: + static auto product(x_T a, w_T w) -> decltype(-a) { + if (w == 0) + return 0; + else if (w == -1) + return -a; + else + return a; // if(w == 1) + } + }; + + template + class mult : public Product { + public: + static auto product(x_T a, w_T w) -> decltype(a * w) { return a * w; } + }; + + template + class weight_exponential : public Product { + public: + using r_T = + ap_fixed<2 * (decltype(w_T::weight)::width + x_T::width), (decltype(w_T::weight)::width + x_T::width)>; + static r_T product(x_T a, w_T 
w) { + // Shift by the exponent. Negative weights shift right + r_T y = static_cast(a) << w.weight; + + // Negate or not depending on weight sign + return w.sign == 1 ? y : static_cast(-y); + } + }; + + } // namespace product + + template + inline typename std::enable_if>::value && + std::is_same>::value, + ap_int>::type + cast(typename CONFIG_T::accum_t x) { + return (ap_int)(x - CONFIG_T::n_in / 2) * 2; + } + + template + inline typename std::enable_if>::value && + !std::is_same>::value, + res_T>::type + cast(typename CONFIG_T::accum_t x) { + return (res_T)x; + } + + template + inline typename std::enable_if<(!std::is_same>::value), res_T>::type cast( + typename CONFIG_T::accum_t x) { + return (res_T)x; + } + +} // namespace nnet + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/tau_parameters.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/tau_parameters.h index c6344e19f7c52..e488ac1bf4902 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/tau_parameters.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/tau_parameters.h @@ -1,100 +1,218 @@ #ifndef PARAMETERS_H_ #define PARAMETERS_H_ -#include -#include "ap_int.h" #include "ap_fixed.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_layer.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_activation.h" -#include "L1Trigger/Phase2L1ParticleFlow/interface/common/nnet_common.h" - -//hls-fpga-machine-learning insert numbers -#define N_INPUTS 80 -#define N_LAYER_1 25 -#define N_LAYER_2 10 -#define N_LAYER_3 10 -#define N_OUTPUTS 1 - -//hls-fpga-machine-learning insert layer-precision - -typedef ap_fixed<24, 12> input2_t; -typedef ap_fixed<16, 8> input_t; -typedef ap_fixed<16, 8> layer1_t; -typedef ap_fixed<16, 8> layer2_t; -typedef ap_fixed<16, 8> layer3_t; -typedef ap_fixed<16, 8> result_t; -typedef ap_fixed<16, 8> accum_default_t; -typedef ap_fixed<16, 8> weight_default_t; -typedef ap_fixed<16, 8> bias_default_t; - -//hls-fpga-machine-learning insert 
layer-config -struct config1 : nnet::layer_config { - static const unsigned n_in = N_INPUTS; - static const unsigned n_out = N_LAYER_1; +#include "ap_int.h" + +#include + +// Tau NN components +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_activation.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/nnet_utils/nnet_dense.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/defines.h" + +// Load the NN weights +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w5.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b5.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w8.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b8.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w11.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b11.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w14.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b14.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w17.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b17.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w20.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b20.h" + +// hls-fpga-machine-learning insert layer-config +// Dense_1 +struct config2 : nnet::dense_config { + static const unsigned n_in = 80; + static const unsigned n_out = 25; + static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; + static const unsigned reuse_factor = 1; + static const unsigned n_zeros = 1205; + static const unsigned n_nonzeros = 795; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; + static const bool 
store_weights_in_bram = false; + typedef model_default_t accum_t; + typedef bias2_t bias_t; + typedef weight2_t weight_t; + typedef layer2_index index_t; + template + using product = nnet::product::mult; +}; + +// relu_1 +struct relu_config4 : nnet::activ_config { + static const unsigned n_in = 25; + static const unsigned table_size = 1024; + static const unsigned io_type = nnet::io_parallel; + static const unsigned reuse_factor = 1; + typedef relu_1_table_t table_t; +}; + +// Dense_2 +struct config5 : nnet::dense_config { + static const unsigned n_in = 25; + static const unsigned n_out = 25; + static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; + static const unsigned reuse_factor = 1; + static const unsigned n_zeros = 375; + static const unsigned n_nonzeros = 250; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; + static const bool store_weights_in_bram = false; + typedef model_default_t accum_t; + typedef bias5_t bias_t; + typedef weight5_t weight_t; + typedef layer5_index index_t; + template + using product = nnet::product::mult; +}; + +// relu_2 +struct relu_config7 : nnet::activ_config { + static const unsigned n_in = 25; + static const unsigned table_size = 1024; + static const unsigned io_type = nnet::io_parallel; + static const unsigned reuse_factor = 1; + typedef relu_2_table_t table_t; +}; + +// Dense_3 +struct config8 : nnet::dense_config { + static const unsigned n_in = 25; + static const unsigned n_out = 15; static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; static const unsigned reuse_factor = 1; - //static const unsigned reuse_factor = 6; - static const unsigned n_zeros = 0; + static const unsigned n_zeros = 225; + static const unsigned n_nonzeros = 150; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; static const bool 
store_weights_in_bram = false; - typedef accum_default_t accum_t; - typedef bias_default_t bias_t; - typedef weight_default_t weight_t; + typedef model_default_t accum_t; + typedef bias8_t bias_t; + typedef weight8_t weight_t; + typedef layer8_index index_t; + template + using product = nnet::product::mult; }; -struct relu_config1 : nnet::activ_config { - static const unsigned n_in = N_LAYER_1; + +// relu_3 +struct relu_config10 : nnet::activ_config { + static const unsigned n_in = 15; static const unsigned table_size = 1024; static const unsigned io_type = nnet::io_parallel; + static const unsigned reuse_factor = 1; + typedef relu_3_table_t table_t; }; -struct config2 : nnet::layer_config { - static const unsigned n_in = N_LAYER_1; - static const unsigned n_out = N_LAYER_2; + +// Dense_4 +struct config11 : nnet::dense_config { + static const unsigned n_in = 15; + static const unsigned n_out = 15; static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; static const unsigned reuse_factor = 1; - //static const unsigned reuse_factor = 6; - static const unsigned n_zeros = 0; + static const unsigned n_zeros = 135; + static const unsigned n_nonzeros = 90; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; static const bool store_weights_in_bram = false; - typedef accum_default_t accum_t; - typedef bias_default_t bias_t; - typedef weight_default_t weight_t; + typedef model_default_t accum_t; + typedef bias11_t bias_t; + typedef weight11_t weight_t; + typedef layer11_index index_t; + template + using product = nnet::product::mult; }; -struct relu_config2 : nnet::activ_config { - static const unsigned n_in = N_LAYER_2; + +// relu_4 +struct relu_config13 : nnet::activ_config { + static const unsigned n_in = 15; static const unsigned table_size = 1024; static const unsigned io_type = nnet::io_parallel; + static const unsigned reuse_factor = 1; + typedef relu_4_table_t 
table_t; }; -struct config3 : nnet::layer_config { - static const unsigned n_in = N_LAYER_2; - static const unsigned n_out = N_LAYER_3; + +// Dense_5 +struct config14 : nnet::dense_config { + static const unsigned n_in = 15; + static const unsigned n_out = 10; static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; static const unsigned reuse_factor = 1; - //static const unsigned reuse_factor = 6; - static const unsigned n_zeros = 0; + static const unsigned n_zeros = 90; + static const unsigned n_nonzeros = 60; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; static const bool store_weights_in_bram = false; - typedef accum_default_t accum_t; - typedef bias_default_t bias_t; - typedef weight_default_t weight_t; + typedef model_default_t accum_t; + typedef bias14_t bias_t; + typedef weight14_t weight_t; + typedef layer14_index index_t; + template + using product = nnet::product::mult; }; -struct relu_config3 : nnet::activ_config { - static const unsigned n_in = N_LAYER_3; + +// relu_5 +struct relu_config16 : nnet::activ_config { + static const unsigned n_in = 10; static const unsigned table_size = 1024; static const unsigned io_type = nnet::io_parallel; + static const unsigned reuse_factor = 1; + typedef relu_5_table_t table_t; }; -struct config4 : nnet::layer_config { - static const unsigned n_in = N_LAYER_3; - static const unsigned n_out = N_OUTPUTS; + +// Dense_6 +struct config17 : nnet::dense_config { + static const unsigned n_in = 10; + static const unsigned n_out = 1; static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; static const unsigned reuse_factor = 1; - //static const unsigned reuse_factor = 6; - static const unsigned n_zeros = 0; + static const unsigned n_zeros = 6; + static const unsigned n_nonzeros = 4; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / 
reuse_factor; static const bool store_weights_in_bram = false; - typedef accum_default_t accum_t; - typedef bias_default_t bias_t; - typedef weight_default_t weight_t; + typedef model_default_t accum_t; + typedef bias17_t bias_t; + typedef weight17_t weight_t; + typedef layer17_index index_t; + template + using product = nnet::product::mult; }; -struct sigmoid_config4 : nnet::activ_config { - static const unsigned n_in = N_OUTPUTS; + +// jetID_output +struct sigmoid_config19 : nnet::activ_config { + static const unsigned n_in = 1; static const unsigned table_size = 1024; static const unsigned io_type = nnet::io_parallel; + static const unsigned reuse_factor = 1; + typedef jetID_output_table_t table_t; +}; + +// pT_output +struct config20 : nnet::dense_config { + static const unsigned n_in = 10; + static const unsigned n_out = 1; + static const unsigned io_type = nnet::io_parallel; + static const unsigned strategy = nnet::latency; + static const unsigned reuse_factor = 1; + static const unsigned n_zeros = 6; + static const unsigned n_nonzeros = 4; + static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; + static const bool store_weights_in_bram = false; + typedef model_default_t accum_t; + typedef bias20_t bias_t; + typedef weight20_t weight_t; + typedef layer20_index index_t; + template + using product = nnet::product::mult; }; -#endif +#endif \ No newline at end of file diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b1.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b1.h deleted file mode 100644 index cc14299dc03eb..0000000000000 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b1.h +++ /dev/null @@ -1,14 +0,0 @@ -//Numpy array shape [25] -//Min -0.734825849533 -//Max 1.288661003113 -//Number of zeros 0 - -#ifndef B1_H_ -#define B1_H_ - -weight_default_t b1[25] = {-0.12057505, -0.05409636, 0.27422485, 0.49775919, -0.73482585, 0.44995615, 0.52624124, - -0.71328187, 
-0.43596983, 0.10772870, -0.68372047, 0.22197038, -0.53673136, -0.00771000, - 0.06140821, 1.28866100, -0.12453079, 0.16897179, 0.18858922, -0.17255782, -0.24242370, - -0.21922758, 0.40799412, 0.46138164, 0.85911417}; - -#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b11.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b11.h new file mode 100644 index 0000000000000..289d5b76f7f7f --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b11.h @@ -0,0 +1,25 @@ +//Numpy array shape [15] +//Min -0.062500000000 +//Max 0.250000000000 +//Number of zeros 3 + +#ifndef B11_H_ +#define B11_H_ + +bias11_t b11[15] = {0.031250, + 0.000000, + 0.000000, + 0.078125, + 0.234375, + -0.062500, + 0.093750, + 0.000000, + 0.062500, + 0.109375, + -0.062500, + -0.015625, + 0.250000, + 0.109375, + -0.046875}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b14.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b14.h new file mode 100644 index 0000000000000..9de3170588cec --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b14.h @@ -0,0 +1,12 @@ +//Numpy array shape [10] +//Min -0.031250000000 +//Max 0.250000000000 +//Number of zeros 0 + +#ifndef B14_H_ +#define B14_H_ + +bias14_t b14[10] = { + 0.031250, 0.015625, 0.046875, -0.015625, -0.031250, 0.046875, 0.203125, 0.015625, 0.250000, -0.015625}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b17.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b17.h new file mode 100644 index 0000000000000..540e383f1cdf0 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b17.h @@ -0,0 +1,11 @@ +//Numpy array shape [1] +//Min -0.714843750000 +//Max -0.714843750000 +//Number of zeros 0 + +#ifndef B17_H_ +#define B17_H_ + +bias17_t b17[1] = {-0.714844}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h 
b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h index 8a0d269df26f1..b9cf57fb0c52d 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b2.h @@ -1,20 +1,13 @@ -//Numpy array shape [10] -//Min -0.380347400904 -//Max 0.551839828491 +//Numpy array shape [25] +//Min -0.640625000000 +//Max 1.328125000000 //Number of zeros 0 #ifndef B2_H_ #define B2_H_ -weight_default_t b2[10] = {0.55183983, - 0.36323273, - -0.13108490, - -0.38034740, - 0.08559006, - 0.01700789, - 0.13562575, - -0.32752651, - 0.48282012, - -0.15096320}; +bias2_t b2[25] = {-0.312500, -0.281250, 0.687500, -0.250000, -0.640625, 0.656250, 0.500000, 0.265625, 0.171875, + -0.046875, -0.093750, 0.156250, -0.156250, -0.093750, -0.171875, 0.234375, 0.046875, 0.125000, + -0.140625, 0.187500, 0.937500, -0.046875, -0.250000, -0.250000, 1.328125}; #endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b20.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b20.h new file mode 100644 index 0000000000000..8887c33e169f4 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b20.h @@ -0,0 +1,11 @@ +//Numpy array shape [1] +//Min 0.238281250000 +//Max 0.238281250000 +//Number of zeros 0 + +#ifndef B20_H_ +#define B20_H_ + +bias20_t b20[1] = {0.238281}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b3.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b3.h deleted file mode 100644 index 740a1482c32e2..0000000000000 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b3.h +++ /dev/null @@ -1,20 +0,0 @@ -//Numpy array shape [10] -//Min -0.936354994774 -//Max 0.407682240009 -//Number of zeros 0 - -#ifndef B3_H_ -#define B3_H_ - -weight_default_t b3[10] = {-0.58549309, - -0.06117089, - -0.24173595, - 0.17925857, - -0.93635499, - 0.18813914, - 0.13134949, - 0.04132507, - 0.40768224, - 0.29987794}; - -#endif diff --git 
a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b4.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b4.h deleted file mode 100644 index 07d968b7f0a5a..0000000000000 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b4.h +++ /dev/null @@ -1,11 +0,0 @@ -//Numpy array shape [1] -//Min 0.023343238980 -//Max 0.023343238980 -//Number of zeros 0 - -#ifndef B4_H_ -#define B4_H_ - -weight_default_t b4[1] = {0.02334324}; - -#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b5.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b5.h new file mode 100644 index 0000000000000..82ab448e8b98e --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b5.h @@ -0,0 +1,13 @@ +//Numpy array shape [25] +//Min -0.125000000000 +//Max 0.265625000000 +//Number of zeros 1 + +#ifndef B5_H_ +#define B5_H_ + +bias5_t b5[25] = {-0.015625, 0.046875, -0.109375, 0.078125, 0.171875, 0.156250, 0.062500, 0.171875, 0.109375, + 0.265625, 0.234375, 0.125000, -0.046875, -0.062500, 0.015625, -0.062500, 0.156250, 0.093750, + 0.078125, -0.109375, 0.109375, 0.093750, 0.000000, -0.125000, 0.140625}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b8.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b8.h new file mode 100644 index 0000000000000..2cfe199fc7265 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/b8.h @@ -0,0 +1,25 @@ +//Numpy array shape [15] +//Min -0.109375000000 +//Max 0.265625000000 +//Number of zeros 0 + +#ifndef B8_H_ +#define B8_H_ + +bias8_t b8[15] = {0.093750, + 0.046875, + -0.015625, + 0.265625, + 0.046875, + -0.078125, + 0.031250, + -0.062500, + -0.015625, + 0.015625, + 0.062500, + 0.062500, + -0.109375, + -0.046875, + 0.140625}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w1.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w1.h deleted file mode 100644 index 34e95d5f9469b..0000000000000 --- 
a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w1.h +++ /dev/null @@ -1,234 +0,0 @@ -//Numpy array shape [80, 25] -//Min -2.676796197891 -//Max 3.172224998474 -//Number of zeros 0 - -#ifndef W1_H_ -#define W1_H_ - -weight_default_t w1[2000] = { - -0.24994563, -0.01592130, 0.02984710, -0.03246316, -0.19607241, -0.02163565, -0.00572438, 0.01376110, 0.06863546, - -0.21986796, -0.05606571, 0.06991967, 0.07118288, -0.00832175, -0.03974251, -0.12646708, 0.01565769, 0.02143256, - 0.04822187, -0.01806841, -0.00104700, 0.08732048, -0.19190465, -0.00814264, -0.15155232, 1.28681350, 0.13748017, - 0.39079481, -0.31852159, -0.28351930, 0.04033466, -0.42634365, -0.13127394, -0.20465094, -0.26889697, 0.70893532, - 0.43394735, 0.70848930, 0.04386011, 0.51139277, 0.40600044, -0.50317824, -0.25791675, 0.66503000, -0.40065920, - 0.65473962, -0.00253041, -0.33962274, -0.08765265, -0.11767972, 0.06714126, -0.60412002, -0.38466910, 0.15584932, - -0.06518575, -0.71045715, 0.46661234, 0.24567224, 0.29929164, 0.07424527, 0.00885416, 0.34313247, -0.40707770, - 0.49099818, -0.74588889, -0.10123775, 0.53515995, -0.42928374, -0.12010060, 0.12295905, -0.69315630, 0.38088876, - 0.01870272, 0.10997168, 0.26679659, 0.93503916, 1.01998508, -0.10151886, 0.15381983, 0.08079384, 0.12287367, - -0.29244336, 0.00544387, -0.31489906, -0.21896380, -0.07303306, 0.38462871, -0.88183403, 0.03077884, 0.05728795, - 1.33863544, 0.30312559, 0.28246397, -0.04266880, -0.50959545, 0.60257953, 0.41271350, 0.17042276, 0.60114610, - 0.22945273, 0.73635733, 0.20843484, 0.33445162, -1.11147344, -0.30075341, 0.51580977, -0.18865807, -0.91119158, - -0.39129823, 0.03684294, -0.68891215, -0.59370238, 0.65122741, -0.17603758, 0.66590655, 1.25159955, -0.37625167, - 0.32980818, 0.77502394, -1.58798909, 0.22748075, -0.95392829, -0.37276012, -0.61004913, 0.92332447, -0.09721770, - -0.72238702, -0.34178290, 1.19741392, -0.44719616, -0.75276721, 0.17758735, 0.02224001, -0.08087371, 0.25889483, - 0.03444218, 0.33437315, 
-0.78717542, 0.07385066, -1.37520838, -0.00982160, 0.42453086, 0.38150313, 0.19821575, - 0.93540424, -1.05033779, 0.57981151, 0.33810690, 0.24172245, -0.02910789, -2.12628174, -0.63280356, 0.29773250, - 3.17222500, -0.49264374, 0.29331800, 1.39211619, -0.76175922, -0.45130953, 0.42933312, -1.54781485, 0.02419901, - -2.16194177, -0.48207095, -0.31510717, -2.11770678, 0.76639241, 1.87467754, -0.52695322, 1.70596778, -2.33492446, - 0.41794685, 1.27378273, -0.10234303, -0.49648684, 1.86050689, -0.50189257, -0.16991651, -0.46694538, -0.00889901, - 0.58299482, 0.08892005, -0.20688151, 0.31415370, -0.20101114, -0.33977607, 0.07279664, 0.60349727, -0.34133548, - -0.48008150, 1.27710438, -0.29224181, -0.02558731, 0.86315304, 0.27016857, 0.12425910, -0.73576742, -0.41963914, - 0.04202708, 0.95604628, 0.15431352, 0.01539763, -0.01423682, 0.10296305, -0.08639759, 0.11045294, -0.03732327, - 0.13618803, 0.10233897, 0.21258777, 0.09800927, -0.05657235, -0.05041919, -0.16974531, 0.12601873, -0.14161462, - 0.12978475, -0.18991150, 0.07589593, 0.06911660, 0.10042754, -0.24628711, -0.28696042, -0.25114185, 0.07191065, - 0.36939719, 0.20196825, 0.19622889, 0.01961792, -0.52867746, 0.02060650, 0.32239082, 0.21925712, -0.29252347, - -0.07450268, 0.07430969, 0.19808058, -0.08999527, -0.41864324, 0.12763937, 0.10551479, 0.13380286, 0.26029557, - -0.09461474, 0.01125190, -0.01001714, 0.38791370, -0.59581864, -0.31798267, 0.13044289, -0.29599217, 0.06823352, - -0.13354187, -0.04749685, 0.44879583, -0.51446730, 0.37497100, -0.19995253, 0.11865021, -0.35735917, 0.28974858, - 0.12435340, 0.00421873, 0.08717419, -0.20247488, -0.05830143, -0.04514945, 0.03859331, -0.02609805, -0.22617900, - 0.09613950, -0.33556163, 0.23096344, 0.06258421, -0.05920995, -0.81900448, -0.61287695, 0.12958381, 0.79978222, - -0.32717428, 0.23371933, -0.11298512, 0.06942000, 0.52603680, 0.06176474, -1.15612555, 0.38241693, 0.13082752, - -0.69803941, -0.44519529, 0.24077913, -0.18162382, -0.41263813, 0.00626828, 
-0.56472856, -0.09948663, -0.18302669, - 0.95950598, -0.30670726, 0.02000829, 1.03344405, 0.48644996, 0.34142539, -0.14057057, -0.08406556, 0.58438534, - -0.22699004, -1.44362915, -0.95808500, 0.35579941, 0.29348719, -0.11956982, 0.27683857, 0.57247722, 1.37406516, - 0.18090977, -0.48121765, 0.06628983, -0.17082840, -0.86288124, -0.12994859, -0.10934682, -0.44934374, 0.64140421, - 0.43463030, -0.40653625, -0.92759031, -0.26878390, 0.47970986, -0.45654160, -0.31465080, 0.94702774, -0.16918387, - -0.42442611, 0.00438548, 0.00923580, 0.90002447, -0.55483723, -0.11998936, -0.24002072, 0.48533896, 0.11834613, - -0.07799944, -0.42383757, -0.16936988, -0.90337831, 0.50523067, 0.72644299, 0.32532176, 0.05432085, 0.68847007, - -0.60442829, 0.80158204, 0.99259549, -0.30103669, 0.05667507, 0.36611405, -0.94233608, 0.23696357, -0.61583829, - 0.52145499, 0.14344153, 0.00328588, 0.36766458, -0.71754855, 0.42685041, -0.52480674, -0.41898140, 0.29636848, - -0.70113719, -0.20197862, -0.37344661, -0.17589939, -0.54375410, -0.72718179, -1.11110735, 0.32055253, -0.25643155, - -0.30023971, -0.58632553, 0.50699002, 0.73423439, 0.04769143, -0.69287294, 0.71418941, -0.98707741, -0.48756132, - 0.14591850, -0.00972518, -0.18280549, 0.33020869, 0.07739078, -0.27737662, 0.32508579, 0.52724129, -0.13557516, - 0.27238563, 0.73315942, 0.99855763, 0.84088510, 0.08538753, 0.13155562, -0.14930172, 0.02797297, 0.10585469, - 0.02903437, -0.30760777, -0.08591483, -0.16532275, -0.04133916, 0.12315685, 0.08694953, 0.12440344, -0.39931026, - -0.06502170, -0.07466459, 0.25206035, -0.11492801, 0.01234671, 0.02252278, 0.10730736, 0.00707526, -0.04144976, - 0.16572779, -0.06111651, -0.05090914, 0.22255808, 0.21892326, 0.02140033, -0.26701146, -0.09199855, -0.34320089, - 0.04647652, 0.24066357, 0.00315903, -0.14013545, 0.20657861, 0.19460022, 0.13409390, -0.12650517, -0.00711635, - -0.36546883, -0.27223793, 0.22557122, 0.15888590, -0.19231611, -0.01208463, -0.42694032, 0.00924643, -0.04871246, - 0.14489457, 
0.19934957, 0.03268532, -0.26802376, -0.06917346, -0.08818764, -0.06936200, -0.00991716, -0.14875649, - 0.20260695, -0.03016085, -0.11772685, -0.06528303, 0.33984911, -0.42861041, 0.04678998, 0.24468878, -0.26212654, - 0.05760178, -0.01277140, 0.25944546, 0.21451963, -0.22919317, 0.08311309, 0.01015522, -0.07370505, -0.28775448, - 0.28065524, 0.80104679, -0.69237137, 0.09623399, 0.04745018, -0.11536954, 0.21645974, -0.17521553, -0.12839432, - -0.32616171, -0.19263010, -0.05076053, -0.32757092, 0.14068608, -0.31371123, -0.36700678, 1.02383471, 0.41596910, - -0.39243886, -0.39699236, -0.18623418, 0.23590773, 0.44462955, -0.01158825, 0.15543512, 0.36914709, -0.19606984, - 0.04083448, 0.11609410, 0.10854912, -0.93667829, -0.59664226, -0.10577209, -0.03770705, -0.82973319, 0.07100462, - 0.46515539, 0.70493704, 0.11769867, -0.09642658, 0.19184169, -0.60267162, -0.15556012, -0.06323973, 0.25728056, - 0.32714555, 0.37354282, 0.64966816, -0.85379928, -0.52891093, -0.53338081, 0.00071357, -0.80146301, -0.23014045, - 0.31370798, 0.19652064, -0.30330509, 0.59732527, -0.61618036, 0.43174049, -0.33461112, -0.09222537, -0.57418764, - 0.31234556, -0.06441883, -0.29923901, 0.04574157, 0.00199618, -0.07604899, 0.18836573, 0.22399814, 0.11964659, - 0.27587023, 0.54073912, -0.07070547, 1.09669447, 0.60586989, -0.56627184, 0.73163223, -0.06587803, -0.95469141, - 0.05797904, -0.32544577, 0.46618402, -0.42818251, -0.15697184, -0.07984095, -0.06863761, 0.27211952, -0.63966370, - -0.85368210, 1.04474986, -0.03273144, 0.04721467, -0.57129002, -0.51463783, -0.01716019, 0.41848388, -0.92354447, - -0.02085111, -0.35023081, -0.28929639, -0.12352847, -0.06491212, 0.62791741, 0.52128577, -0.08786132, 0.50663567, - -0.85222739, -0.67956436, -0.07901944, -0.20291066, -0.12427756, 0.21070847, 0.36405188, 0.01811016, -0.35558707, - -0.07505420, -0.51016599, 0.08317504, 0.78687006, 0.26184845, 0.32996735, -0.11742579, 0.13708171, 0.09675904, - 0.00351471, 0.17156938, 0.04663955, -0.01313619, 
0.07353903, 0.11845510, -0.03040916, -0.11860044, -0.05890951, - 0.13578244, -0.27024615, 0.12044270, -0.06773756, 0.26196989, 0.03754797, -0.69103962, 0.11767364, 0.08418153, - -0.07073509, -0.08945126, -0.04465364, -0.01228451, 0.61217988, 0.36905605, -0.06841971, 0.01820075, 0.22142294, - 0.20999679, -0.00854848, -0.21310976, -0.48690179, -0.06172886, 0.09083650, 0.47623742, -0.38875908, 0.29984316, - 0.35164335, 0.07724196, 0.25907773, -0.03366175, 0.00794181, -0.16796382, 0.12707716, 0.67827290, -0.46863237, - 0.40221474, -0.01072991, -0.01881496, -0.22039062, -0.00463564, -0.20179020, 0.14899430, 0.09645735, 0.08785056, - 0.05667125, -0.08783643, -0.57045329, -0.27956113, 0.32969514, -0.32422251, 0.03947007, 0.04782788, 0.12597121, - 0.12803499, 0.24237561, 0.03641291, 0.02941555, -0.13378389, 0.71286631, 0.13059177, -0.11221728, -0.04303265, - 0.32258469, 0.03121127, 0.19749436, 0.80445844, -0.83933711, 0.40717539, -0.08058111, 0.18654235, 0.58147413, - -0.22004756, -0.21094175, -0.49914742, -0.07245248, 0.21281776, -0.72978270, 0.11609764, -0.12739497, -0.49795446, - 0.91565651, 0.71345496, -0.19992878, 0.12728572, -0.34958413, 0.51537168, 0.36229423, -0.20545541, -0.04014085, - -0.15503673, 0.46182132, -0.18324539, -0.02288571, -0.12150281, -0.35487393, -0.25479561, 0.34288880, 0.01429710, - 0.03762121, 0.01153337, 0.11537866, -0.11222634, -0.04343228, 0.09371492, 0.24208696, 0.02680596, 0.08744393, - -0.01195653, -0.02051427, 0.46111181, 0.34674245, 0.17142926, -0.32360074, -0.30470049, -0.08778754, 0.56703365, - -0.39670938, 0.01970642, -0.24996454, 0.59148031, -0.04976763, 0.42775628, -0.51978588, 0.74823248, -0.24332942, - -0.10120203, -0.71067011, -0.05833459, 0.24460207, 0.12378100, 0.39883280, 0.15179272, -0.45821238, -0.26472330, - -0.11036454, 0.47337988, -0.19236894, 0.44863826, 0.19078662, 0.46045646, 0.55434424, -1.23575699, 0.89674824, - 0.14763579, -0.29703000, 0.11096095, -0.23884353, 0.32712832, -0.55054861, 0.67220551, -0.28248659, 
-0.10569336, - 0.04621894, -0.49375376, -0.12733379, 0.67400223, -0.12935409, 0.09695239, -0.28661168, -0.36145869, 0.06896356, - 0.46334738, -0.83616781, -0.68781477, -0.22872619, -0.02656318, -0.46397430, -0.16735579, 0.57318032, -0.05219025, - -0.06242780, 0.30701312, -0.43937260, -0.05616235, -0.35246953, 0.47527167, -0.36845928, 0.13797158, 0.46169606, - 0.03073783, -0.16647297, 0.35587814, -0.52273571, 0.22240485, 0.32394350, 0.29325587, 0.38622752, -0.12588513, - 0.21903162, -0.03870760, -0.07586532, 0.09732155, -0.44541699, 0.01353051, 0.07500879, -0.22210084, -0.02879842, - -0.02839135, 0.02233995, 0.01847041, -0.22886260, 0.09602077, -0.10249722, 0.02895709, -0.11213382, -0.32242554, - 0.21315952, 0.13921122, -0.05876900, -0.14110731, 0.17718993, 0.06612965, -0.03701587, 0.34920025, -0.22553837, - -0.25041988, 0.16762421, -0.04839466, -0.57936865, 0.20034809, 0.28770819, 0.07073146, 0.06286270, -0.14398633, - 0.08881986, -0.26472491, 0.27725342, 0.22914961, 0.32062715, 0.15277733, -0.33009961, 0.21074554, -0.15565939, - 0.47236079, 0.03225322, 0.06781324, -0.16307135, 0.73327172, -0.11553932, -0.13312288, -0.30246657, -0.04846320, - -0.39416528, 0.15607847, 0.08472254, -0.12179766, 0.23342557, -0.02313556, -0.16107082, 0.19552790, -0.05060831, - 0.08372914, 0.37613615, -0.26624736, 0.05994382, 0.57154304, -0.03778595, 0.15102805, 0.26144159, -0.64846903, - -0.11667332, 0.64444566, 0.53041399, -0.37275234, 0.12701584, 0.25457710, -0.91777927, 0.63840097, -0.04469256, - -0.01554284, 0.52316505, -0.07778227, -0.11871518, 0.13643374, -0.16263111, 0.12193766, -0.43915382, 0.17769964, - 0.06158905, -0.40595376, 0.36887977, 0.21324196, -0.16621692, 0.07623006, -0.07362154, 0.53180701, 0.40119246, - -0.41867191, -0.17060547, 0.11066595, 0.33041847, -0.30610490, -0.01155049, 0.06596804, 0.06266157, 0.11539320, - 0.53958863, -0.19265023, 0.19687888, -0.32241911, 0.17509246, 0.06316098, 0.22965759, -0.10924519, 0.13696006, - 0.34725070, 0.05508206, -0.31879237, 
-0.07152238, 0.30400902, 0.47540823, 0.05332027, -1.34034514, -0.63157010, - -0.20077212, 0.82977784, -0.83980680, 0.05455742, 0.23470649, 0.15096639, -0.02279334, 0.74177665, -0.51908326, - 0.57153726, -0.20008761, -0.44515362, -0.52133244, -0.53501129, 0.30665237, 0.03230446, -0.27042213, 0.69568527, - -0.53271943, 0.12585282, 0.84569460, 1.16614997, 0.30099568, 1.01664233, -0.04021535, 0.35936305, 0.12363404, - -0.44788554, 0.65720278, 0.14622304, -0.57894391, -0.17871566, -0.13646793, 0.06899100, -0.13851331, 0.07404158, - -0.32255191, 0.22225420, 0.05467210, -0.22595364, -0.09422892, 0.08064129, -0.14696676, 0.24685700, -0.36180913, - -0.50487852, 0.09818821, 0.23832101, -1.06369340, -0.94318706, 0.00698828, 0.28264612, -0.01870376, -0.69367069, - 0.32556781, 0.29627222, 0.17554468, 0.22879148, -0.32052159, 0.18480402, -0.76028723, 0.17409454, -0.52946806, - -1.31131041, 0.72142994, -0.21024033, 0.65006751, 0.28911707, -0.45603541, 0.30260912, 0.22917707, 0.76010191, - 0.50517660, -0.43544480, 0.01703142, 0.15579990, -0.06952365, 0.26123571, -0.32477272, -0.07388589, 0.23853466, - 0.02649050, -0.04410565, 0.35238847, 0.10454764, -0.21788062, -0.05252795, 0.12990016, -0.20476976, 0.02988371, - -0.20392458, 0.07077907, 0.07255822, 0.03174250, 0.19428524, -0.27959460, 0.17289197, -0.06749524, 0.07314484, - 0.04101936, 0.00711376, 0.39040637, -0.09693181, -0.13249642, 0.06778622, -0.20384689, -0.08403887, -0.06206702, - 0.39903295, 0.01676942, 0.16174519, -0.24540325, -0.15171684, 0.36854738, -0.04578711, -0.20637585, -0.58331889, - 0.23066565, -0.40027916, -0.33852276, 0.22725138, -0.22780336, -0.45288083, 0.05498514, -0.15462326, -0.01167145, - 0.14075157, -0.23809917, -0.04884083, -0.15133418, 0.16887660, 0.08024041, -0.26719818, -0.08086196, 0.27881959, - 0.03904902, -0.05400108, -0.14138514, 0.16911660, -0.10002459, 0.31475541, 0.20939967, 0.07277112, 0.10095973, - 0.33317840, -0.23609909, 0.10387685, 0.08162952, 0.30970895, -0.19202805, 0.11137805, 
-0.08374452, -0.64609599, - 0.49284625, -0.02431013, 0.22352953, 0.35399213, -0.04173037, 0.01117679, -0.26933041, -0.07039601, 0.30380678, - -0.05741419, 0.47689995, 0.20879868, -0.06093958, -0.08551129, -0.07670606, -0.23868953, -0.26600242, -0.24509941, - 0.40901592, 0.42221358, -0.76004744, 0.13680586, -0.25070697, 0.08168428, -0.19393569, -0.23131981, -0.35523322, - 0.31124046, -0.02291389, 0.52390915, -0.46724460, 0.13923384, -0.12886441, -0.03299529, -0.27032244, -1.19288146, - 0.24336755, -0.20915434, -0.14846808, 0.10754984, 0.02535326, 0.28236297, 0.90321386, 0.28560060, 0.31486535, - -0.78192097, -0.21997991, -0.19503053, 0.71680617, -0.23815078, -0.38749680, 0.09747923, -0.11504970, 0.19734858, - 0.98412722, -0.13073727, 0.75299066, -0.85745215, -0.40456349, -0.51684064, -0.47700635, 0.39638016, 0.17537507, - 0.52784997, 0.63105047, -0.69734496, -0.28434739, 0.58557647, 0.96909130, 0.17804323, 0.09428761, 0.17061329, - 0.33784506, -0.14671242, -0.48270255, 0.31931961, 0.04116327, -0.46874690, -0.45884821, -0.19885214, -0.39863971, - -0.41624883, 0.43567199, -0.28685057, 0.40880397, 0.18431477, -0.15750097, -0.56084317, -0.13018279, 0.18903515, - 0.30848095, -0.34719062, -0.19633505, -0.02658261, 0.24495831, -0.78052413, -0.85096359, -0.37101209, 0.22163752, - -0.14013411, -0.24140479, 0.23052573, 0.54393709, 0.13316275, 0.12203635, 0.20220585, 0.49100202, -0.62808341, - 0.16586047, -0.38358831, -1.00215280, 0.77456385, -0.27937427, 0.11909273, 0.50655580, -0.87544155, 0.59288806, - 0.01167453, 0.57931119, -0.02249480, -0.12532967, -0.25048557, -0.28306130, 0.06188992, 0.48368040, -0.36783400, - -0.21773575, 0.14827894, 0.13848552, 0.04230130, -0.04214389, -0.07091486, -0.04140090, -0.30136281, 0.00464335, - -0.21866782, -0.02765239, -0.17025313, 0.08577426, 0.06893988, 0.11575132, 0.07546596, 0.02867554, 0.19112501, - 0.27582642, 0.12627265, 0.10898180, -0.18745209, 0.23613420, 0.23121634, 0.28491151, 0.02902788, 0.15380767, - 0.03966511, -0.01862929, 
-0.00648489, 0.01908036, -0.19008325, -0.18426324, -0.07000075, -0.29073888, -0.22639032, - -0.11762336, 0.33500755, -0.21507888, -0.07346634, -0.03355709, -0.04096937, -0.33768243, -0.19027354, -0.18297306, - -0.50098300, -0.02807480, 0.23949267, -0.15996224, -0.07754000, -0.17378184, 0.00657926, 0.39929193, 0.45185298, - -0.34957576, -0.24467568, 0.21933684, -0.10674803, -0.35011348, 0.35258722, -0.14792293, 0.02977267, 0.63623291, - 0.01652745, 0.28561106, -0.24670583, 0.39176771, 0.05463742, 0.32333028, 0.14167164, -0.06670932, 0.23938650, - 0.31829852, -0.41095898, 0.35032102, 0.03883050, 0.14073621, 0.64508480, 0.25743634, -0.24900754, 0.26631746, - -0.12656187, 0.01745303, -0.18157384, 0.34143060, 0.32021353, 0.30565801, -0.26965511, 0.23778385, -0.02008655, - -0.08103817, -0.07159230, 0.32048982, 0.06949183, -2.33522058, 0.02816298, -0.10037031, 0.37423018, -0.22492132, - -0.36854437, 0.40015242, 0.28485346, 0.22778602, 0.19501299, -0.93215930, 0.07858350, -0.40451255, -0.27547240, - -0.02443204, -1.41666114, 0.05133143, -0.06660908, 0.50325763, 0.31695950, -0.18681468, -0.12560664, 2.13244534, - 0.22775133, 0.42665431, -1.29449880, -0.23370074, 0.01759187, 0.25374168, 0.06429626, -0.52347112, 0.34470561, - -0.26376522, -0.04219850, -0.01756793, -0.43413332, -0.22707182, 0.05281873, -0.45199049, 0.04030637, -0.54730064, - -0.13315515, 0.10807105, -0.34840381, -0.12949815, -0.38297817, -0.13845149, 0.97675931, 0.20487542, 0.41703507, - 0.23882188, 0.23847181, 0.40595204, 0.22122343, -0.59291810, 0.16200593, -0.23582739, -0.33778340, -0.05766481, - -0.25944924, -0.28257781, -0.02519164, 0.15628809, 0.22581941, 0.29877603, 0.11747632, -0.13611910, -0.68844485, - 0.10147709, -0.19454663, 0.21278845, 0.02120594, 0.12139316, -0.17088807, 0.38014871, -0.78083509, -0.60448849, - 0.05090213, 0.61401623, -0.32977888, -0.38970327, 0.26832360, 0.53781092, 0.20194471, 0.82220250, 0.23819874, - 0.49616402, -0.43314114, -0.50223577, -0.46702045, -1.17008650, 0.48856854, 
-0.03626145, 0.75825346, 0.49573380, - -0.68196982, 0.29390180, 0.22509925, 0.79214412, 0.17140889, -0.22514503, 0.10945672, -0.20663217, -0.00440216, - 0.21418120, -0.34781390, -0.11805713, 0.12930803, -0.02661256, -0.16583513, 0.50446808, 0.12406299, -0.18522657, - -0.42358905, 0.14963409, -1.34551275, 0.13522045, 0.17906164, 0.25551242, 0.31629464, -0.21916427, -0.00383488, - -0.16207457, 0.18151720, -0.08251988, 0.89760458, 0.44453332, -0.27497387, 0.29844183, 0.01738336, 0.12566963, - 0.00516657, -0.15164798, -0.07898259, -0.25138238, 0.47278392, 0.46358061, 0.20548722, 0.38698843, -0.07769089, - 0.21403231, -0.12140352, 0.01454621, 0.27465621, -0.04136071, -0.18499696, -0.33877471, -0.52207792, -0.06982010, - -0.67964226, -0.37841988, -0.05654239, -0.44023779, -0.34978950, -0.11707290, 0.43336329, 0.23595251, 0.51182544, - 0.45589104, 0.46062201, -0.28254399, 0.04058569, 0.35703275, 0.09476561, -0.19271792, -0.85225898, 0.18226382, - 0.07547066, -0.23841362, 0.07214766, 0.05686964, -0.64615160, 0.89725614, -0.09489815, -0.24773495, 0.18898845, - -0.05227394, -0.04989563, -0.04141004, -0.68845397, 0.44256380, 0.15174553, 0.16641839, 0.20559123, 0.18821712, - -0.18444933, 0.75212121, 0.04695220, -0.14553900, -0.25279966, -0.78429103, 0.21485479, 0.24854848, -0.34898055, - 0.12131061, -0.01442323, 0.31166860, -0.03168157, 0.23537874, -0.04150987, -0.73491955, 0.30445504, 0.01360191, - 0.11793279, -0.01071012, -0.86158031, -0.44057927, -0.11391853, -0.08041152, 0.30659840, -0.07929188, 0.14337794, - -0.16240485, -0.37871391, -0.12544847, -0.75813878, 0.07463507, 0.30250356, -0.08979524, -0.05500457, -0.00572075, - -0.15594503, 0.03389021, 0.33084431, 0.39045012, -0.42743438, -0.61926889, -1.01879334, 0.43193951, 0.11156862, - -0.76947951, -0.20159762, 0.24022132, 0.20872289, 0.69780248, -0.16525456, 0.63648707, -1.59807694, -0.14674914, - -0.52725124, -0.42184243, 0.85394889, 0.03816247, -0.73201150, 0.72350580, -0.94382733, 0.30476892, 0.62137985, - 0.76275116, 
0.58395672, 0.12438627, 0.09742960, 0.15616673, -0.26625797, 0.15280285, 0.40855104, -0.06499965, - 0.07652657, -0.03907230, -0.03445091, -0.13297464, 0.12203576, 0.49042386, -0.46612582, 0.23596950, -0.60011405, - 0.01329148, -0.40629655, 0.34626818, 0.00672128, 0.21219759, 0.12195532, -0.24550790, 0.25495195, 0.50089574, - -0.69004655, -0.82626939, -0.04906785, 0.22566023, -0.19735636, -0.32598498, -0.23328499, 0.59350103, 0.50138974, - 0.03376095, -0.21038638, 0.23230115, -0.67481101, -0.46950540, -0.53264731, -1.31645954, 0.43338448, -0.07359013, - 0.19401260, 0.85574108, -0.58386785, 0.27350774, 0.94151503, 0.99626285, 0.16530964, -0.52822798, 0.02781926, - 0.19514728, 0.02097620, 0.00889074, -0.16201399, -0.07028764, 0.22292475, -0.00996018, 0.11951973, -0.02360463, - 0.18132643, 0.03626538, -0.40536785, -0.24706507, -1.10316157, 0.23488073, -0.11203269, -0.26491979, 0.32530117, - -0.07893114, -0.00744999, 0.26029640, 0.33739540, 0.02217237, 0.02589254, -0.42112139, 0.24534294, 0.70596570, - -0.23823494, -0.01574550, -0.57523948, -0.01305772, -0.10088185, 0.27640396, -0.16561478, 0.15046248, -0.04703883, - 0.12256249, -0.13618535, -0.25345358, 0.13640152, 0.11063136, 0.76222241, 0.26646805, -0.26234278, 0.19928859, - 0.05528985, -0.14719652, 0.09461970, -0.29426023, -0.11857925, -0.33014619, -0.16937710, 0.49556774, 0.09860725, - -0.08043962, 0.60073936, -0.16133121, 0.60515904, 0.05304303, 0.21871525, 0.45007041, -0.18452203, -0.23329300, - 0.15948120, 0.03171407, 0.05523947, -0.19391575, -0.06312876, 0.05657719, -0.01570622, 0.34798819, 0.35875756, - 0.64115590, -0.12868474, -0.21662687, -0.07916048, -0.02071994, -0.39688477, 0.34791452, -0.01536988, -0.01980658, - 0.20821385, 0.32254547, 0.03658571, 0.53079057, 0.11581320, -0.52330321, 0.08113370, -0.35415897, -0.01983317, - 0.34641969, -0.06101644, -0.00271639, -0.19201282, -0.43245769, -0.21427184, 0.11255077, -0.15757668, -1.97429311, - 0.25491333, 0.18619338, -0.13669698, -0.33716843, -1.20977962, 
-0.06677102, 0.12260284, 0.31985071, 0.98761481, - -0.66411626, -0.41700807, -0.00110240, -0.32249039, 0.21490636, -0.67965972, -0.16568908, -0.11263562, -1.06136537, - -0.06080189, 0.00003523, -0.27638850, 0.54172385, 0.15916675, 0.66536385, -0.61083424, -1.17721260, -0.79620224, - 1.62779248, -1.29850137, -0.40923908, -0.21678016, 1.11565304, 1.38857508, 1.67485464, -0.48776993, 1.54490137, - -0.99453592, -0.23702216, -1.28989625, -0.32184783, 1.73645914, 0.50596559, -0.42633674, 2.06059289, -1.31561661, - 0.09407058, 0.71311694, 1.60583699, 0.67549241, -0.75638843, -0.11993816, 0.25794804, -0.30944440, -0.43204123, - 0.36899459, 0.19363843, -0.08060863, -0.05935695, 0.27492559, -0.16506658, -0.00417477, 0.57574582, -0.39738783, - 0.30795437, -1.27800059, -0.36806244, 0.00201544, 0.41062146, -0.01292078, 0.33908349, 0.05562977, 0.15150607, - 0.33948043, -0.19380097, -0.34239587, -0.26843691, 0.14322159, 0.16285747, -0.12242185, -0.39411676, -0.39972457, - 0.32914063, -0.14964050, 0.18657172, -0.32965264, 0.50208765, -0.61841202, -0.96437931, -0.19447599, -1.48685813, - 0.36768064, -0.19042422, -0.14381048, 0.16720532, -0.38585469, 0.28041863, 1.07230306, 0.34857085, 0.56100559, - -0.60621732, -0.27094939, 0.03308203, 0.28440759, -0.05372868, -0.37450859, -0.23122661, 0.14196907, -0.08391851, - 0.58788222, 0.06581475, 0.12165748, -0.56094503, -0.62536222, -0.32290021, -1.14628315, 0.28745806, 0.09321925, - -0.11868286, 0.73546922, -0.14506210, 0.10030940, 0.65942341, 0.56377023, 0.38628533, -0.42766783, -0.12002008, - -0.27770182, 0.38072130, -0.41092056, 0.07260298, 0.32786149, -0.18012661, -0.02678201, 0.29315698, -0.62710303, - 0.16001518, -0.31741443, -0.36174574, -0.17293620, -0.11350867, 0.18780905, 0.17321175, 0.81462449, 0.27337193, - -0.34306210, -0.12359867, 0.26058146, 0.48336327, 0.48286983, 0.00497185, -0.08108788, -0.37280399, -0.07095718, - 0.07272183, 0.25405398, -0.01350151, 0.19333066, 0.50434202, 0.30863705, 0.23423783, 0.27947450, -0.35671273, 
- 0.39509684, -0.28312561, 0.13625887, 0.05653338, 0.26617846, 0.24114241, 0.22899513, -0.34379941, 0.14200218, - 0.16892987, 0.41087806, 0.25089607, -0.16019906, 0.13426897, -0.13074127, -0.23068653, -0.45294666, 0.30708107, - -0.05777374, -0.03524012, -0.18545437, 0.26572716, 0.34135580, -0.10212494, 0.15759155, -0.29985228, 0.00604882, - -1.35232568, 0.02671386, -0.18605591, 0.28203139, 0.06673647, 0.21136442, 0.02198954, -0.02589645, -0.13472135, - -0.33945116, -1.36670744, 0.26497167, -0.01333835, 0.35838512, -0.00214932, -0.67533672, -0.01949281, -0.15939406, - -0.17611854, 0.62018734, -1.11697268, 0.25882152, -0.40646151, -0.21743414, -0.35022104, -0.48264894, 0.15348732, - 0.32525846, -0.62968028, -0.14668293, 0.04142878, -0.18443897, 1.67367685, 0.29640922, 0.54300213, -0.38739282, - -1.12135983, -0.95634991, 1.56781328, -0.78718096, -0.65814853, -0.09405752, 1.45496094, 1.55392945, 1.32255197, - -0.49480981, 1.84735644, -1.09742570, 0.03602623, -1.19865084, 0.01194180, 1.76398528, 0.22691993, -0.24130857, - 2.00458288, -1.63459969, 0.28926355, 0.26902235, 1.57351863, 0.59064698, -2.67679620, 0.40217704, 0.49060968, - 0.01024920, -0.21290652, 0.01566074, -0.11393169, -0.32448450, -0.27194211, 0.21742176, -0.57667369, -0.03412761, - 0.36706647, -0.42090943, 0.39278191, -0.02046515, -0.30790815, -0.07676671, 0.48708537, -0.19606759, 0.39258122, - 0.11010294, 1.56427002, -0.23800702, 0.70309281, -1.84958696, -0.04740064, 0.06504993, 0.21830852, -0.09291255, - -1.47656202, -0.76586556, -0.02407140, -0.12262835, 0.55286926, -0.37243509, -0.11549302, -0.16901262, -0.81201553, - -0.16746910, -1.11338747, -0.03933520, 0.25118551, -0.27406788, 0.25855088, -0.24614365, -0.05488263, 0.42877647, - 0.41920695, 0.49124199}; - -#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w11.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w11.h new file mode 100644 index 0000000000000..ba69fb192e297 --- /dev/null +++ 
b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w11.h @@ -0,0 +1,34 @@ +//Numpy array shape [15, 15] +//Min -0.984375000000 +//Max 1.203125000000 +//Number of zeros 135 + +#ifndef W11_H_ +#define W11_H_ + +weight11_t w11[225] = { + 0.734375, 0.000000, 0.000000, 1.015625, -0.781250, 0.000000, 1.203125, 0.687500, 0.000000, 0.000000, + 0.593750, 0.281250, 0.843750, 0.000000, -0.343750, 0.000000, 0.000000, 0.000000, -0.937500, 0.531250, + 0.000000, 0.000000, 0.000000, -0.453125, 0.000000, 0.484375, 0.000000, 0.546875, -0.671875, -0.296875, + 0.000000, 0.000000, 0.375000, -0.625000, 0.203125, 0.000000, -0.734375, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.515625, 0.000000, 0.000000, 0.500000, -0.453125, 0.000000, 0.000000, 0.500000, + -0.359375, 0.000000, 0.000000, 0.000000, 0.468750, 0.000000, 0.000000, 0.000000, 0.328125, -0.406250, + 0.000000, 0.359375, 0.359375, -0.375000, 0.000000, 0.000000, -0.296875, 0.406250, 0.000000, 0.000000, + 0.406250, 0.328125, -0.515625, 0.421875, 0.000000, 0.000000, 0.000000, -0.281250, 0.000000, 0.000000, + 0.000000, 0.312500, 0.000000, 0.000000, 0.000000, -0.453125, 0.000000, 0.000000, 0.000000, -0.375000, + 0.000000, -0.453125, -0.984375, 0.000000, -0.406250, 0.000000, 0.421875, -0.343750, 0.000000, 0.000000, + 0.000000, -0.437500, 0.000000, 0.343750, 0.000000, 0.375000, -0.453125, 0.000000, -0.343750, 0.000000, + -0.421875, 0.000000, 0.406250, 0.000000, 0.328125, 0.343750, 0.375000, 0.000000, -0.343750, 0.000000, + 0.328125, 0.000000, -0.359375, 0.000000, 0.000000, -0.453125, 0.000000, 0.000000, 0.000000, 0.000000, + 0.328125, 0.000000, 0.468750, 0.000000, 0.000000, 0.000000, 0.000000, 0.375000, 0.000000, 0.000000, + 0.000000, 0.531250, -0.281250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + -0.671875, 0.000000, 0.000000, 0.000000, 0.437500, 0.000000, 0.000000, 0.000000, -0.765625, 0.000000, + 0.421875, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.421875, 
0.000000, + 0.000000, 0.000000, 0.000000, 0.343750, 0.000000, -0.437500, -0.375000, 0.000000, 0.000000, 0.375000, + 0.296875, 0.000000, 0.000000, 0.000000, 0.000000, 0.375000, 0.000000, 0.000000, 0.390625, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + -0.328125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.562500, 0.000000, + -0.421875, 0.000000, 0.000000, 0.312500, -0.140625, 0.359375, -0.390625, -0.359375, 0.406250, 0.625000, + -0.484375, 0.000000, 0.000000, 0.687500, -0.406250}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w14.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w14.h new file mode 100644 index 0000000000000..f4103cee867e2 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w14.h @@ -0,0 +1,26 @@ +//Numpy array shape [15, 10] +//Min -0.921875000000 +//Max 1.031250000000 +//Number of zeros 90 + +#ifndef W14_H_ +#define W14_H_ + +weight14_t w14[150] = { + -0.296875, -0.843750, 0.000000, 0.000000, -0.406250, 0.000000, -0.281250, 1.031250, 0.000000, 0.000000, + 0.453125, 0.000000, 0.359375, 0.375000, 0.406250, -0.421875, 0.000000, 0.000000, 0.375000, 0.000000, + 0.000000, 0.828125, 0.000000, 0.000000, 0.000000, 0.000000, -0.312500, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.406250, 0.796875, 0.421875, 0.640625, 0.546875, 0.000000, 0.000000, 0.000000, + -0.328125, 0.000000, 0.000000, 0.000000, -0.328125, -0.890625, 0.000000, 0.859375, 0.750000, 0.000000, + 0.453125, 0.000000, 0.000000, 0.000000, 0.000000, -0.328125, 0.000000, 0.000000, -0.328125, -0.359375, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.750000, 0.640625, 0.000000, 0.000000, + 0.000000, -0.484375, 0.000000, 0.000000, -0.421875, 0.000000, -0.421875, 0.781250, 0.000000, 0.000000, + 0.437500, 0.000000, 0.328125, 0.000000, 0.359375, 0.000000, 0.000000, 0.000000, -0.546875, 0.000000, + 0.484375, 0.640625, 
0.531250, 0.000000, 0.000000, 0.000000, 0.625000, -0.296875, 0.000000, -0.437500, + 0.000000, 0.000000, 0.000000, 0.000000, 0.421875, 0.000000, 0.000000, 0.000000, 0.000000, 0.375000, + 0.000000, 0.000000, 0.000000, -0.296875, -0.390625, 0.375000, 0.000000, 0.000000, -0.328125, 0.000000, + 0.000000, 0.000000, -0.671875, 0.000000, -0.921875, 0.000000, -0.875000, 0.000000, 0.000000, 0.000000, + 0.468750, 0.718750, 0.484375, 0.812500, 0.000000, 0.375000, 0.000000, 0.000000, -0.390625, 0.000000, + -0.390625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.390625, 0.000000}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w17.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w17.h new file mode 100644 index 0000000000000..0ce1c2b014117 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w17.h @@ -0,0 +1,12 @@ +//Numpy array shape [10, 1] +//Min -2.798828125000 +//Max 1.773437500000 +//Number of zeros 6 + +#ifndef W17_H_ +#define W17_H_ + +weight17_t w17[10] = { + 0.000000, 1.773438, 1.755859, 0.000000, 0.000000, 0.000000, 1.603516, 0.000000, -2.798828, 0.000000}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h index 592ab44151bbc..cd94ddd044393 100644 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w2.h @@ -1,39 +1,211 @@ -//Numpy array shape [25, 10] -//Min -1.512127518654 -//Max 2.787853240967 -//Number of zeros 0 +//Numpy array shape [80, 25] +//Min -1.515625000000 +//Max 1.312500000000 +//Number of zeros 1205 #ifndef W2_H_ #define W2_H_ -weight_default_t w2[250] = { - 0.20997065, 0.23109458, 0.56466961, 0.22711204, -0.20132071, -0.27363914, 0.14927717, -0.16103272, -0.83456266, - -0.16359854, 0.42116061, 0.25756207, 0.04047282, 0.21591994, -0.26583776, -0.55054820, 0.41611665, -0.02321975, - 0.07042803, 0.50135452, 
0.00703545, -0.20829202, -0.33702660, -0.12396229, -0.11880612, -0.27329573, -0.11452802, - -0.31897750, 0.21264470, 0.13742544, -0.28871939, 0.41868410, -0.63091415, -0.19237195, -0.65478534, 0.38763866, - -0.24744406, -0.16881032, 0.39083633, -0.50035834, -0.19310105, -0.31465644, 0.00439816, 0.10827218, -0.49906382, - -0.32572702, -0.91848624, 0.37550700, -0.71738565, -0.34755468, 0.06423171, 0.32547599, 0.02474762, 0.03221778, - 0.48550412, -0.33728692, -0.32770881, 0.17164232, 0.55661368, 0.11896797, 0.36957362, 0.47705862, -0.49895954, - 0.33941826, 0.19965869, 0.01562935, 0.11520918, -0.64897013, 0.09584811, -0.06691046, -0.22340138, -0.28523839, - 0.47164100, 0.39281282, -0.27396747, 0.23841321, -0.16906965, 0.23569225, -0.15681265, 0.18717216, -0.60413569, - -0.08125137, 0.03988006, -0.21231870, -0.22519483, 0.12118224, 0.16755132, 0.12627158, 0.01710406, 0.12804474, - 0.15039428, -0.44942543, -0.31897655, 0.23188710, 0.18285972, 0.19390795, -1.01665187, 0.21815108, -0.29137248, - -0.33327803, -0.59519506, 0.28375888, -0.21275434, 0.20035347, 0.24234673, -0.23726320, 0.13105272, -0.11671171, - -1.04230368, -0.01892293, 0.24302486, -0.11491518, 0.00009525, 0.16215059, -0.33812979, 0.25157502, 0.08174099, - 0.02176141, 0.21500087, 0.09077536, -0.76118916, 0.10925286, 0.29795000, 0.12469041, 0.37909570, -0.20281483, - 0.27489746, 0.37251407, 0.22438200, 0.38048640, 0.05875695, -0.26088551, -0.21821247, -0.16538695, 0.41207287, - -0.16648161, -0.84085250, -0.41789296, -0.34957457, -0.61002076, 0.31845343, 0.14742102, 0.19950806, 0.16061406, - 0.06558945, -0.37494221, -0.08883159, -0.04767518, -0.01558618, -0.38022742, -1.51212752, 0.86078125, 0.19461697, - 0.17105880, -0.30809617, -0.31512862, 2.78785324, -0.00088534, 1.45783448, 1.60251164, 0.00830983, -0.11042736, - -0.09234238, -0.63981187, -0.12528154, 0.26517308, -0.64378422, 0.26114368, -0.03288542, -0.30414325, 0.06316128, - 0.20465648, 0.13085699, -0.47638854, -0.23346442, 0.28762946, 0.11337498, 
-0.16003485, -0.03085457, -0.34413737, - -0.20898604, 0.25293669, 0.12700504, -0.57297736, 0.37069905, -0.10958206, -0.02782927, -0.04480676, 0.37059775, - 0.22780053, -0.46436781, 0.21395527, -0.12828122, 0.25643846, 0.42216083, 0.38164839, -0.21980932, 0.36473754, - 0.07016987, -0.35408738, -0.16640140, -0.25358951, 0.39250490, -0.54550570, -0.19580491, -0.40004924, 0.17290805, - 0.03295039, 0.15710174, 0.38565248, 0.17310381, -0.26752374, -0.01243732, 0.19979088, -0.15178865, 0.05851814, - -0.30287826, -0.22805928, 0.13903613, -0.17035685, 0.42811340, -0.32098049, 0.01897480, 0.19527332, 0.15685958, - 0.24155772, -1.29652667, 0.23406981, -0.14959824, 0.22470856, 0.06737669, -0.17472392, -0.07033237, 0.12923102, - -0.45487776, 0.28186423, -0.08404353, 0.05938773, -0.14591871, -0.37163615, -0.11934289, 0.09545202, 0.20201178, - 0.15774842, 0.09092412, 0.54094648, 0.01843318, -0.20180281, -1.01475310, -0.02641589}; +weight2_t w2[2000] = { + 0.000000, 0.000000, 0.000000, 0.109375, 0.046875, 0.078125, 0.000000, 0.281250, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, -0.046875, -0.125000, 0.000000, 0.703125, 0.031250, 0.000000, + 0.000000, -0.203125, 0.000000, -1.296875, -0.656250, 0.000000, 0.000000, 0.390625, 0.000000, 0.000000, + 0.203125, 0.125000, 0.000000, 0.000000, 0.000000, 0.234375, -0.343750, 0.000000, 0.203125, 0.000000, + 0.359375, -1.515625, 1.312500, 0.546875, 0.000000, -0.046875, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.156250, -0.125000, -0.250000, 0.000000, 0.187500, 0.000000, -0.906250, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.718750, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.578125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.093750, 0.000000, 0.390625, + 0.406250, 0.625000, 0.000000, 0.000000, -0.140625, 0.000000, -0.125000, 0.000000, 0.000000, 0.000000, + 
-0.546875, -0.109375, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.218750, + 0.000000, 0.000000, 0.000000, 0.390625, -0.265625, -0.234375, 0.000000, -0.265625, 0.000000, 0.000000, + 0.562500, 0.000000, 0.140625, 0.000000, 0.000000, 0.296875, 0.000000, -0.812500, 0.000000, -0.375000, + 0.000000, 0.000000, 0.062500, 0.234375, 0.000000, 0.000000, 0.062500, 0.000000, 0.000000, 0.062500, + -0.140625, -0.078125, 0.187500, -0.906250, 0.000000, 0.000000, 0.000000, 0.000000, -0.281250, 0.000000, + 0.718750, 0.296875, -0.937500, -0.937500, 0.000000, 0.000000, 0.000000, -0.062500, 0.843750, 0.031250, + 0.000000, 0.468750, 0.000000, 0.000000, -0.484375, 0.000000, -0.656250, 0.875000, 0.000000, -0.109375, + -0.015625, 0.000000, 0.000000, 0.140625, -0.343750, -0.421875, 0.343750, 0.078125, 0.000000, 0.000000, + 0.171875, 0.000000, 0.000000, -0.531250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + -0.375000, 0.000000, 0.000000, 0.000000, 0.218750, 0.546875, 0.015625, -0.109375, 0.000000, 0.000000, + -0.125000, 0.000000, 0.000000, 0.000000, -0.078125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.046875, 0.578125, 0.000000, 0.000000, 0.000000, -0.093750, 0.000000, 0.000000, 0.000000, 0.000000, + 0.093750, 0.000000, -0.078125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.312500, 0.000000, 0.000000, 0.000000, 0.062500, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.031250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.171875, 0.000000, -0.031250, 0.000000, 0.453125, 0.000000, 0.000000, 0.000000, 0.703125, + 0.125000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.265625, 0.000000, + 0.000000, -0.484375, 0.000000, -0.031250, 0.000000, 0.000000, -0.156250, 0.000000, 0.187500, 0.484375, + 0.343750, 0.000000, 0.000000, 0.000000, 0.000000, 0.015625, 0.000000, 0.000000, 0.156250, 0.000000, + 0.000000, 0.000000, 0.000000, 
0.000000, 0.000000, 0.000000, 0.140625, 0.000000, 0.000000, -0.062500, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.390625, 0.000000, -0.125000, -0.156250, + 0.000000, 0.000000, 0.000000, -0.062500, 0.000000, 0.218750, -0.671875, 0.000000, 0.281250, 0.000000, + 0.000000, 0.000000, 0.125000, -0.125000, 0.000000, 0.000000, 0.000000, 0.000000, 0.203125, 0.593750, + 0.000000, 0.000000, 0.000000, 0.000000, 0.281250, 0.000000, -0.140625, -0.171875, -0.265625, 0.000000, + 0.437500, -0.343750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.140625, 0.000000, 0.000000, + 0.000000, -0.281250, -0.125000, 0.000000, 1.234375, -0.171875, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.234375, 0.078125, 0.000000, -0.546875, 0.421875, 0.000000, -0.312500, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.250000, -0.375000, 0.000000, + 0.000000, 0.515625, 0.000000, 0.000000, 0.000000, -0.171875, 0.000000, -0.515625, -0.156250, 0.000000, + 0.171875, -0.453125, 0.000000, 0.000000, -0.500000, 0.000000, 0.171875, -0.187500, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.296875, 0.187500, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.156250, 0.062500, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.156250, 0.187500, 0.000000, 0.000000, 0.000000, + -0.421875, 0.000000, -0.281250, 0.000000, 0.000000, 0.000000, -0.187500, 0.000000, 0.000000, 0.187500, + 0.000000, 0.000000, 0.359375, 0.000000, 0.000000, 0.187500, -0.093750, 0.000000, 0.000000, 0.046875, + -0.250000, 0.000000, 0.000000, 0.156250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.031250, 0.000000, 0.000000, + -0.250000, 0.000000, 0.000000, -0.125000, 0.000000, 0.000000, 0.000000, 0.000000, -0.078125, 0.000000, + -0.078125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
0.000000, 0.015625, 0.000000, 0.000000, + -0.406250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.109375, 0.000000, 0.000000, + -0.218750, -0.359375, 0.000000, 0.421875, 0.000000, 0.062500, 0.000000, -0.421875, -0.046875, 0.000000, + 0.093750, 0.000000, 0.000000, 0.000000, -0.515625, 0.000000, 0.000000, 0.015625, 0.000000, 0.000000, + -0.218750, 0.281250, 0.000000, 0.281250, -0.156250, 0.250000, 0.000000, 0.000000, 0.109375, 0.015625, + 0.000000, 0.000000, 0.187500, 0.000000, 0.406250, -0.062500, -0.281250, -0.078125, 0.000000, 0.000000, + 0.000000, -0.250000, -0.453125, -0.046875, 0.421875, 0.000000, -0.109375, 0.109375, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.281250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.218750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.656250, 0.000000, 0.000000, 0.000000, 0.046875, 0.000000, 0.484375, -0.546875, -0.031250, -0.421875, + 0.000000, -0.781250, 0.000000, 0.000000, -0.546875, 0.265625, 0.171875, -0.203125, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, -0.062500, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.171875, 0.125000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.328125, 0.000000, + 0.140625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.062500, 0.171875, 0.000000, 0.015625, + 0.000000, 0.000000, 0.000000, 0.000000, -0.234375, 0.000000, 0.000000, 0.000000, 0.000000, -0.078125, + 0.000000, 0.000000, 0.000000, 0.093750, 0.203125, -0.140625, 0.000000, -0.328125, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, -0.359375, 0.000000, 0.187500, -0.171875, -0.187500, 0.000000, 0.031250, + 0.000000, 0.000000, 0.125000, 0.015625, 0.000000, 0.000000, 0.000000, 0.000000, 
-0.109375, 0.000000, + -0.296875, 0.000000, -0.187500, 0.250000, 0.000000, 0.000000, 0.390625, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.078125, 0.000000, 0.000000, -0.015625, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.093750, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.250000, 0.000000, -0.109375, -0.296875, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.234375, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.187500, 0.000000, + 0.125000, 0.000000, 0.109375, -0.750000, 0.125000, 0.000000, -0.187500, 0.156250, 0.000000, 0.109375, + 0.000000, 0.109375, 0.000000, -0.265625, -0.031250, 0.000000, 0.125000, 0.000000, 0.203125, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.187500, 0.000000, -0.203125, + 0.000000, 0.000000, 0.187500, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.062500, 0.031250, + 0.015625, 0.000000, 0.078125, -0.328125, -0.031250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.015625, 0.000000, -0.468750, -0.031250, 0.000000, + 0.000000, -0.046875, 0.703125, 0.000000, -0.093750, 0.265625, 0.312500, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.328125, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.375000, 0.000000, -0.187500, + 0.109375, 0.000000, 0.000000, 0.125000, 0.000000, -0.078125, 0.000000, 0.000000, 0.000000, -0.250000, + -0.156250, 0.000000, 0.250000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, -0.187500, 0.000000, 0.109375, 0.000000, 0.031250, 0.031250, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.234375, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.171875, 0.000000, -0.328125, 0.000000, 0.000000, 0.000000, 0.000000, -0.281250, + 0.000000, 
0.000000, -0.265625, 0.000000, 0.125000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + -0.312500, 0.000000, 0.000000, 0.000000, 0.000000, -0.031250, 0.000000, 0.390625, 0.000000, 0.000000, + -0.343750, 0.000000, 0.000000, -0.015625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.046875, + -0.109375, 0.171875, -0.031250, 0.125000, 0.000000, 0.000000, -0.359375, -0.171875, -0.328125, 0.000000, + 0.000000, 0.218750, 0.281250, -0.437500, 0.000000, 0.000000, 0.109375, -0.093750, 0.000000, -0.125000, + 0.000000, 0.000000, 0.140625, 0.156250, 0.000000, 0.000000, 0.375000, 0.000000, 0.156250, 0.000000, + 0.000000, 0.265625, 0.000000, 0.000000, 0.296875, 0.000000, 0.000000, -0.140625, 0.000000, -0.250000, + 0.000000, -0.187500, 0.296875, 0.000000, -0.218750, 0.000000, 0.218750, 0.000000, -0.171875, -0.218750, + 0.000000, -0.328125, 0.000000, 0.062500, 0.000000, 0.234375, 0.000000, 0.000000, 0.000000, -0.062500, + 0.000000, 0.000000, 0.265625, 0.000000, 0.000000, 0.000000, 0.046875, 0.000000, 0.000000, 0.093750, + -0.265625, 0.000000, 0.265625, -0.359375, 0.000000, 0.000000, 0.062500, 0.000000, 0.140625, 0.000000, + 0.046875, 0.000000, 0.000000, 0.000000, 0.156250, 0.000000, 0.203125, 0.000000, 0.000000, -0.203125, + 0.000000, 0.000000, 0.328125, 0.000000, -0.484375, 0.000000, 0.281250, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.390625, 0.000000, 0.000000, + 0.000000, 0.000000, -0.031250, 0.000000, -0.375000, -0.156250, 0.000000, -0.015625, 0.000000, -0.421875, + 0.000000, 0.093750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.312500, 0.000000, + 0.015625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.390625, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.125000, 0.046875, 0.000000, 0.000000, 0.000000, + -0.296875, 0.000000, 0.000000, 0.000000, 0.078125, 0.000000, 0.000000, 0.281250, 0.156250, 0.000000, + 0.000000, -0.156250, 0.218750, 
0.375000, 0.000000, 0.000000, 0.000000, -0.015625, -0.125000, 0.015625, + 0.359375, 0.171875, 0.000000, -0.078125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.031250, + 0.000000, 0.187500, 0.000000, 0.000000, 0.000000, 0.000000, 0.281250, 0.171875, 0.000000, 0.171875, + 0.000000, -0.078125, -0.312500, 0.000000, 0.000000, 0.000000, 0.000000, -0.250000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.031250, 0.000000, 0.296875, 0.000000, 0.000000, -0.265625, 0.000000, + 0.000000, 0.343750, 0.250000, 0.000000, -0.265625, 0.000000, 0.000000, 0.000000, 0.000000, -0.109375, + 0.000000, 0.078125, 0.000000, -0.140625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.187500, 0.000000, 0.078125, -0.156250, 0.234375, -0.031250, 0.000000, 0.234375, 0.000000, 0.109375, + 0.031250, 0.000000, -0.187500, 0.000000, 0.093750, 0.343750, -0.062500, 0.000000, -0.015625, 0.093750, + 0.000000, 0.000000, 0.000000, 0.015625, 0.015625, 0.000000, 0.140625, 0.234375, 0.156250, 0.000000, + 0.000000, -0.062500, 0.187500, 0.000000, 0.000000, 0.000000, 0.015625, -0.125000, 0.000000, 0.234375, + -0.234375, -0.171875, 0.171875, 0.015625, 0.000000, 0.375000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.171875, -0.218750, 0.000000, 0.171875, 0.000000, 0.000000, -0.718750, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, -0.328125, 0.000000, 0.218750, -0.265625, 0.000000, 0.000000, + 0.000000, 0.187500, 0.000000, 0.109375, 0.000000, -0.171875, 0.062500, 0.000000, 0.000000, 0.093750, + 0.000000, 0.000000, -0.078125, 0.000000, -0.328125, 0.000000, 0.000000, 0.187500, 0.000000, -0.359375, + -0.015625, 0.000000, 0.000000, 0.000000, 0.000000, 0.203125, -0.093750, 0.000000, 0.078125, 0.000000, + 0.000000, -0.187500, 0.000000, 0.000000, 0.000000, -0.109375, 0.000000, 0.312500, 0.187500, 0.000000, + 0.000000, -0.078125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.203125, -0.171875, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 
-0.140625, 0.156250, 0.187500, 0.000000, 0.109375, + 0.000000, 0.000000, 0.000000, 0.000000, 0.078125, 0.000000, 0.000000, 0.000000, 0.000000, 0.140625, + 0.000000, -0.156250, 0.000000, 0.312500, 0.000000, 0.000000, 0.000000, 0.000000, 0.031250, -0.031250, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, -0.046875, 0.062500, 0.000000, 0.000000, 0.000000, -0.015625, -0.125000, 0.000000, 0.000000, + 0.000000, -0.156250, -0.015625, 0.250000, -0.109375, -0.171875, 0.000000, 0.000000, 0.000000, 0.000000, + 0.343750, 0.437500, -0.031250, 0.093750, 0.000000, -0.250000, 0.031250, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.078125, 0.031250, 0.000000, 0.000000, 0.437500, + 0.000000, 0.140625, 0.296875, 0.125000, 0.000000, -0.078125, -0.156250, 0.000000, 0.000000, -0.109375, + 0.000000, -0.156250, -0.062500, 0.203125, -0.062500, 0.000000, 0.140625, -0.125000, 0.218750, 0.000000, + 0.000000, -0.421875, 0.000000, 0.000000, 0.312500, 0.000000, 0.000000, 0.234375, 0.250000, 0.000000, + 0.000000, 0.000000, 0.406250, 0.000000, -0.062500, 0.000000, 0.015625, 0.000000, 0.000000, 0.000000, + -0.281250, 0.000000, 0.515625, 0.000000, 0.000000, 0.484375, 0.187500, 0.000000, -0.218750, 0.000000, + 0.312500, 0.000000, 0.125000, 0.062500, 0.125000, 0.000000, 0.468750, -0.578125, 0.000000, -0.546875, + -0.265625, 0.000000, 0.000000, 0.000000, -0.328125, 0.234375, 0.296875, -0.468750, 0.000000, 0.000000, + 0.187500, 0.000000, 0.000000, 0.046875, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.093750, + 0.000000, 0.640625, -0.421875, 0.000000, -0.296875, 0.000000, 0.000000, 0.093750, 0.000000, -0.234375, + 0.000000, 0.000000, -0.281250, -0.265625, 0.000000, -0.250000, 0.250000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.046875, 0.015625, 0.000000, 0.000000, 0.156250, 0.000000, 0.000000, 0.250000, + -0.062500, 0.000000, 0.000000, 0.000000, 0.187500, 0.000000, 0.000000, 
0.000000, 0.250000, 0.000000, + -0.140625, -0.125000, 0.000000, 0.250000, 0.000000, 0.000000, -0.343750, 0.000000, 0.343750, 0.000000, + 0.000000, -0.140625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.281250, 0.000000, 0.000000, + 0.234375, 0.000000, 0.078125, -0.515625, 0.000000, -0.234375, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.375000, 0.000000, -0.046875, 0.000000, 0.296875, 0.296875, 0.000000, 0.109375, + 0.312500, 0.000000, -0.281250, 0.000000, 0.109375, 0.218750, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.281250, 0.000000, -0.109375, 0.000000, 0.203125, 0.000000, -0.046875, 0.000000, + 0.000000, 0.000000, -0.281250, 0.000000, -0.234375, 0.515625, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.109375, 0.109375, 0.000000, -0.031250, -0.156250, -0.296875, 0.000000, -0.390625, + 0.000000, 0.171875, -0.093750, 0.000000, 0.312500, 0.312500, 0.000000, -0.125000, 0.000000, 0.171875, + -0.093750, 0.125000, 0.000000, 0.000000, 0.000000, 0.203125, 0.000000, 0.046875, 0.000000, -0.281250, + -0.281250, -0.265625, 0.000000, 0.000000, 0.000000, 0.343750, 0.000000, 0.000000, 0.000000, -0.140625, + 0.000000, 0.359375, 0.203125, 0.000000, -0.125000, 0.000000, 0.000000, -0.140625, -0.046875, 0.171875, + 0.421875, -0.078125, 0.187500, 0.000000, 0.000000, 0.250000, 0.156250, 0.000000, -0.234375, -0.500000, + 0.031250, 0.265625, 0.390625, -0.453125, 0.000000, 0.000000, 0.000000, -0.296875, -0.109375, -0.390625, + 0.000000, -0.250000, 0.000000, 0.000000, -0.203125, 0.000000, 0.250000, -0.234375, 0.000000, -0.078125, + 0.265625, 0.140625, -0.140625, 0.000000, 0.000000, 0.000000, 0.281250, 0.546875, 0.000000, 0.000000, + 0.000000, 0.000000, -0.359375, 0.000000, -0.328125, 0.156250, -0.296875, 0.171875, 0.000000, 0.000000, + 0.171875, 0.000000, -0.625000, 0.000000, 0.000000, 0.000000, 0.000000, 0.171875, -0.140625, 0.000000, + -0.187500, 0.000000, 0.078125, -0.281250, 0.187500, 0.000000, 0.125000, -0.093750, 
0.000000, 0.203125, + -0.203125, 0.000000, -0.187500, 0.031250, 0.000000, -0.156250, -0.078125, -0.078125, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.281250, 0.000000, 0.078125, 0.000000, 0.296875, 0.000000, 0.000000, + -0.234375, 0.000000, -0.015625, 0.000000, 0.078125, 0.281250, 0.000000, 0.171875, 0.109375, 0.000000, + 0.203125, -0.406250, -0.187500, 0.000000, 0.000000, 0.000000, -0.328125, 0.046875, 0.000000, 0.296875, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.234375, + 0.265625, -0.156250, -0.203125, 0.000000, -0.281250, 0.171875, 0.000000, 0.000000, 0.375000, -0.062500, + 0.000000, 0.406250, 0.000000, 0.296875, -0.031250, -0.046875, 0.000000, 0.328125, 0.000000, 0.000000, + 0.000000, 0.218750, -0.109375, 0.000000, 0.000000, 0.125000, -0.093750, -0.125000, 0.000000, -0.171875, + 0.234375, -0.140625, 0.000000, 0.062500, -0.015625, 0.000000, 0.156250, -0.453125, 0.000000, 0.000000, + 0.109375, -0.140625, 0.109375, 0.000000, 0.312500, 0.000000, -0.171875, 0.125000, -0.250000, 0.000000, + 0.187500, -0.078125, 0.156250, 0.000000, 0.125000, 0.203125, 0.000000, -0.281250, 0.000000, 0.000000, + 0.000000, 0.000000, 0.109375, 0.000000, 0.000000, 0.250000, 0.187500, 0.000000, 0.171875, -0.109375, + 0.000000, 0.000000, -0.031250, 0.000000, 0.187500, 0.203125, 0.000000, 0.000000, 0.000000, 0.062500, + 0.093750, 0.156250, 0.000000, 0.015625, 0.000000, 0.515625, 0.328125, 0.000000, -0.015625, 0.000000, + 0.000000, 0.312500, 0.484375, 0.000000, 0.000000, -0.312500, 0.000000, -0.531250, -0.250000, -0.140625, + 0.125000, 0.000000, 0.406250, 0.000000, 0.000000, 0.171875, 0.296875, -0.875000, 0.000000, -0.281250, + 0.359375, 0.000000, 0.000000, 0.000000, -0.375000, 0.000000, 0.250000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.500000, 0.000000, -0.140625, 0.000000, -0.156250, 0.000000, 0.000000, -0.218750, + 0.296875, 0.000000, -1.109375, 0.000000, 0.000000, 0.000000, 0.250000, 0.000000, -0.046875, 
0.062500, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.328125, 0.000000, 0.109375, 0.000000, -0.109375, + 0.531250, 0.203125, 0.000000, 0.000000, 0.156250, -0.203125, -0.484375, 0.000000, 0.000000, -0.281250, + 0.000000, 0.000000, -0.156250, 0.000000, 0.000000, -0.078125, 0.203125, -0.109375, 0.000000, 0.000000, + 0.000000, -0.062500, -0.062500, 0.000000, 0.000000, 0.000000, -0.109375, -0.250000, 0.000000, 0.000000, + 0.000000, -0.046875, -0.281250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.093750, + 0.328125, 0.000000, 0.312500, 0.000000, 0.000000, 0.000000, 0.515625, -0.187500, 0.000000, 0.062500, + -0.031250, -0.250000, 0.281250, -0.015625, -0.312500, 0.000000, 0.140625, 0.000000, 0.000000, 0.000000, + 0.218750, 0.375000, -0.203125, 0.343750, 0.000000, 0.125000, 0.125000, 0.359375, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.093750, 0.312500, 0.000000, 0.000000, 0.250000, 0.000000, + 0.359375, -0.015625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.312500, 0.000000, 0.000000, + -0.125000, 0.156250, 0.000000, -0.781250, 0.000000, 0.000000, -0.093750, 0.156250, -0.328125, 0.078125, + 0.125000, 0.140625, 0.000000, 0.109375, -0.187500, 0.171875, 0.140625, -0.265625, -0.234375, 0.000000, + 0.000000, 0.000000, 0.000000, -0.093750, 0.000000, 0.000000, 0.171875, 0.000000, 0.078125, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.218750, -0.171875, 0.000000, -0.187500, + -0.234375, 0.000000, 0.140625, 0.156250, 0.000000, 0.484375, 0.406250, -0.234375, 0.343750, -0.812500, + 0.000000, 0.000000, 0.000000, -1.265625, 0.000000, 0.000000, 0.000000, -0.421875, 0.000000, 0.000000, + 0.000000, -0.265625, 0.000000, 0.000000, 0.000000, 0.281250, 0.000000, 0.000000, -0.843750, 0.000000}; #endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w20.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w20.h new file mode 100644 index 0000000000000..25d75a1880f14 --- /dev/null 
+++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w20.h @@ -0,0 +1,12 @@ +//Numpy array shape [10, 1] +//Min -0.931640625000 +//Max 1.476562500000 +//Number of zeros 6 + +#ifndef W20_H_ +#define W20_H_ + +weight20_t w20[10] = { + 0.000000, 0.000000, 0.000000, 1.185547, 0.000000, 1.476562, -0.931641, 0.769531, 0.000000, 0.000000}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w3.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w3.h deleted file mode 100644 index b67cf178a716e..0000000000000 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w3.h +++ /dev/null @@ -1,23 +0,0 @@ -//Numpy array shape [10, 10] -//Min -1.255380868912 -//Max 1.165371656418 -//Number of zeros 0 - -#ifndef W3_H_ -#define W3_H_ - -weight_default_t w3[100] = { - -0.24639761, 0.36854371, -0.20667994, 0.63942766, -0.48512432, -0.20601453, 0.95860600, -0.76670301, -0.62915105, - -0.16087309, -0.71208179, -0.22137630, -0.61618358, -0.28030652, -0.16592601, 0.01428368, -0.02218036, 0.18670039, - -0.05923353, 0.38925353, -0.03025977, 0.18113941, 0.04013579, -0.24923514, 0.04662795, -0.21779495, -0.11618838, - 0.27686477, -0.12692934, -0.14645813, 0.13050388, -0.61944312, -0.97363800, 0.34909710, -0.49283633, 0.35699531, - -0.21654762, 0.29707199, -0.37069076, -0.45038351, 0.23440604, -0.01497080, -0.43628553, 0.47897390, -0.57205141, - 0.28325596, 0.45101821, 0.30717590, -0.82709831, -1.01788270, 0.11227678, 0.40207320, -0.01430387, 0.33558398, - 0.14979517, 0.40087056, 0.56262153, -0.08988120, -0.39212254, 0.19313116, 0.18044059, -0.09485760, 0.07735054, - -1.25538087, -0.37033975, 0.96087897, -0.62376523, 0.97630143, 0.54678482, 1.16537166, -0.38099980, 0.25253880, - -0.48733908, 0.30896747, 0.00154836, -1.06780457, -0.38455144, 0.22028424, 0.40647805, -0.58109504, -0.29596746, - -0.19207183, -0.55882788, 0.12817945, -0.23813887, 0.05867399, 0.29090765, 0.50279891, 0.23116076, 0.11913682, - -0.03850375, -0.61140555, 0.42096528, 
-0.28724584, 0.06309307, -0.41296995, -0.22518104, 0.10956753, 0.17092451, - 0.46520787}; - -#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w4.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w4.h deleted file mode 100644 index d2b8c3be33f48..0000000000000 --- a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w4.h +++ /dev/null @@ -1,20 +0,0 @@ -//Numpy array shape [10, 1] -//Min -0.562117636204 -//Max 0.764084100723 -//Number of zeros 0 - -#ifndef W4_H_ -#define W4_H_ - -weight_default_t w4[10] = {0.41375983, - -0.10875144, - 0.31972024, - -0.56211764, - 0.16606922, - 0.33737957, - -0.11298771, - 0.61149263, - 0.09088434, - 0.76408410}; - -#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w5.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w5.h new file mode 100644 index 0000000000000..f001160129b86 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w5.h @@ -0,0 +1,74 @@ +//Numpy array shape [25, 25] +//Min -1.203125000000 +//Max 1.078125000000 +//Number of zeros 375 + +#ifndef W5_H_ +#define W5_H_ + +weight5_t w5[625] = { + -0.578125, 0.515625, 0.000000, -0.796875, 0.359375, -0.562500, 0.000000, 0.000000, 0.359375, 0.000000, + 0.390625, 0.000000, 0.000000, -0.281250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.484375, -0.718750, 0.000000, 0.000000, 0.000000, -0.578125, 0.000000, 0.000000, + 0.000000, 0.343750, 0.000000, 0.421875, 0.000000, 0.218750, 0.000000, 0.000000, -0.281250, 0.000000, + 0.218750, 0.000000, 0.000000, 0.000000, 0.000000, -0.625000, 0.250000, -0.375000, 0.000000, 0.000000, + 0.000000, 0.000000, -1.062500, 0.000000, 0.515625, 0.000000, -0.203125, -0.546875, 0.828125, 0.734375, + 0.000000, 0.500000, 0.000000, 0.000000, -1.203125, 0.000000, -1.062500, 0.375000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.375000, 0.703125, 0.000000, 0.000000, 0.000000, 0.000000, 0.390625, 0.000000, + 0.000000, 0.000000, 
0.000000, 0.000000, 0.000000, 0.375000, 0.000000, 0.000000, -0.250000, 0.265625, + -0.312500, 0.000000, 0.171875, 0.312500, 0.000000, 0.000000, 0.250000, 0.000000, 0.000000, 0.203125, + 0.000000, 0.000000, -0.328125, 0.546875, 0.000000, 0.000000, 0.000000, 0.000000, -0.343750, 0.000000, + 0.000000, 0.000000, 0.000000, -0.218750, 0.281250, -0.296875, 0.000000, -0.187500, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, -0.468750, 0.000000, 0.328125, 0.000000, -0.234375, 0.000000, + 0.421875, 0.000000, 0.000000, 0.312500, 0.000000, 0.000000, 0.000000, 0.140625, 0.000000, 0.000000, + 0.296875, -0.390625, 0.000000, 0.000000, 0.000000, -0.593750, 0.421875, 0.250000, 0.000000, -0.234375, + 0.000000, 0.078125, 0.000000, 0.328125, 0.000000, 0.000000, -0.187500, -0.156250, 0.000000, -0.281250, + 0.000000, 0.000000, 0.359375, 0.000000, 0.218750, -0.281250, 0.000000, -0.171875, 0.218750, 0.000000, + 0.000000, 0.000000, 0.000000, 0.312500, 0.000000, 0.000000, 0.406250, 0.000000, 0.000000, 0.000000, + 0.312500, -0.468750, 0.000000, 0.000000, 0.421875, 0.000000, 0.000000, 0.000000, 0.000000, -0.281250, + -0.218750, -0.484375, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.156250, 0.328125, + 0.421875, 0.000000, 0.218750, 0.640625, 0.000000, 0.187500, 0.000000, 0.000000, -0.234375, -0.531250, + 0.671875, 0.000000, -0.250000, 0.000000, 0.000000, -0.375000, 0.000000, 0.390625, -0.203125, 0.000000, + 0.000000, -0.375000, 0.390625, -0.468750, -0.421875, -0.015625, 0.437500, 0.000000, -0.531250, -0.781250, + 0.500000, 0.000000, 0.671875, 0.421875, 0.000000, 0.000000, -0.359375, 0.000000, 0.000000, 0.359375, + 0.000000, -0.156250, 0.000000, 0.000000, 0.218750, -0.328125, 0.000000, 0.000000, 0.000000, 0.000000, + -0.281250, 0.093750, -0.328125, 0.000000, 0.312500, 0.000000, -0.171875, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.343750, -0.234375, 0.125000, -0.250000, + 0.000000, 0.000000, 0.421875, 0.437500, 
-0.343750, 0.375000, 0.000000, -0.390625, 0.000000, 0.281250, + 0.000000, 0.203125, 0.000000, 0.000000, 0.000000, 0.000000, -0.234375, -0.312500, -0.312500, 0.000000, + 0.000000, -0.250000, 0.234375, 0.000000, 0.000000, 0.000000, 0.000000, -0.187500, 0.000000, 0.000000, + 0.000000, 0.000000, 0.281250, 0.000000, 0.000000, -0.359375, 0.453125, 0.000000, 0.000000, -0.093750, + -0.406250, 0.250000, 0.000000, -0.281250, 0.000000, 0.000000, -0.250000, -0.250000, 0.000000, -0.234375, + -0.125000, -0.171875, 0.468750, -0.484375, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.375000, 0.000000, 0.000000, 0.140625, + -0.156250, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.187500, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.218750, 0.000000, 0.000000, 0.000000, + -0.125000, 0.390625, 0.000000, -0.328125, 1.078125, 0.234375, 0.312500, 0.000000, 0.000000, 0.000000, + 0.781250, -0.218750, 0.000000, 0.312500, 0.000000, 0.000000, 0.000000, -0.500000, -0.906250, -0.687500, + 0.000000, 0.500000, 0.437500, 0.000000, 0.000000, 0.000000, -0.265625, 0.078125, 0.000000, 0.000000, + 0.000000, -0.500000, 0.265625, 0.000000, 0.000000, 0.000000, -0.562500, 0.000000, 0.656250, 0.000000, + 0.468750, 0.000000, 0.000000, 0.000000, 0.140625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, -0.562500, 0.000000, 0.000000, -0.343750, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.000000, 0.671875, -0.328125, 0.468750, 0.000000, 0.468750, 0.000000, + 0.062500, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.203125, 0.109375, 0.000000, 0.000000, + -0.328125, 0.000000, 0.218750, 0.000000, 0.000000, -0.328125, -0.187500, 0.000000, 0.203125, 0.296875, + -0.671875, 0.031250, -0.546875, -0.234375, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.812500, + 0.000000, 0.250000, 0.000000, 0.265625, -0.468750, 0.234375, 
0.000000, 0.281250, 0.000000, 0.000000, + -0.828125, -0.671875, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.031250, + -0.203125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.421875, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, 0.656250, 0.000000, 0.453125, 0.343750, 0.343750, 0.000000, 0.000000, + 0.000000, 0.265625, 0.218750, 0.000000, -0.546875, 0.000000, 0.000000, -0.296875, 0.296875, 0.000000, + 0.000000, 0.000000, 0.281250, -0.234375, 0.234375, 0.203125, 0.000000, 0.000000, 0.000000, 0.359375, + 0.000000, -1.078125, 0.000000, 0.000000, 0.000000, -0.187500, 0.437500, 0.000000, 0.000000, -0.500000, + 0.484375, 0.000000, 0.000000, 0.281250, 0.000000, 0.359375, 0.000000, 0.000000, -0.187500, 0.000000, + 0.000000, 0.000000, 0.000000, -0.437500, 0.203125, 0.203125, 0.000000, -0.328125, 0.000000, 0.000000, + -0.250000, 0.000000, 0.000000, 0.000000, 0.390625, 0.000000, 0.328125, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.125000, 0.000000, -0.265625, 0.171875, 0.000000, 0.000000, 0.000000, 0.000000, + -0.421875, 0.359375, 0.000000, -0.390625, -0.093750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, -0.156250, 0.000000, 0.296875, 0.187500, 0.406250, 0.000000, 0.000000, 0.281250, 0.000000, + 0.000000, -0.046875, 0.000000, 0.000000, 0.000000, -0.265625, -0.250000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.375000, 0.000000, -0.375000, 0.000000, 0.000000, 0.000000, 0.000000, -0.968750, + -0.640625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.500000, 0.000000, 0.000000, 0.515625, + 0.531250, 0.000000, 0.000000, 0.000000, 0.000000}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w8.h b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w8.h new file mode 100644 index 0000000000000..30533952a7b8f --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/interface/taus/weights/w8.h @@ -0,0 +1,49 @@ +//Numpy array shape [25, 15] +//Min -0.859375000000 
+//Max 0.750000000000 +//Number of zeros 225 + +#ifndef W8_H_ +#define W8_H_ + +weight8_t w8[375] = { + 0.000000, -0.250000, 0.000000, 0.000000, 0.000000, 0.000000, 0.562500, -0.187500, 0.312500, 0.234375, + 0.234375, 0.140625, -0.203125, 0.000000, 0.000000, -0.218750, -0.281250, 0.000000, 0.000000, 0.000000, + 0.187500, 0.296875, 0.000000, -0.296875, 0.000000, -0.203125, 0.328125, -0.390625, 0.000000, 0.000000, + 0.593750, -0.234375, 0.000000, 0.000000, -0.375000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.265625, 0.000000, 0.000000, 0.000000, 0.000000, -0.343750, 0.000000, 0.000000, 0.062500, + 0.000000, 0.000000, 0.000000, 0.312500, 0.000000, 0.000000, 0.406250, -0.265625, -0.421875, 0.000000, + 0.171875, 0.000000, 0.000000, 0.000000, 0.000000, -0.328125, 0.000000, 0.000000, -0.468750, 0.000000, + 0.000000, 0.000000, 0.000000, 0.562500, 0.453125, 0.453125, 0.000000, 0.000000, 0.421875, -0.437500, + -0.296875, -0.250000, -0.359375, 0.000000, -0.234375, -0.625000, 0.000000, -0.328125, 0.000000, -0.359375, + 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, -0.343750, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, -0.859375, 0.000000, 0.000000, 0.671875, 0.000000, 0.000000, 0.203125, + 0.218750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.468750, -0.234375, 0.296875, 0.000000, + -0.640625, 0.359375, 0.000000, 0.000000, 0.000000, 0.000000, -0.343750, 0.203125, -0.312500, -0.234375, + 0.000000, 0.250000, 0.187500, 0.000000, 0.000000, 0.000000, 0.000000, 0.531250, 0.000000, 0.000000, + -0.250000, 0.000000, 0.000000, -0.187500, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.468750, -0.218750, 0.375000, 0.000000, -0.265625, 0.000000, -0.218750, -0.296875, 0.265625, + -0.562500, 0.281250, 0.000000, 0.390625, 0.437500, 0.000000, 0.000000, 0.218750, 0.625000, 0.000000, + 0.000000, 0.218750, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.203125, 0.000000, 0.750000, + 0.000000, 0.000000, 
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.187500, -0.234375, + 0.265625, 0.171875, -0.328125, 0.328125, 0.000000, 0.250000, 0.000000, 0.000000, -0.218750, 0.000000, + 0.000000, -0.281250, 0.000000, -0.312500, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.500000, 0.125000, 0.000000, 0.265625, 0.312500, 0.203125, 0.562500, 0.000000, -0.234375, 0.187500, + 0.000000, 0.000000, 0.203125, 0.000000, 0.000000, -0.156250, 0.515625, 0.000000, 0.000000, 0.000000, + -0.187500, 0.000000, 0.000000, -0.296875, 0.000000, -0.093750, -0.296875, 0.000000, 0.484375, 0.000000, + 0.453125, 0.000000, -0.203125, 0.000000, 0.000000, -0.406250, 0.000000, -0.187500, 0.250000, -0.343750, + 0.000000, 0.000000, 0.000000, -0.343750, 0.000000, 0.000000, 0.000000, 0.000000, 0.578125, 0.000000, + 0.000000, 0.000000, 0.000000, 0.250000, 0.000000, 0.000000, 0.171875, 0.218750, 0.000000, 0.000000, + 0.000000, -0.281250, 0.000000, 0.468750, -0.375000, 0.000000, 0.000000, 0.000000, 0.000000, -0.343750, + 0.000000, 0.453125, 0.000000, 0.000000, 0.000000, 0.281250, 0.609375, 0.000000, 0.000000, 0.000000, + -0.218750, 0.000000, -0.406250, 0.000000, -0.328125, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, -0.734375, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.250000, 0.000000, + 0.000000, 0.000000, -0.328125, 0.000000, 0.640625, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, -0.593750, -0.375000, 0.000000, 0.312500, 0.312500, 0.000000, 0.562500, + 0.000000, 0.000000, -0.593750, 0.000000, 0.281250, 0.218750, 0.359375, 0.000000, 0.000000, -0.296875, + 0.000000, 0.000000, -0.296875, -0.250000, 0.000000, -0.500000, 0.000000, 0.000000, 0.593750, 0.000000, + 0.000000, 0.000000, 0.000000, -0.328125, -0.343750, 0.531250, 0.000000, 0.000000, 0.000000, 0.000000, + 0.000000, 0.000000, 0.000000, -0.234375, 0.000000, 0.000000, 0.000000, 0.000000, -0.421875, -0.250000, + 0.000000, 0.000000, -0.375000, -0.437500, 
-0.437500}; + +#endif diff --git a/L1Trigger/Phase2L1ParticleFlow/plugins/L1CTJetFileWriter.cc b/L1Trigger/Phase2L1ParticleFlow/plugins/L1CTJetFileWriter.cc index 82957e57178e1..38e1d75601524 100644 --- a/L1Trigger/Phase2L1ParticleFlow/plugins/L1CTJetFileWriter.cc +++ b/L1Trigger/Phase2L1ParticleFlow/plugins/L1CTJetFileWriter.cc @@ -1,4 +1,5 @@ #include +#include // user include files #include "FWCore/Framework/interface/Frameworkfwd.h" @@ -16,6 +17,7 @@ #include "L1Trigger/DemonstratorTools/interface/utilities.h" #include "DataFormats/L1TParticleFlow/interface/PFJet.h" #include "DataFormats/L1TParticleFlow/interface/gt_datatypes.h" +#include "DataFormats/L1Trigger/interface/EtSum.h" // // class declaration @@ -29,7 +31,8 @@ class L1CTJetFileWriter : public edm::one::EDAnalyzer private: // ----------constants, enums and typedefs --------- - unsigned nJets_; + std::vector collections_; + size_t nFramesPerBX_; size_t ctl2BoardTMUX_; size_t gapLengthOutput_; @@ -39,45 +42,84 @@ class L1CTJetFileWriter : public edm::one::EDAnalyzer // ----------member functions ---------------------- void analyze(const edm::Event&, const edm::EventSetup&) override; void endJob() override; - std::vector> encodeJets(const std::vector jets); + std::vector> encodeJets(const std::vector jets, unsigned nJets); + std::vector> encodeSums(const std::vector sums, unsigned nSums); - edm::EDGetTokenT> jetsToken_; l1t::demo::BoardDataWriter fileWriterOutputToGT_; + std::vector>, edm::EDGetTokenT>>> tokens_; + std::vector> tokensToWrite_; + std::vector nJets_; + std::vector nSums_; }; L1CTJetFileWriter::L1CTJetFileWriter(const edm::ParameterSet& iConfig) - : nJets_(iConfig.getParameter("nJets")), + : collections_(iConfig.getParameter>("collections")), nFramesPerBX_(iConfig.getParameter("nFramesPerBX")), ctl2BoardTMUX_(iConfig.getParameter("TMUX")), - gapLengthOutput_(ctl2BoardTMUX_ * nFramesPerBX_ - 2 * nJets_), + gapLengthOutput_(iConfig.getParameter("gapLengthOutput")), 
maxLinesPerFile_(iConfig.getParameter("maxLinesPerFile")), channelSpecsOutputToGT_{{{"jets", 0}, {{ctl2BoardTMUX_, gapLengthOutput_}, {0}}}}, - jetsToken_(consumes>(iConfig.getParameter("jets"))), fileWriterOutputToGT_(l1t::demo::parseFileFormat(iConfig.getParameter("format")), iConfig.getParameter("outputFilename"), iConfig.getParameter("outputFileExtension"), nFramesPerBX_, ctl2BoardTMUX_, maxLinesPerFile_, - channelSpecsOutputToGT_) {} + channelSpecsOutputToGT_) { + for (const auto& pset : collections_) { + edm::EDGetTokenT> jetToken; + edm::EDGetTokenT> mhtToken; + unsigned nJets = pset.getParameter("nJets"); + unsigned nSums = pset.getParameter("nSums"); + nJets_.push_back(nJets); + nSums_.push_back(nSums); + bool writeJetToken(false), writeMhtToken(false); + if (nJets > 0) { + jetToken = consumes>(pset.getParameter("jets")); + writeJetToken = true; + } + if (nSums > 0) { + mhtToken = consumes>(pset.getParameter("mht")); + writeMhtToken = true; + } + tokens_.emplace_back(jetToken, mhtToken); + tokensToWrite_.emplace_back(writeJetToken, writeMhtToken); + } +} void L1CTJetFileWriter::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { using namespace edm; - // 1) Encode jet information onto vectors containing link data - // TODO remove the sort here and sort the input collection where it's created - const edm::View& jets = iEvent.get(jetsToken_); - std::vector sortedJets; - sortedJets.reserve(jets.size()); - std::copy(jets.begin(), jets.end(), std::back_inserter(sortedJets)); - - std::stable_sort( - sortedJets.begin(), sortedJets.end(), [](l1t::PFJet i, l1t::PFJet j) { return (i.hwPt() > j.hwPt()); }); - const auto outputJets(encodeJets(sortedJets)); + // 1) Pack collections in the order they're specified. 
jets then sums within collection + std::vector> link_words; + for (unsigned iCollection = 0; iCollection < collections_.size(); iCollection++) { + if (tokensToWrite_.at(iCollection).first) { + const auto& jetToken = tokens_.at(iCollection).first; + // 2) Encode jet information onto vectors containing link data + const edm::View& jets = iEvent.get(jetToken); + std::vector sortedJets; + sortedJets.reserve(jets.size()); + std::copy(jets.begin(), jets.end(), std::back_inserter(sortedJets)); + + std::stable_sort( + sortedJets.begin(), sortedJets.end(), [](l1t::PFJet i, l1t::PFJet j) { return (i.hwPt() > j.hwPt()); }); + const auto outputJets(encodeJets(sortedJets, nJets_.at(iCollection))); + link_words.insert(link_words.end(), outputJets.begin(), outputJets.end()); + } - // 2) Pack jet information into 'event data' object, and pass that to file writer + if (tokensToWrite_.at(iCollection).second) { + // 3) Encode sums onto vectors containing link data + const auto& mhtToken = tokens_.at(iCollection).second; + const edm::View& mht = iEvent.get(mhtToken); + std::vector orderedSums; + std::copy(mht.begin(), mht.end(), std::back_inserter(orderedSums)); + const auto outputSums(encodeSums(orderedSums, nSums_.at(iCollection))); + link_words.insert(link_words.end(), outputSums.begin(), outputSums.end()); + } + } + // 4) Pack jet information into 'event data' object, and pass that to file writer l1t::demo::EventData eventDataJets; - eventDataJets.add({"jets", 0}, outputJets); + eventDataJets.add({"jets", 0}, link_words); fileWriterOutputToGT_.addEvent(eventDataJets); } @@ -87,31 +129,51 @@ void L1CTJetFileWriter::endJob() { fileWriterOutputToGT_.flush(); } -std::vector> L1CTJetFileWriter::encodeJets(const std::vector jets) { - std::vector> jet_words; - for (unsigned i = 0; i < nJets_; i++) { - l1t::PFJet j; - if (i < jets.size()) { - j = jets.at(i); - } else { // pad up to nJets_ with null jets - l1t::PFJet j(0, 0, 0, 0, 0, 0); - } - jet_words.push_back(j.encodedJet()[0]); - 
jet_words.push_back(j.encodedJet()[1]); +std::vector> L1CTJetFileWriter::encodeJets(const std::vector jets, const unsigned nJets) { + // Encode up to nJets jets, padded with 0s + std::vector> jet_words(2 * nJets, 0); // allocate 2 words per jet + for (unsigned i = 0; i < std::min(nJets, (uint)jets.size()); i++) { + const l1t::PFJet& j = jets.at(i); + jet_words[2 * i] = j.encodedJet()[0]; + jet_words[2 * i + 1] = j.encodedJet()[1]; } return jet_words; } +std::vector> L1CTJetFileWriter::encodeSums(const std::vector sums, unsigned nSums) { + // Need two l1t::EtSum for each GT Sum + std::vector> sum_words; + for (unsigned i = 0; i < nSums; i++) { + if (2 * i < sums.size()) { + l1gt::Sum gtSum; + gtSum.valid = 1; // if the sums are sent at all, they are valid + gtSum.vector_pt.V = sums.at(2 * i + 1).hwPt(); + gtSum.vector_phi.V = sums.at(2 * i + 1).hwPhi(); + gtSum.scalar_pt.V = sums.at(2 * i).hwPt(); + sum_words.push_back(gtSum.pack_ap()); + } else { + sum_words.push_back(0); + } + } + return sum_words; +} + // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void L1CTJetFileWriter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { - //The following says we do not know what parameters are allowed so do no validation - // Please change this to state exactly what you do use, even if it is no parameters edm::ParameterSetDescription desc; - desc.add("jets"); + { + edm::ParameterSetDescription vpsd1; + vpsd1.addOptional("jets"); + vpsd1.addOptional("mht"); + vpsd1.add("nJets", 0); + vpsd1.add("nSums", 0); + desc.addVPSet("collections", vpsd1); + } desc.add("outputFilename"); desc.add("outputFileExtension", "txt"); desc.add("nJets", 12); desc.add("nFramesPerBX", 9); + desc.add("gapLengthOutput", 4); desc.add("TMUX", 6); desc.add("maxLinesPerFile", 1024); desc.add("format", "EMPv2"); diff --git a/L1Trigger/Phase2L1ParticleFlow/plugins/L1MHtPFProducer.cc b/L1Trigger/Phase2L1ParticleFlow/plugins/L1MHtPFProducer.cc 
index c788261e396a7..fbcc76db7dabf 100644 --- a/L1Trigger/Phase2L1ParticleFlow/plugins/L1MHtPFProducer.cc +++ b/L1Trigger/Phase2L1ParticleFlow/plugins/L1MHtPFProducer.cc @@ -43,16 +43,16 @@ void L1MhtPfProducer::produce(edm::StreamID, edm::Event& iEvent, const edm::Even // Get the jets from the event l1t::PFJetCollection edmJets = iEvent.get(jetsToken); + std::vector hwJets = convertEDMToHW(edmJets); // convert to the emulator format // Apply pT and eta selections - l1t::PFJetCollection edmJetsFiltered; - std::copy_if(edmJets.begin(), edmJets.end(), std::back_inserter(edmJetsFiltered), [&](auto jet) { - return jet.pt() > minJetPt && std::abs(jet.eta()) < maxJetEta; + std::vector hwJetsFiltered; + std::copy_if(hwJets.begin(), hwJets.end(), std::back_inserter(hwJetsFiltered), [&](auto jet) { + return jet.hwPt > l1ct::Scales::makePtFromFloat(minJetPt) && + std::abs(jet.hwEta) < l1ct::Scales::makeGlbEta(maxJetEta); }); - // Run the emulation - std::vector hwJets = convertEDMToHW(edmJetsFiltered); // convert to the emulator format - l1ct::Sum hwSums = htmht(hwJets); // call the emulator - std::vector edmSums = convertHWToEDM(hwSums); // convert back to edm format + l1ct::Sum hwSums = htmht(hwJetsFiltered); // call the emulator + std::vector edmSums = convertHWToEDM(hwSums); // convert back to edm format // Put the sums in the event std::unique_ptr> mhtCollection(new std::vector(0)); diff --git a/L1Trigger/Phase2L1ParticleFlow/plugins/L1NNTauProducer.cc b/L1Trigger/Phase2L1ParticleFlow/plugins/L1NNTauProducer.cc index 68baad7517a74..9359f761dce2f 100644 --- a/L1Trigger/Phase2L1ParticleFlow/plugins/L1NNTauProducer.cc +++ b/L1Trigger/Phase2L1ParticleFlow/plugins/L1NNTauProducer.cc @@ -29,8 +29,9 @@ class L1NNTauProducer : public edm::stream::EDProducer fTauNNId_; - std::unique_ptr fTauNNIdHW_; + std::unique_ptr fTauNNIdHW_; // Default void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; void process_SW(const l1t::PFCandidateCollection& parts, 
std::unique_ptr& iTaus); @@ -212,6 +213,7 @@ void L1NNTauProducer::makeTau_HW(const l1t::PFCandidate& seed, L1TauEmu::z0_t z0 = 0; L1TauEmu::dxy_t dxy = 0; + // Reconstruct the Tau Cone for (unsigned i0 = 0; i0 < parts.size(); i0++) { if (L1TauEmu::inCone(seed, (parts[i0]), rCone2)) { if (parts[i0].id() == l1t::PFCandidate::Electron || parts[i0].id() == l1t::PFCandidate::ChargedHadron || @@ -246,17 +248,19 @@ void L1NNTauProducer::makeTau_HW(const l1t::PFCandidate& seed, if (pt < fSeedPt_) return; - result_t NN = fTauNNIdHW_->compute(seed, parts); - input_t* lNNVector = fTauNNIdHW_->NNVectorVar(); + // Tau NN Inference + Tau_NN_Result NN_ouput = fTauNNIdHW_->compute(seed, parts); + // Needed for making PFTau + input_t* lNNVector = fTauNNIdHW_->NNVectorVar(); float pNNVec[80]; for (unsigned i0 = 0; i0 < 80; i0++) pNNVec[i0] = float(lNNVector[i0]); //Firmware Tau l1ct::Tau l1ctTau; - l1ctTau.hwPt = l1ct::pt_t(pt); //l1gt is <16,11> and currently <16,14> - l1ctTau.hwEta = l1ct::Scales::makeGlbEta(seed.eta()); // seed.eta() and seed.phi() are in physical coordinates + l1ctTau.hwPt = l1ct::pt_t(pt * NN_ouput.nn_pt_correction); //l1gt is <16,11> and currently <16,14> + l1ctTau.hwEta = l1ct::Scales::makeGlbEta(seed.eta()); // seed.eta() and seed.phi() are in physical coordinates l1ctTau.hwPhi = l1ct::Scales::makeGlbPhi(seed.phi()); l1ctTau.hwSeedPt = seed.pt(); @@ -264,7 +268,7 @@ void L1NNTauProducer::makeTau_HW(const l1t::PFCandidate& seed, l1ctTau.hwCharge = seed.charge(); l1ctTau.hwType = l1ct::Tau::type_t(lId); - l1ctTau.hwRawId = ap_uint<10>(NN * 1024); //NN Output is ap_fixed<16, 8> so need to cast. + l1ctTau.hwRawId = ap_uint<10>(NN_ouput.nn_id * 1024); //NN Output is ap_fixed<16, 6> so need to cast. 
//Convert to GT format and pack to encodedTau of PFTau l1gt::Tau l1gtTau = l1ctTau.toGT(); @@ -277,7 +281,7 @@ void L1NNTauProducer::makeTau_HW(const l1t::PFCandidate& seed, l1gt::Scales::floatPhi(l1gtTau.v3.phi), float(mass)); - l1t::PFTau l1PFTau(tempP4, pNNVec, NN, 0, lId); + l1t::PFTau l1PFTau(tempP4, pNNVec, NN_ouput.nn_id, 0, lId); l1PFTau.setZ0(float(z0) * 0.05); //L1TauEmu::z0_base); l1PFTau.setDxy(float(dxy) * 0.05); //L1TauEmu::dxy_base); diff --git a/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCorrelatorLayer1Producer.cc b/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCorrelatorLayer1Producer.cc index a6a77c217fe14..38e8222dab630 100644 --- a/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCorrelatorLayer1Producer.cc +++ b/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCorrelatorLayer1Producer.cc @@ -28,6 +28,7 @@ #include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/regionizer_base_ref.h" #include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_ref.h" #include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/buffered_folded_multififo_regionizer_ref.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/middle_buffer_multififo_regionizer_ref.h" #include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/tdr_regionizer_ref.h" #include "L1Trigger/Phase2L1ParticleFlow/interface/pf/pfalgo2hgc_ref.h" #include "L1Trigger/Phase2L1ParticleFlow/interface/pf/pfalgo3_ref.h" @@ -157,12 +158,9 @@ class L1TCorrelatorLayer1Producer : public edm::stream::EDProducer<> { std::unique_ptr> fetchDecodedTracks() const; void putPuppi(edm::Event &iEvent) const; - void putEgStaObjects(edm::Event &iEvent, - const std::string &egLablel, - std::vector>> &egsta_refs); + void putEgStaObjects(edm::Event &iEvent, const std::string &egLablel) const; void putEgObjects(edm::Event &iEvent, const bool writeEgSta, - const std::vector>> &egsta_refs, const std::string &tkEmLabel, const std::string &tkEmPerBoardLabel, const std::string &tkEleLabel, @@ -268,6 
+266,9 @@ L1TCorrelatorLayer1Producer::L1TCorrelatorLayer1Producer(const edm::ParameterSet const auto &pset = iConfig.getParameter("regionizerAlgoParameters"); regionizer_ = std::make_unique(pset.getParameter("barrelSetup"), pset); + } else if (regalgo == "MiddleBufferMultififo") { + regionizer_ = std::make_unique( + iConfig.getParameter("regionizerAlgoParameters")); } else if (regalgo == "TDR") { regionizer_ = std::make_unique( iConfig.getParameter("regionizerAlgoParameters")); @@ -370,9 +371,11 @@ void L1TCorrelatorLayer1Producer::fillDescriptions(edm::ConfigurationDescription auto bfMultififoRegPD = getParDesc("regionizerAlgo"); auto multififoBarrelRegPD = edm::ParameterDescription( "regionizerAlgoParameters", l1ct::MultififoRegionizerEmulator::getParameterSetDescriptionBarrel(), true); + auto mbMultififoRegPD = getParDesc("regionizerAlgo"); desc.ifValue(edm::ParameterDescription("regionizerAlgo", "Ideal", true), "Ideal" >> idealRegPD or "TDR" >> tdrRegPD or "Multififo" >> multififoRegPD or - "BufferedFoldedMultififo" >> bfMultififoRegPD or "MultififoBarrel" >> multififoBarrelRegPD); + "BufferedFoldedMultififo" >> bfMultififoRegPD or "MultififoBarrel" >> multififoBarrelRegPD or + "MiddleBufferMultififo" >> mbMultififoRegPD); // PF desc.ifValue(edm::ParameterDescription("pfAlgo", "PFAlgo3", true), "PFAlgo3" >> getParDesc("pfAlgo") or @@ -576,7 +579,7 @@ void L1TCorrelatorLayer1Producer::produce(edm::Event &iEvent, const edm::EventSe // get a global reference to the EGSta before being mixed among differente regions std::vector>> egsta_refs; if (l1tkegalgo_->writeEgSta()) { - putEgStaObjects(iEvent, "L1Eg", egsta_refs); + putEgStaObjects(iEvent, "L1Eg"); } // l1tkegsorter_->setDebug(true); @@ -592,7 +595,7 @@ void L1TCorrelatorLayer1Producer::produce(edm::Event &iEvent, const edm::EventSe putPuppi(iEvent); // save the EG objects - putEgObjects(iEvent, l1tkegalgo_->writeEgSta(), egsta_refs, "L1TkEm", "L1TkEmPerBoard", "L1TkEle", "L1TkElePerBoard"); + 
putEgObjects(iEvent, l1tkegalgo_->writeEgSta(), "L1TkEm", "L1TkEmPerBoard", "L1TkEle", "L1TkElePerBoard"); // Then go do the multiplicities for (int i = 0; i <= l1muType; ++i) { @@ -1102,22 +1105,13 @@ void L1TCorrelatorLayer1Producer::putPuppi(edm::Event &iEvent) const { iEvent.put(std::move(reg), "PuppiRegional"); } -// NOTE: as a side effect we change the "sta_idx" of TkEle and TkEm objects to an index of the -// vector of refs, for this reason this is not const. We could make this more explicit via arguments -void L1TCorrelatorLayer1Producer::putEgStaObjects(edm::Event &iEvent, - const std::string &egLablel, - std::vector>> &egsta_refs) { +void L1TCorrelatorLayer1Producer::putEgStaObjects(edm::Event &iEvent, const std::string &egLablel) const { auto egs = std::make_unique>(); - edm::RefProd> ref_egs = iEvent.getRefBeforePut>(egLablel); - - edm::Ref>::key_type idx = 0; // FIXME: in case more BXes are introduced shuld probably use egs->key(egs->end(bx)); for (unsigned int ir = 0, nr = event_.pfinputs.size(); ir < nr; ++ir) { const auto ® = event_.pfinputs[ir].region; - std::vector ref_pos(event_.out[ir].egsta.size()); - // EG standalone objects for (unsigned int ieg = 0, neg = event_.out[ir].egsta.size(); ieg < neg; ++ieg) { const auto &p = event_.out[ir].egsta[ieg]; @@ -1127,20 +1121,6 @@ void L1TCorrelatorLayer1Producer::putEgStaObjects(edm::Event &iEvent, reco::Candidate::PolarLorentzVector(p.floatPt(), reg.floatGlbEta(p.hwEta), reg.floatGlbPhi(p.hwPhi), 0.)); eg.setHwQual(p.hwQual); egs->push_back(0, eg); - egsta_refs.push_back(edm::Ref>(ref_egs, idx++)); - ref_pos[ieg] = egsta_refs.size() - 1; - } - - for (auto &egiso : event_.out[ir].egphoton) { - if (egiso.hwPt == 0) - continue; - egiso.sta_idx = ref_pos[egiso.sta_idx]; - } - - for (auto &egele : event_.out[ir].egelectron) { - if (egele.hwPt == 0) - continue; - egele.sta_idx = ref_pos[egele.sta_idx]; } } @@ -1149,7 +1129,6 @@ void L1TCorrelatorLayer1Producer::putEgStaObjects(edm::Event &iEvent, void 
L1TCorrelatorLayer1Producer::putEgObjects(edm::Event &iEvent, const bool writeEgSta, - const std::vector>> &egsta_refs, const std::string &tkEmLabel, const std::string &tkEmPerBoardLabel, const std::string &tkEleLabel, @@ -1172,25 +1151,16 @@ void L1TCorrelatorLayer1Producer::putEgObjects(edm::Event &iEvent, if (egiso.hwPt == 0) continue; - edm::Ref> ref_egsta; - if (writeEgSta) { - ref_egsta = egsta_refs[egiso.sta_idx]; - } else { - auto egptr = egiso.srcCluster->constituentsAndFractions()[0].first; - ref_egsta = - edm::Ref>(egptr.id(), dynamic_cast(egptr.get()), egptr.key()); - } - reco::Candidate::PolarLorentzVector mom(egiso.floatPt(), egiso.floatEta(), egiso.floatPhi(), 0.); l1t::TkEm tkem(reco::Candidate::LorentzVector(mom), - ref_egsta, + egiso.srcCluster->constituentsAndFractions()[0].first, egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::TkIso), egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::TkIsoPV)); tkem.setHwQual(egiso.hwQual); tkem.setPFIsol(egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::PfIso)); tkem.setPFIsolPV(egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::PfIsoPV)); - tkem.setEgBinaryWord(egiso.pack()); + tkem.setEgBinaryWord(egiso.pack(), l1t::TkEm::HWEncoding::CT); tkems->push_back(tkem); npho_obj.push_back(tkems->size() - 1); } @@ -1201,25 +1171,17 @@ void L1TCorrelatorLayer1Producer::putEgObjects(edm::Event &iEvent, if (egele.hwPt == 0) continue; - edm::Ref> ref_egsta; - if (writeEgSta) { - ref_egsta = egsta_refs[egele.sta_idx]; - } else { - auto egptr = egele.srcCluster->constituentsAndFractions()[0].first; - ref_egsta = - edm::Ref>(egptr.id(), dynamic_cast(egptr.get()), egptr.key()); - } - - reco::Candidate::PolarLorentzVector mom(egele.floatPt(), egele.floatEta(), egele.floatPhi(), 0.); + reco::Candidate::PolarLorentzVector mom(egele.floatPt(), egele.floatVtxEta(), egele.floatVtxPhi(), 0.); l1t::TkElectron tkele(reco::Candidate::LorentzVector(mom), - ref_egsta, + egele.srcCluster->constituentsAndFractions()[0].first, 
edm::refToPtr(egele.srcTrack->track()), egele.floatRelIso(l1ct::EGIsoEleObjEmu::IsoType::TkIso)); tkele.setHwQual(egele.hwQual); tkele.setPFIsol(egele.floatRelIso(l1ct::EGIsoEleObjEmu::IsoType::PfIso)); - tkele.setEgBinaryWord(egele.pack()); - tkele.setIdScore(egele.idScore); + tkele.setEgBinaryWord(egele.pack(), l1t::TkElectron::HWEncoding::CT); + tkele.setIdScore(egele.floatIDScore()); + tkele.setCharge(egele.intCharge()); tkeles->push_back(tkele); nele_obj.push_back(tkeles->size() - 1); } diff --git a/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCtL2EgProducer.cc b/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCtL2EgProducer.cc index 6097408bf20e3..25360347e03c6 100644 --- a/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCtL2EgProducer.cc +++ b/L1Trigger/Phase2L1ParticleFlow/plugins/L1TCtL2EgProducer.cc @@ -44,16 +44,13 @@ class L1TCtL2EgProducer : public edm::global::EDProducer<> { void endJob() override; - struct RefRemapper { - typedef TTTrack L1TTTrackType; + typedef TTTrack L1TTTrackType; + typedef std::vector, edm::Ptr>> ConstituentPtrVector; - BXVector>> oldRefs; - std::map>, edm::Ref>> old2newRefMap; - std::vector, edm::Ptr>> origRefAndPtr; - }; - - void convertToEmu(const l1t::TkElectron &tkele, RefRemapper &refRemapper, l1ct::OutputBoard &boarOut) const; - void convertToEmu(const l1t::TkEm &tkele, RefRemapper &refRemapper, l1ct::OutputBoard &boarOut) const; + void convertToEmu(const l1t::TkElectron &tkele, + ConstituentPtrVector &constituentsPtrs, + l1ct::OutputBoard &boarOut) const; + void convertToEmu(const l1t::TkEm &tkele, ConstituentPtrVector &constituentsPtrs, l1ct::OutputBoard &boarOut) const; void convertToPuppi(const l1t::PFCandidateCollection &l1PFCands, l1ct::PuppiObjs &puppiObjs) const; template @@ -115,24 +112,12 @@ class L1TCtL2EgProducer : public edm::global::EDProducer<> { template void merge(const PFInstanceInputs &instance, edm::Event &iEvent, - RefRemapper &refRemapper, + ConstituentPtrVector &constituentsPtrs, std::unique_ptr &out) const { 
edm::Handle handle; for (const auto &tokenAndChannel : instance.tokensAndChannels()) { iEvent.getByToken(tokenAndChannel.first, handle); - populate(out, handle, tokenAndChannel.second, refRemapper); - } - remapRefs(iEvent, out, refRemapper); - } - - template - void remapRefs(edm::Event &iEvent, std::unique_ptr &out, RefRemapper &refRemapper) const {} - - void remapRefs(edm::Event &iEvent, std::unique_ptr> &out, RefRemapper &refRemapper) const { - edm::RefProd> ref_egs = iEvent.getRefBeforePut>(tkEGInstanceLabel_); - edm::Ref>::key_type idx = 0; - for (std::size_t ix = 0; ix < out->size(); ix++) { - refRemapper.old2newRefMap[refRemapper.oldRefs[ix]] = edm::Ref>(ref_egs, idx++); + populate(out, handle, tokenAndChannel.second, constituentsPtrs); } } @@ -140,7 +125,7 @@ class L1TCtL2EgProducer : public edm::global::EDProducer<> { void populate(std::unique_ptr &out, const edm::Handle &in, const std::vector &links, - RefRemapper &refRemapper) const { + ConstituentPtrVector &constituentsPtrs) const { assert(links.size() == in->nRegions()); for (unsigned int iBoard = 0, nBoard = in->nRegions(); iBoard < nBoard; ++iBoard) { auto region = in->region(iBoard); @@ -149,7 +134,7 @@ class L1TCtL2EgProducer : public edm::global::EDProducer<> { continue; // std::cout << "Board eta: " << in->eta(iBoard) << " phi: " << in->phi(iBoard) << " link: " << linkID << std::endl; for (const auto &obj : region) { - convertToEmu(obj, refRemapper, out->at(linkID)); + convertToEmu(obj, constituentsPtrs, out->at(linkID)); } } } @@ -157,36 +142,31 @@ class L1TCtL2EgProducer : public edm::global::EDProducer<> { void populate(std::unique_ptr> &out, const edm::Handle> &in, const std::vector &links, - RefRemapper &refRemapper) const { - edm::Ref>::key_type idx = 0; + ConstituentPtrVector &constituentsPtrs) const { for (int bx = in->getFirstBX(); bx <= in->getLastBX(); bx++) { for (auto egee_itr = in->begin(bx); egee_itr != in->end(bx); egee_itr++) { out->push_back(bx, *egee_itr); - // this to ensure 
that the old ref and the new object have the same - // index in the BXVector collection so that we can still match them no - // matter which BX we will insert next - refRemapper.oldRefs.push_back(bx, edm::Ref>(in, idx++)); } } } template void putEgObjects(edm::Event &iEvent, - const RefRemapper &refRemapper, + const ConstituentPtrVector &constituentsPtrs, const std::string &label, const std::vector emulated) const { auto egobjs = std::make_unique(); for (const auto &emu : emulated) { if (emu.hwPt == 0) continue; - auto obj = convertFromEmu(emu, refRemapper); + auto obj = convertFromEmu(emu, constituentsPtrs); egobjs->push_back(obj); } iEvent.put(std::move(egobjs), label); } - l1t::TkEm convertFromEmu(const l1ct::EGIsoObjEmu &emu, const RefRemapper &refRemapper) const; - l1t::TkElectron convertFromEmu(const l1ct::EGIsoEleObjEmu &emu, const RefRemapper &refRemapper) const; + l1t::TkEm convertFromEmu(const l1ct::EGIsoObjEmu &emu, const ConstituentPtrVector &constituentsPtrs) const; + l1t::TkElectron convertFromEmu(const l1ct::EGIsoEleObjEmu &emu, const ConstituentPtrVector &constituentsPtrs) const; PFInstanceInputs> tkEGInputs_; PFInstanceInputs tkEmInputs_; @@ -283,16 +263,16 @@ std::vector> L1TCtL2EgProducer::encodeLayer1EgObjs(unsigned int nObj } void L1TCtL2EgProducer::produce(edm::StreamID, edm::Event &iEvent, const edm::EventSetup &) const { - RefRemapper refmapper; + ConstituentPtrVector constituents; auto outEgs = std::make_unique>(); - merge(tkEGInputs_, iEvent, refmapper, outEgs); + merge(tkEGInputs_, iEvent, constituents, outEgs); iEvent.put(std::move(outEgs), tkEGInstanceLabel_); auto boards = std::make_unique>(l2egsorter.nInputBoards()); - merge(tkEleInputs_, iEvent, refmapper, boards); - merge(tkEmInputs_, iEvent, refmapper, boards); + merge(tkEleInputs_, iEvent, constituents, boards); + merge(tkEmInputs_, iEvent, constituents, boards); if (doInPtrn_) { l1t::demo::EventData inData; @@ -321,8 +301,8 @@ void L1TCtL2EgProducer::produce(edm::StreamID, 
edm::Event &iEvent, const edm::Ev outPtrnWrt_->addEvent(outData); } - putEgObjects(iEvent, refmapper, tkEmInstanceLabel_, out_photons_emu); - putEgObjects(iEvent, refmapper, tkEleInstanceLabel_, out_eles_emu); + putEgObjects(iEvent, constituents, tkEmInstanceLabel_, out_photons_emu); + putEgObjects(iEvent, constituents, tkEleInstanceLabel_, out_eles_emu); } void L1TCtL2EgProducer::endJob() { @@ -334,49 +314,39 @@ void L1TCtL2EgProducer::endJob() { } void L1TCtL2EgProducer::convertToEmu(const l1t::TkElectron &tkele, - RefRemapper &refRemapper, + ConstituentPtrVector &constituentsPtrs, l1ct::OutputBoard &boarOut) const { EGIsoEleObjEmu emu; emu.initFromBits(tkele.egBinaryWord()); emu.srcCluster = nullptr; emu.srcTrack = nullptr; - auto refEg = tkele.EGRef(); - const auto newref = refRemapper.old2newRefMap.find(refEg); - if (newref != refRemapper.old2newRefMap.end()) { - refEg = newref->second; - } - refRemapper.origRefAndPtr.push_back(std::make_pair(refEg, tkele.trkPtr())); - emu.sta_idx = refRemapper.origRefAndPtr.size() - 1; + + constituentsPtrs.push_back(std::make_pair(tkele.egCaloPtr(), tkele.trkPtr())); + emu.src_idx = constituentsPtrs.size() - 1; + // NOTE: The emulator and FW data-format stores absolute iso while the CMSSW object stores relative iso emu.setHwIso(EGIsoEleObjEmu::IsoType::TkIso, l1ct::Scales::makeIso(tkele.trkIsol() * tkele.pt())); emu.setHwIso(EGIsoEleObjEmu::IsoType::PfIso, l1ct::Scales::makeIso(tkele.pfIsol() * tkele.pt())); emu.setHwIso(EGIsoEleObjEmu::IsoType::PuppiIso, l1ct::Scales::makeIso(tkele.puppiIsol() * tkele.pt())); - // std::cout << "[convertToEmu] TkEle pt: " << emu.hwPt << " eta: " << emu.hwEta << " phi: " << emu.hwPhi << " staidx: " << emu.sta_idx << std::endl; - // FIXME: this is temporary while waiting to move the BDT score to the FW object - emu.idScore = tkele.idScore(); + // std::cout << "[convertToEmu] TkEle pt: " << emu.hwPt << " eta: " << emu.hwEta << " phi: " << emu.hwPhi << " staidx: " << emu.src_idx << std::endl; 
boarOut.egelectron.push_back(emu); } void L1TCtL2EgProducer::convertToEmu(const l1t::TkEm &tkem, - RefRemapper &refRemapper, + ConstituentPtrVector &constituentsPtrs, l1ct::OutputBoard &boarOut) const { EGIsoObjEmu emu; emu.initFromBits(tkem.egBinaryWord()); emu.srcCluster = nullptr; - auto refEg = tkem.EGRef(); - const auto newref = refRemapper.old2newRefMap.find(refEg); - if (newref != refRemapper.old2newRefMap.end()) { - refEg = newref->second; - } - refRemapper.origRefAndPtr.push_back(std::make_pair(refEg, edm::Ptr(nullptr, 0))); - emu.sta_idx = refRemapper.origRefAndPtr.size() - 1; + constituentsPtrs.push_back(std::make_pair(tkem.egCaloPtr(), edm::Ptr())); + emu.src_idx = constituentsPtrs.size() - 1; // NOTE: The emulator and FW data-format stores absolute iso while the CMSSW object stores relative iso emu.setHwIso(EGIsoObjEmu::IsoType::TkIso, l1ct::Scales::makeIso(tkem.trkIsol() * tkem.pt())); emu.setHwIso(EGIsoObjEmu::IsoType::PfIso, l1ct::Scales::makeIso(tkem.pfIsol() * tkem.pt())); emu.setHwIso(EGIsoObjEmu::IsoType::PuppiIso, l1ct::Scales::makeIso(tkem.puppiIsol() * tkem.pt())); emu.setHwIso(EGIsoObjEmu::IsoType::TkIsoPV, l1ct::Scales::makeIso(tkem.trkIsolPV() * tkem.pt())); emu.setHwIso(EGIsoObjEmu::IsoType::PfIsoPV, l1ct::Scales::makeIso(tkem.pfIsolPV() * tkem.pt())); - // std::cout << "[convertToEmu] TkEM pt: " << emu.hwPt << " eta: " << emu.hwEta << " phi: " << emu.hwPhi << " staidx: " << emu.sta_idx << std::endl; + // std::cout << "[convertToEmu] TkEM pt: " << emu.hwPt << " eta: " << emu.hwEta << " phi: " << emu.hwPhi << " staidx: " << emu.src_idx << std::endl; boarOut.egphoton.push_back(emu); } @@ -388,42 +358,44 @@ void L1TCtL2EgProducer::convertToPuppi(const l1t::PFCandidateCollection &l1PFCan } } -l1t::TkEm L1TCtL2EgProducer::convertFromEmu(const l1ct::EGIsoObjEmu &egiso, const RefRemapper &refRemapper) const { - // std::cout << "[convertFromEmu] TkEm pt: " << egiso.hwPt << " eta: " << egiso.hwEta << " phi: " << egiso.hwPhi << " staidx: " << 
egiso.sta_idx << std::endl; +l1t::TkEm L1TCtL2EgProducer::convertFromEmu(const l1ct::EGIsoObjEmu &egiso, + const ConstituentPtrVector &constituentsPtrs) const { + // std::cout << "[convertFromEmu] TkEm pt: " << egiso.hwPt << " eta: " << egiso.hwEta << " phi: " << egiso.hwPhi << " staidx: " << egiso.src_idx << std::endl; // NOTE: the TkEM object is created with the accuracy as in GT object (not the Correlator internal one)! const auto gteg = egiso.toGT(); reco::Candidate::PolarLorentzVector mom( l1gt::Scales::floatPt(gteg.v3.pt), l1gt::Scales::floatEta(gteg.v3.eta), l1gt::Scales::floatPhi(gteg.v3.phi), 0.); // NOTE: The emulator and FW data-format stores absolute iso while the CMSSW object stores relative iso l1t::TkEm tkem(reco::Candidate::LorentzVector(mom), - refRemapper.origRefAndPtr[egiso.sta_idx].first, + constituentsPtrs[egiso.src_idx].first, egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::TkIso), egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::TkIsoPV)); tkem.setHwQual(gteg.quality); tkem.setPFIsol(egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::PfIso)); tkem.setPFIsolPV(egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::PfIsoPV)); tkem.setPuppiIsol(egiso.floatRelIso(l1ct::EGIsoObjEmu::IsoType::PuppiIso)); - tkem.setEgBinaryWord(gteg.pack()); + tkem.setEgBinaryWord(gteg.pack(), l1t::TkEm::HWEncoding::GT); return tkem; } l1t::TkElectron L1TCtL2EgProducer::convertFromEmu(const l1ct::EGIsoEleObjEmu &egele, - const RefRemapper &refRemapper) const { - // std::cout << "[convertFromEmu] TkEle pt: " << egele.hwPt << " eta: " << egele.hwEta << " phi: " << egele.hwPhi << " staidx: " << egele.sta_idx << std::endl; + const ConstituentPtrVector &constituentsPtrs) const { + // std::cout << "[convertFromEmu] TkEle pt: " << egele.hwPt << " eta: " << egele.hwEta << " phi: " << egele.hwPhi << " staidx: " << egele.src_idx << std::endl; // NOTE: the TkElectron object is created with the accuracy as in GT object (not the Correlator internal one)! 
const auto gteg = egele.toGT(); reco::Candidate::PolarLorentzVector mom( l1gt::Scales::floatPt(gteg.v3.pt), l1gt::Scales::floatEta(gteg.v3.eta), l1gt::Scales::floatPhi(gteg.v3.phi), 0.); // NOTE: The emulator and FW data-format stores absolute iso while the CMSSW object stores relative iso l1t::TkElectron tkele(reco::Candidate::LorentzVector(mom), - refRemapper.origRefAndPtr[egele.sta_idx].first, - refRemapper.origRefAndPtr[egele.sta_idx].second, + constituentsPtrs[egele.src_idx].first, + constituentsPtrs[egele.src_idx].second, egele.floatRelIso(l1ct::EGIsoEleObjEmu::IsoType::TkIso)); tkele.setHwQual(gteg.quality); tkele.setPFIsol(egele.floatRelIso(l1ct::EGIsoEleObjEmu::IsoType::PfIso)); tkele.setPuppiIsol(egele.floatRelIso(l1ct::EGIsoEleObjEmu::IsoType::PuppiIso)); - tkele.setEgBinaryWord(gteg.pack()); - tkele.setIdScore(egele.idScore); + tkele.setEgBinaryWord(gteg.pack(), l1t::TkElectron::HWEncoding::GT); + tkele.setIdScore(egele.floatIDScore()); + tkele.setCharge(egele.intCharge()); return tkele; } diff --git a/L1Trigger/Phase2L1ParticleFlow/plugins/L1TEGMultiMerger.cc b/L1Trigger/Phase2L1ParticleFlow/plugins/L1TEGMultiMerger.cc index fd91a0f7bdda6..7fab2ccc495ed 100644 --- a/L1Trigger/Phase2L1ParticleFlow/plugins/L1TEGMultiMerger.cc +++ b/L1Trigger/Phase2L1ParticleFlow/plugins/L1TEGMultiMerger.cc @@ -23,11 +23,6 @@ class L1TEGMultiMerger : public edm::global::EDProducer<> { private: void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; - struct RefRemapper { - BXVector>> oldRefs; - std::map>, edm::Ref>> old2newRefMap; - }; - template class InstanceMerger { public: @@ -41,51 +36,26 @@ class L1TEGMultiMerger : public edm::global::EDProducer<> { prod->produces(instanceLabel_); } - void produce(edm::Event& iEvent, RefRemapper& refRemapper) const { + void produce(edm::Event& iEvent) const { edm::Handle handle; auto out = std::make_unique(); for (const auto& token : tokens_) { iEvent.getByToken(token, handle); - populate(out, handle, 
refRemapper); + populate(out, handle); } - remapRefs(iEvent, out, refRemapper); iEvent.put(std::move(out), instanceLabel_); } private: template - void remapRefs(edm::Event& iEvent, std::unique_ptr& out, RefRemapper& refRemapper) const { - for (auto& egobj : *out) { - auto newref = refRemapper.old2newRefMap.find(egobj.EGRef()); - if (newref != refRemapper.old2newRefMap.end()) { - egobj.setEGRef(newref->second); - } - } - } - - void remapRefs(edm::Event& iEvent, std::unique_ptr>& out, RefRemapper& refRemapper) const { - edm::RefProd> ref_egs = iEvent.getRefBeforePut>(instanceLabel_); - edm::Ref>::key_type idx = 0; - for (std::size_t ix = 0; ix < out->size(); ix++) { - refRemapper.old2newRefMap[refRemapper.oldRefs[ix]] = edm::Ref>(ref_egs, idx++); - } - } - - template - void populate(std::unique_ptr& out, const edm::Handle& in, RefRemapper& refRemapper) const { + void populate(std::unique_ptr& out, const edm::Handle& in) const { out->insert(out->end(), in->begin(), in->end()); } - void populate(std::unique_ptr>& out, - const edm::Handle>& in, - RefRemapper& refRemapper) const { - edm::Ref>::key_type idx = 0; + void populate(std::unique_ptr>& out, const edm::Handle>& in) const { for (int bx = in->getFirstBX(); bx <= in->getLastBX(); bx++) { for (auto egee_itr = in->begin(bx); egee_itr != in->end(bx); egee_itr++) { out->push_back(bx, *egee_itr); - // this to ensure that the old ref and the new object have the same index in the BXVector collection so that we can still match them - // no matter which BX we will insert next - refRemapper.oldRefs.push_back(bx, edm::Ref>(in, idx++)); } } } @@ -114,13 +84,12 @@ L1TEGMultiMerger::L1TEGMultiMerger(const edm::ParameterSet& conf) { L1TEGMultiMerger::~L1TEGMultiMerger() {} void L1TEGMultiMerger::produce(edm::StreamID, edm::Event& iEvent, const edm::EventSetup&) const { - RefRemapper refmapper; for (const auto& egMerger : tkEGMerger) - egMerger.produce(iEvent, refmapper); + egMerger.produce(iEvent); for (const auto& eleMerger : 
tkEleMerger) - eleMerger.produce(iEvent, refmapper); + eleMerger.produce(iEvent); for (const auto& emMerger : tkEmMerger) - emMerger.produce(iEvent, refmapper); + emMerger.produce(iEvent); } void L1TEGMultiMerger::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { diff --git a/L1Trigger/Phase2L1ParticleFlow/python/L1BJetProducer_cff.py b/L1Trigger/Phase2L1ParticleFlow/python/L1BJetProducer_cff.py index 731d661cfd6c1..75904f145c4c2 100644 --- a/L1Trigger/Phase2L1ParticleFlow/python/L1BJetProducer_cff.py +++ b/L1Trigger/Phase2L1ParticleFlow/python/L1BJetProducer_cff.py @@ -4,7 +4,7 @@ from L1Trigger.Phase2L1ParticleFlow.L1BJetProducer_cfi import L1BJetProducer l1tBJetProducerPuppi = L1BJetProducer.clone( - jets = ("l1tSCPFL1PuppiExtended", ""), + jets = ("l1tSC4PFL1PuppiExtended", ""), maxJets = 6, minPt = 10, vtx = ("l1tVertexFinderEmulator","L1VerticesEmulation") @@ -12,7 +12,7 @@ l1tBJetProducerPuppiCorrectedEmulator = l1tBJetProducerPuppi.clone( - jets = ("l1tSCPFL1PuppiExtendedCorrectedEmulator", "") + jets = ("l1tSC4PFL1PuppiExtendedCorrectedEmulator", "") ) L1TBJetsTask = cms.Task( diff --git a/L1Trigger/Phase2L1ParticleFlow/python/l1TkEgAlgoEmulator_cfi.py b/L1Trigger/Phase2L1ParticleFlow/python/l1TkEgAlgoEmulator_cfi.py index 4b267ff987db1..2fb19ea8b56eb 100644 --- a/L1Trigger/Phase2L1ParticleFlow/python/l1TkEgAlgoEmulator_cfi.py +++ b/L1Trigger/Phase2L1ParticleFlow/python/l1TkEgAlgoEmulator_cfi.py @@ -1,6 +1,7 @@ import FWCore.ParameterSet.Config as cms tkEgAlgoParameters = cms.PSet( + # debug=cms.untracked.uint32(4), nTRACK=cms.uint32(50), # very large numbers for first test nTRACK_EGIN=cms.uint32(50), # very large numbers for first test nEMCALO_EGIN=cms.uint32(50), # very large numbers for first test @@ -23,39 +24,36 @@ dZ=cms.double(0.6), dRMin=cms.double(0.07), dRMax=cms.double(0.30), - tkQualityChi2Max=cms.double(100), ), tkIsoParametersTkEle=cms.PSet( tkQualityPtMin=cms.double(2.), dZ=cms.double(0.6), dRMin=cms.double(0.03), 
dRMax=cms.double(0.20), - tkQualityChi2Max=cms.double(1e10), ), pfIsoParametersTkEm=cms.PSet( tkQualityPtMin=cms.double(1.), dZ=cms.double(0.6), dRMin=cms.double(0.07), dRMax=cms.double(0.30), - tkQualityChi2Max=cms.double(100), ), pfIsoParametersTkEle=cms.PSet( tkQualityPtMin=cms.double(1.), dZ=cms.double(0.6), dRMin=cms.double(0.03), dRMax=cms.double(0.20), - tkQualityChi2Max=cms.double(1e10), ), doTkIso=cms.bool(True), doPfIso=cms.bool(True), hwIsoTypeTkEle=cms.uint32(0), - hwIsoTypeTkEm=cms.uint32(2), + hwIsoTypeTkEm=cms.uint32(0), doCompositeTkEle=cms.bool(False), nCompCandPerCluster=cms.uint32(3), compositeParametersTkEle=cms.PSet( - # the working points are cuts on BDT output logits log(p/1-p) - loose_wp=cms.double(-0.732422), - tight_wp=cms.double(0.214844), + # NOTE: conifer BDT score is log(p/1-p) + # the working points are cuts on BDT output logits [log(p/1-p)]/4 (range -1 to 1 to match the FW dataformat) + loose_wp=cms.double(-0.181641), + tight_wp=cms.double(0.0527344), model=cms.string("L1Trigger/Phase2L1ParticleFlow/data/compositeID.json") ), ) diff --git a/L1Trigger/Phase2L1ParticleFlow/python/l1pfJetMet_cff.py b/L1Trigger/Phase2L1ParticleFlow/python/l1pfJetMet_cff.py index 1ca27e1cf018f..423941d5bb4ae 100644 --- a/L1Trigger/Phase2L1ParticleFlow/python/l1pfJetMet_cff.py +++ b/L1Trigger/Phase2L1ParticleFlow/python/l1pfJetMet_cff.py @@ -3,13 +3,20 @@ from L1Trigger.Phase2L1ParticleFlow.l1SeedConePFJetProducer_cfi import l1SeedConePFJetProducer from L1Trigger.Phase2L1ParticleFlow.l1SeedConePFJetEmulatorProducer_cfi import l1SeedConePFJetEmulatorProducer from L1Trigger.Phase2L1ParticleFlow.l1tDeregionizerProducer_cfi import l1tDeregionizerProducer as l1tLayer2Deregionizer, l1tDeregionizerProducerExtended as l1tLayer2DeregionizerExtended -l1tSCPFL1PF = l1SeedConePFJetProducer.clone(L1PFObjects = 'l1tLayer1:PF') -l1tSCPFL1Puppi = l1SeedConePFJetProducer.clone() -l1tSCPFL1PuppiEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 
'l1tLayer2Deregionizer:Puppi') -l1tSCPFL1PuppiCorrectedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2Deregionizer:Puppi', - doCorrections = True, - correctorFile = "L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root", - correctorDir = "L1PuppiSC4EmuJets") +l1tSC4PFL1PF = l1SeedConePFJetProducer.clone(L1PFObjects = 'l1tLayer1:PF') +l1tSC4PFL1Puppi = l1SeedConePFJetProducer.clone() +l1tSC4PFL1PuppiEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2Deregionizer:Puppi') +l1tSC8PFL1PuppiEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2Deregionizer:Puppi', + coneSize = cms.double(0.8)) +l1tSC4PFL1PuppiCorrectedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2Deregionizer:Puppi', + doCorrections = cms.bool(True), + correctorFile = cms.string("L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root"), + correctorDir = cms.string('L1PuppiSC4EmuJets')) +l1tSC8PFL1PuppiCorrectedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2Deregionizer:Puppi', + coneSize = cms.double(0.8), + doCorrections = cms.bool(True), + correctorFile = cms.string("L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root"), + correctorDir = cms.string('L1PuppiSC4EmuJets')) _correctedJets = cms.EDProducer("L1TCorrectedPFJetProducer", jets = cms.InputTag("_tag_"), @@ -26,28 +33,25 @@ phase2_hgcalV11.toModify(_correctedJets, correctorFile = "L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root") from L1Trigger.Phase2L1ParticleFlow.l1tMHTPFProducer_cfi import l1tMHTPFProducer -l1tSCPFL1PuppiCorrectedEmulatorMHT = l1tMHTPFProducer.clone(jets = 'l1tSCPFL1PuppiCorrectedEmulator') +l1tSC4PFL1PuppiCorrectedEmulatorMHT = l1tMHTPFProducer.clone(jets = 'l1tSC4PFL1PuppiCorrectedEmulator') -L1TPFJetsTask = cms.Task( - l1tLayer2Deregionizer, l1tSCPFL1PF, l1tSCPFL1Puppi, l1tSCPFL1PuppiEmulator, l1tSCPFL1PuppiCorrectedEmulator, l1tSCPFL1PuppiCorrectedEmulatorMHT -) - 
-l1tSCPFL1PuppiExtended = l1SeedConePFJetProducer.clone(L1PFObjects = 'l1tLayer1Extended:Puppi') -l1tSCPFL1PuppiExtendedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2DeregionizerExtended:Puppi') -l1tSCPFL1PuppiExtendedCorrectedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2DeregionizerExtended:Puppi', - doCorrections = True, - correctorFile = "L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root", - correctorDir = "L1PuppiSC4EmuJets") +l1tSC4PFL1PuppiExtended = l1SeedConePFJetProducer.clone(L1PFObjects = 'l1tLayer1Extended:Puppi') +l1tSC4PFL1PuppiExtendedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2DeregionizerExtended:Puppi') +l1tSC4PFL1PuppiExtendedCorrectedEmulator = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = 'l1tLayer2DeregionizerExtended:Puppi', + doCorrections = cms.bool(True), + correctorFile = cms.string("L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root"), + correctorDir = cms.string('L1PuppiSC4EmuJets')) L1TPFJetsTask = cms.Task( - l1tLayer2Deregionizer, l1tSCPFL1PF, l1tSCPFL1Puppi, l1tSCPFL1PuppiEmulator, l1tSCPFL1PuppiCorrectedEmulator, l1tSCPFL1PuppiCorrectedEmulatorMHT + l1tLayer2Deregionizer, l1tSC4PFL1PF, l1tSC4PFL1Puppi, l1tSC4PFL1PuppiEmulator, l1tSC4PFL1PuppiCorrectedEmulator, l1tSC4PFL1PuppiCorrectedEmulatorMHT, + l1tSC8PFL1PuppiEmulator, l1tSC8PFL1PuppiCorrectedEmulator ) L1TPFJetsExtendedTask = cms.Task( - l1tLayer2DeregionizerExtended, l1tSCPFL1PuppiExtended, l1tSCPFL1PuppiExtendedEmulator, l1tSCPFL1PuppiExtendedCorrectedEmulator + l1tLayer2DeregionizerExtended, l1tSC4PFL1PuppiExtended, l1tSC4PFL1PuppiExtendedEmulator, l1tSC4PFL1PuppiExtendedCorrectedEmulator ) L1TPFJetsEmulationTask = cms.Task( - l1tLayer2Deregionizer, l1tSCPFL1PuppiEmulator, l1tSCPFL1PuppiCorrectedEmulator, l1tSCPFL1PuppiCorrectedEmulatorMHT + l1tLayer2Deregionizer, l1tSC4PFL1PuppiEmulator, l1tSC4PFL1PuppiCorrectedEmulator, l1tSC4PFL1PuppiCorrectedEmulatorMHT, + 
l1tSC8PFL1PuppiEmulator, l1tSC8PFL1PuppiCorrectedEmulator ) - diff --git a/L1Trigger/Phase2L1ParticleFlow/python/l1tJetFileWriter_cfi.py b/L1Trigger/Phase2L1ParticleFlow/python/l1tJetFileWriter_cfi.py index a53db5cb667fe..e450e0c57e317 100644 --- a/L1Trigger/Phase2L1ParticleFlow/python/l1tJetFileWriter_cfi.py +++ b/L1Trigger/Phase2L1ParticleFlow/python/l1tJetFileWriter_cfi.py @@ -1,9 +1,12 @@ import FWCore.ParameterSet.Config as cms l1tSeededConeJetFileWriter = cms.EDAnalyzer('L1CTJetFileWriter', - jets = cms.InputTag("l1tSCPFL1PuppiEmulatorCorrected"), - nJets = cms.uint32(12), + collections = cms.VPSet(cms.PSet(jets = cms.InputTag("l1tSC4PFL1PuppiCorrectedEmulator"), + nJets = cms.uint32(12), + mht = cms.InputTag("l1tSC4PFL1PuppiCorrectedEmulatorMHT"), + nSums = cms.uint32(1))), nFramesPerBX = cms.uint32(9), # 360 MHz clock or 25 Gb/s link + gapLengthOutput = cms.uint32(4), TMUX = cms.uint32(6), maxLinesPerFile = cms.uint32(1024), outputFilename = cms.string("L1CTSCJetsPatterns"), diff --git a/L1Trigger/Phase2L1ParticleFlow/python/l1tMHTPFProducer_cfi.py b/L1Trigger/Phase2L1ParticleFlow/python/l1tMHTPFProducer_cfi.py index 31e3f57591c9e..e51ca85feafb4 100644 --- a/L1Trigger/Phase2L1ParticleFlow/python/l1tMHTPFProducer_cfi.py +++ b/L1Trigger/Phase2L1ParticleFlow/python/l1tMHTPFProducer_cfi.py @@ -1,7 +1,7 @@ import FWCore.ParameterSet.Config as cms l1tMHTPFProducer = cms.EDProducer("L1MhtPfProducer", - jets = cms.InputTag("l1tSCPFL1PuppiEmulator"), + jets = cms.InputTag("l1tSC4PFL1PuppiEmulator"), minJetPt = cms.double(30.0), maxJetEta = cms.double(2.4) ) diff --git a/L1Trigger/Phase2L1ParticleFlow/src/egamma/pftkegalgo_ref.cpp b/L1Trigger/Phase2L1ParticleFlow/src/egamma/pftkegalgo_ref.cpp index 097ec687420d7..6b32c57848782 100644 --- a/L1Trigger/Phase2L1ParticleFlow/src/egamma/pftkegalgo_ref.cpp +++ b/L1Trigger/Phase2L1ParticleFlow/src/egamma/pftkegalgo_ref.cpp @@ -107,7 +107,6 @@ edm::ParameterSetDescription l1ct::PFTkEGAlgoEmuConfig::IsoParameters::getParame 
description.add("dZ", 0.6); description.add("dRMin"); description.add("dRMax"); - description.add("tkQualityChi2Max"); return description; } @@ -248,7 +247,7 @@ void PFTkEGAlgoEmulator::link_emCalo2tk_composite(const PFRegionEmu &r, const std::vector &emcalo, const std::vector &track, std::vector &emCalo2tk, - std::vector &emCaloTkBdtScore) const { + std::vector &emCaloTkBdtScore) const { unsigned int nTrackMax = std::min(track.size(), cfg.nTRACK_EGIN); for (int ic = 0, nc = emcalo.size(); ic < nc; ++ic) { auto &calo = emcalo[ic]; @@ -281,12 +280,12 @@ void PFTkEGAlgoEmulator::link_emCalo2tk_composite(const PFRegionEmu &r, if (nCandPerCluster == 0) continue; - float maxScore = -999; + id_score_t maxScore = -pow(2, l1ct::id_score_t::iwidth - 1); int ibest = -1; for (unsigned int icand = 0; icand < nCandPerCluster; icand++) { auto &cand = candidates[icand]; const std::vector &emcalo_sel = emcalo; - float score = compute_composite_score(cand, emcalo_sel, track, cfg.compIDparams); + id_score_t score = compute_composite_score(cand, emcalo_sel, track, cfg.compIDparams); if ((score > cfg.compIDparams.bdtScore_loose_wp) && (score > maxScore)) { maxScore = score; ibest = icand; @@ -299,10 +298,10 @@ void PFTkEGAlgoEmulator::link_emCalo2tk_composite(const PFRegionEmu &r, } } -float PFTkEGAlgoEmulator::compute_composite_score(CompositeCandidate &cand, - const std::vector &emcalo, - const std::vector &track, - const PFTkEGAlgoEmuConfig::CompIDParameters ¶ms) const { +id_score_t PFTkEGAlgoEmulator::compute_composite_score(CompositeCandidate &cand, + const std::vector &emcalo, + const std::vector &track, + const PFTkEGAlgoEmuConfig::CompIDParameters ¶ms) const { // Get the cluster/track objects that form the composite candidate const auto &calo = emcalo[cand.cluster_idx]; const auto &tk = track[cand.track_idx]; @@ -325,7 +324,7 @@ float PFTkEGAlgoEmulator::compute_composite_score(CompositeCandidate &cand, std::vector inputs = {tkpt, hoe, srrtot, deta, dphi, dpt, meanz, nstubs, 
chi2rphi, chi2rz, chi2bend}; std::vector bdt_score = composite_bdt_->decision_function(inputs); - return bdt_score[0]; + return bdt_score[0] / 4; } void PFTkEGAlgoEmulator::sel_emCalo(unsigned int nmax_sel, @@ -364,7 +363,7 @@ void PFTkEGAlgoEmulator::run(const PFInputRegion &in, OutputRegion &out) const { link_emCalo2emCalo(emcalo_sel, emCalo2emCalo); std::vector emCalo2tk(emcalo_sel.size(), -1); - std::vector emCaloTkBdtScore(emcalo_sel.size(), -999); + std::vector emCaloTkBdtScore(emcalo_sel.size(), 0); if (cfg.doCompositeTkEle) { link_emCalo2tk_composite(in.region, emcalo_sel, in.track, emCalo2tk, emCaloTkBdtScore); @@ -392,7 +391,7 @@ void PFTkEGAlgoEmulator::eg_algo(const PFRegionEmu ®ion, const std::vector &track, const std::vector &emCalo2emCalo, const std::vector &emCalo2tk, - const std::vector &emCaloTkBdtScore, + const std::vector &emCaloTkBdtScore, std::vector &egstas, std::vector &egobjs, std::vector &egeleobjs) const { @@ -408,7 +407,7 @@ void PFTkEGAlgoEmulator::eg_algo(const PFRegionEmu ®ion, << " phi " << calo.hwPhi << std::endl; int itk = emCalo2tk[ic]; - float bdt = emCaloTkBdtScore[ic]; + const id_score_t &bdt = emCaloTkBdtScore[ic]; // check if brem recovery is on if (!cfg.doBremRecovery || cfg.writeBeforeBremRecovery) { @@ -443,7 +442,6 @@ void PFTkEGAlgoEmulator::eg_algo(const PFRegionEmu ®ion, } // 2. create EG objects with brem recovery - // NOTE: duplicating the object is suboptimal but this is done for keeping things as in TDR code... 
addEgObjsToPF(egstas, egobjs, egeleobjs, emcalo, track, ic, calo.hwEmID, ptBremReco, itk, bdt, components); } } @@ -499,7 +497,7 @@ EGIsoEleObjEmu &PFTkEGAlgoEmulator::addEGIsoEleToPF(std::vector const TkObjEmu &track, const unsigned int hwQual, const pt_t ptCorr, - const float bdtScore) const { + const id_score_t bdtScore) const { EGIsoEleObjEmu egiso; egiso.clear(); egiso.hwPt = ptCorr; @@ -523,7 +521,7 @@ EGIsoEleObjEmu &PFTkEGAlgoEmulator::addEGIsoEleToPF(std::vector egiso.hwCharge = track.hwCharge; egiso.srcCluster = calo.src; egiso.srcTrack = track.src; - egiso.idScore = bdtScore; + egiso.hwIDScore = bdtScore; egobjs.push_back(egiso); if (debug_ > 2) @@ -542,18 +540,18 @@ void PFTkEGAlgoEmulator::addEgObjsToPF(std::vector &egstas, const unsigned int hwQual, const pt_t ptCorr, const int tk_idx, - const float bdtScore, + const id_score_t bdtScore, const std::vector &components) const { - int sta_idx = -1; + int src_idx = -1; if (writeEgSta()) { addEGStaToPF(egstas, emcalo[calo_idx], hwQual, ptCorr, components); - sta_idx = egstas.size() - 1; + src_idx = egstas.size() - 1; } EGIsoObjEmu &egobj = addEGIsoToPF(egobjs, emcalo[calo_idx], hwQual, ptCorr); - egobj.sta_idx = sta_idx; + egobj.src_idx = src_idx; if (tk_idx != -1) { EGIsoEleObjEmu &eleobj = addEGIsoEleToPF(egeleobjs, emcalo[calo_idx], track[tk_idx], hwQual, ptCorr, bdtScore); - eleobj.sta_idx = sta_idx; + eleobj.src_idx = src_idx; } } diff --git a/L1Trigger/Phase2L1ParticleFlow/src/puppi/linpuppi_ref.cpp b/L1Trigger/Phase2L1ParticleFlow/src/puppi/linpuppi_ref.cpp index e2643999529e1..fd5d1e27cdad5 100644 --- a/L1Trigger/Phase2L1ParticleFlow/src/puppi/linpuppi_ref.cpp +++ b/L1Trigger/Phase2L1ParticleFlow/src/puppi/linpuppi_ref.cpp @@ -16,6 +16,7 @@ #endif using namespace l1ct; +using namespace linpuppi; l1ct::LinPuppiEmulator::LinPuppiEmulator(unsigned int nTrack, unsigned int nIn, @@ -220,16 +221,15 @@ void l1ct::LinPuppiEmulator::linpuppi_chs_ref(const PFRegionEmu ®ion, const std::vector &pfch 
/*[nTrack]*/, std::vector &outallch /*[nTrack]*/) const { const unsigned int nTrack = std::min(nTrack_, pfch.size()); + const unsigned int nVtx = std::min(nVtx_, pv.size()); outallch.resize(nTrack); for (unsigned int i = 0; i < nTrack; ++i) { int pZ0 = pfch[i].hwZ0; int z0diff = -99999; - for (unsigned int j = 0; j < nVtx_; ++j) { - if (j < pv.size()) { - int pZ0Diff = pZ0 - pv[j].hwZ0; - if (std::abs(z0diff) > std::abs(pZ0Diff)) - z0diff = pZ0Diff; - } + for (unsigned int j = 0; j < nVtx; ++j) { + int pZ0Diff = pZ0 - pv[j].hwZ0; + if (std::abs(z0diff) > std::abs(pZ0Diff)) + z0diff = pZ0Diff; } bool accept = pfch[i].hwPt != 0; if (!fakePuppi_) @@ -278,107 +278,90 @@ unsigned int l1ct::LinPuppiEmulator::find_ieta(const PFRegionEmu ®ion, eta_t } std::pair l1ct::LinPuppiEmulator::sum2puppiPt_ref( - uint64_t sum, pt_t pt, unsigned int ieta, bool isEM, int icand) const { - const int sum_bitShift = LINPUPPI_sum_bitShift; - const int x2_bits = LINPUPPI_x2_bits; // decimal bits the discriminator values - const int alpha_bits = LINPUPPI_alpha_bits; // decimal bits of the alpha values - const int alphaSlope_bits = LINPUPPI_alphaSlope_bits; // decimal bits of the alphaSlope values - const int ptSlope_bits = LINPUPPI_ptSlope_bits; // decimal bits of the ptSlope values - const int weight_bits = LINPUPPI_weight_bits; - - const int ptSlopeNe = ptSlopeNe_[ieta] * (1 << ptSlope_bits); - const int ptSlopePh = ptSlopePh_[ieta] * (1 << ptSlope_bits); - const int ptZeroNe = ptZeroNe_[ieta] / LINPUPPI_ptLSB; // in pt scale - const int ptZeroPh = ptZeroPh_[ieta] / LINPUPPI_ptLSB; // in pt scale - const int alphaCrop = alphaCrop_[ieta] * (1 << x2_bits); - const int alphaSlopeNe = - alphaSlope_[ieta] * std::log(2.) * - (1 << alphaSlope_bits); // we put a log(2) here since we compute alpha as log2(sum) instead of ln(sum) - const int alphaSlopePh = alphaSlope_[ieta] * std::log(2.) * (1 << alphaSlope_bits); - const int alphaZeroNe = alphaZero_[ieta] / std::log(2.) 
* (1 << alpha_bits); - const int alphaZeroPh = alphaZero_[ieta] / std::log(2.) * (1 << alpha_bits); - const int priorNe = priorNe_[ieta] * (1 << x2_bits); - const int priorPh = priorPh_[ieta] * (1 << x2_bits); - - // -- simplest version - //int alpha = sum > 0 ? int(std::log2(float(sum) * LINPUPPI_pt2DR2_scale / (1< 0 ? int(std::log2(float(sum))*(1 << alpha_bits) + (std::log2(LINPUPPI_pt2DR2_scale) - sum_bitShift)*(1 << alpha_bits) + 0.5 ) : 0; - // -- re-written for a LUT implementation of the log2 + sumTerm_t sum, pt_t pt, unsigned int ieta, bool isEM, int icand) const { + const alpha_t logOffset = std::log2(linpuppi::PT2DR2_LSB) - SUM_BITSHIFT; + const ptSlope_t ptSlopeNe = ptSlopeNe_[ieta]; + const ptSlope_t ptSlopePh = ptSlopePh_[ieta]; + const l1ct::pt_t ptZeroNe = ptZeroNe_[ieta]; + const l1ct::pt_t ptZeroPh = ptZeroPh_[ieta]; + const x2_t alphaCrop = alphaCrop_[ieta]; + // we put a log(2) in all alphaSlopes here since we compute alpha as log2(sum) instead of ln(sum) + const alphaSlope_t alphaSlopeNe = alphaSlope_[ieta] * std::log(2.); + const alphaSlope_t alphaSlopePh = alphaSlope_[ieta] * std::log(2.); + const alpha_t alphaZeroNe = alphaZero_[ieta] / std::log(2.); + const alpha_t alphaZeroPh = alphaZero_[ieta] / std::log(2.); + const x2_t priorNe = priorNe_[ieta]; + const x2_t priorPh = priorPh_[ieta]; + + // emulate computing + // alpha = log2(sum) + // x2a = alphaSlope*(alpha - alphaZero) + // we use a 10-bit LUT for the log2(sum), and to save firmware resources we + // also pack the computation of x2a in the same LUT. + // In this emulator, however, we also separately compute the alpha, as it is + // useful for debugging and comparison to the floating-point emulator.
const int log2lut_bits = 10; - int alpha = 0; - uint64_t logarg = sum; + alpha_t alpha = 0; + uint64_t logarg = sum.bits_to_uint64(); if (logarg > 0) { - alpha = int((std::log2(LINPUPPI_pt2DR2_scale) - sum_bitShift) * (1 << alpha_bits) + 0.5); + alpha = logOffset; while (logarg >= (1 << log2lut_bits)) { logarg = logarg >> 1; - alpha += (1 << alpha_bits); + alpha += 1; } - alpha += int( - std::log2(float(logarg)) * - (1 - << alpha_bits)); // the maximum value of this term is log2lut_bits * (1 << alpha_bits) ~ 10*16 = 160 => fits in ap_uint<4+alpha_bits> + alpha += alpha_t(std::log2(float(logarg))); } - int alphaZero = (isEM ? alphaZeroPh : alphaZeroNe); - int alphaSlope = (isEM ? alphaSlopePh : alphaSlopeNe); - int x2a = std::min(std::max(alphaSlope * (alpha - alphaZero) >> (alphaSlope_bits + alpha_bits - x2_bits), -alphaCrop), - alphaCrop); - - // -- re-written to fit in a single LUT - int x2a_lut = -alphaSlope * alphaZero; - logarg = sum; + alpha_t alphaZero = (isEM ? alphaZeroPh : alphaZeroNe); + alphaSlope_t alphaSlope = (isEM ? 
alphaSlopePh : alphaSlopeNe); + x2_t x2a = -alphaSlope * alphaZero; + logarg = sum.bits_to_uint64(); if (logarg > 0) { - x2a_lut += alphaSlope * int((std::log2(LINPUPPI_pt2DR2_scale) - sum_bitShift) * (1 << alpha_bits) + 0.5); + x2a += alphaSlope * logOffset; while (logarg >= (1 << log2lut_bits)) { logarg = logarg >> 1; - x2a_lut += alphaSlope * (1 << alpha_bits); + x2a += alphaSlope; } - x2a_lut += alphaSlope * int(std::log2(float(logarg)) * (1 << alpha_bits)); - /*if (in <= 3) dbgPrintf("ref [%d]: x2a(sum = %9lu): logarg = %9lu, sumterm = %9d, table[logarg] = %9d, ret pre-crop = %9d\n", - in, sum, logarg, - alphaSlope * int((std::log2(LINPUPPI_pt2DR2_scale) - sum_bitShift)*(1 << alpha_bits) + 0.5) - alphaSlope * alphaZero, - alphaSlope * int(std::log2(float(logarg))*(1 << alpha_bits)), - x2a_lut); */ - } else { - //if (in <= 3) dbgPrintf("ref [%d]: x2a(sum = %9lu): logarg = %9lu, ret pre-crop = %9d\n", - // in, sum, logarg, x2a_lut); + x2a += alphaSlope * alpha_t(std::log2(float(logarg))); } - x2a_lut = std::min(std::max(x2a_lut >> (alphaSlope_bits + alpha_bits - x2_bits), -alphaCrop), alphaCrop); - assert(x2a_lut == x2a); - - int ptZero = (isEM ? ptZeroPh : ptZeroNe); - int ptSlope = (isEM ? ptSlopePh : ptSlopeNe); - int x2pt = ptSlope * (Scales::ptToInt(pt) - ptZero) >> (ptSlope_bits + 2 - x2_bits); + /* // debug printout, can be useful to study ranges and precisions for LUTs and coefficients + dbgPrintf("ref: x2a(sum = %10.5f, raw = %9lu): logarg = %9lu, sumterm = %.6f, table[logarg] = %d = %.6f, ret pre-crop = %d = %.6f\n", + sum.to_float(), sum.bits_to_uint64(), logarg, + (alphaSlope * (logOffset - alphaZero)).to_float(), + (alphaSlope * alpha_t(std::log2(float(logarg)))).V.to_int(), + (alphaSlope * alpha_t(std::log2(float(logarg)))).to_float(), + x2a.V.to_int(), x2a.to_float()); + */ + x2a = std::min(std::max(x2a, -alphaCrop), alphaCrop); - int prior = (isEM ? priorPh : priorNe); + l1ct::pt_t ptZero = (isEM ? 
ptZeroPh : ptZeroNe); + ptSlope_t ptSlope = (isEM ? ptSlopePh : ptSlopeNe); + x2_t x2pt = ptSlope * (pt - ptZero); - int x2 = x2a + x2pt - prior; + x2_t prior = (isEM ? priorPh : priorNe); - int weight = - std::min(1.0 / (1.0 + std::exp(-float(x2) / (1 << x2_bits))) * (1 << weight_bits) + 0.5, (1 << weight_bits)); + x2_t x2 = x2a + x2pt - prior; - pt_t ptPuppi = Scales::makePt((Scales::ptToInt(pt) * weight) >> weight_bits); + puppiWgt_t weight = 1.0 / (1.0 + std::exp(-x2.to_float())); + typedef ap_fixed pt_rounding_t; + pt_t ptPuppi = pt_rounding_t(pt * weight); if (debug_) dbgPrintf( - "ref candidate %02d pt %7.2f em %1d ieta %1d: alpha %+7.2f x2a %+5d = %+7.3f x2pt %+5d = %+7.3f x2 %+5d " - "= %+7.3f --> weight %4d = %.4f puppi pt %7.2f\n", + "ref candidate %02d pt %7.2f em %1d ieta %1d: sum %10.4f alpha %+7.2f x2a %+7.3f x2pt %+7.3f x2 %+7.3f " + "--> weight %.4f puppi pt %7.2f\n", icand, Scales::floatPt(pt), int(isEM), ieta, - std::max(alpha / float(1 << alpha_bits) * std::log(2.), -99.99f), - x2a, - x2a / float(1 << x2_bits), - x2pt, - x2pt / float(1 << x2_bits), - x2, - x2 / float(1 << x2_bits), - weight, - weight / float(1 << weight_bits), + sum.to_float(), + alpha.to_float() * std::log(2.), + x2a.to_float(), + x2pt.to_float(), + x2.to_float(), + weight.to_float(), Scales::floatPt(ptPuppi)); - return std::make_pair(ptPuppi, puppiWgt_t(weight)); + return std::make_pair(ptPuppi, weight); } void l1ct::LinPuppiEmulator::fwdlinpuppi_ref(const PFRegionEmu ®ion, @@ -387,9 +370,7 @@ void l1ct::LinPuppiEmulator::fwdlinpuppi_ref(const PFRegionEmu ®ion, std::vector &outallne /*[nIn]*/, std::vector &outselne /*[nOut]*/) const { const unsigned int nIn = std::min(nIn_, caloin.size()); - const int PTMAX2 = (iptMax_ * iptMax_); - - const int sum_bitShift = LINPUPPI_sum_bitShift; + const unsigned int PTMAX2 = (iptMax_ * iptMax_); outallne_nocut.resize(nIn); outallne.resize(nIn); @@ -398,27 +379,17 @@ void l1ct::LinPuppiEmulator::fwdlinpuppi_ref(const PFRegionEmu ®ion, 
outallne[in].clear(); if (caloin[in].hwPt == 0) continue; - uint64_t sum = 0; // 2 ^ sum_bitShift times (int pt^2)/(int dr2) + sumTerm_t sum = 0; // (pt^2)/(dr2) for (unsigned int it = 0; it < nIn; ++it) { if (it == in || caloin[it].hwPt == 0) continue; - unsigned int dr2 = dr2_int( - caloin[it].hwEta, caloin[it].hwPhi, caloin[in].hwEta, caloin[in].hwPhi); // if dr is inside puppi cone - if (dr2 <= dR2Max_) { - ap_uint<9> dr2short = (dr2 >= dR2Min_ ? dr2 : dR2Min_) >> 5; // reduce precision to make divide LUT cheaper - uint64_t pt2 = Scales::ptToInt(caloin[it].hwPt) * Scales::ptToInt(caloin[it].hwPt); - uint64_t term = std::min(pt2 >> 5, PTMAX2 >> 5) * ((1 << sum_bitShift) / int(dr2short)); - // dr2short >= (dR2Min_ >> 5) = 2 - // num <= (PTMAX2 >> 5) << sum_bitShift = (2^11) << 15 = 2^26 - // ==> term <= 2^25 - //dbgPrintf("ref term [%2d,%2d]: dr = %8d pt2_shift = %8lu term = %12lu\n", in, it, dr2, std::min(pt2 >> 5, PTMAX2 >> 5), term); - assert(uint64_t(PTMAX2 << (sum_bitShift - 5)) / (dR2Min_ >> 5) <= (1 << 25)); - assert(term < (1 << 25)); + unsigned int dr2 = dr2_int(caloin[it].hwEta, caloin[it].hwPhi, caloin[in].hwEta, caloin[in].hwPhi); + if (dr2 <= dR2Max_) { // if dr is inside puppi cone + unsigned int dr2short = (dr2 >= dR2Min_ ? 
dr2 : dR2Min_) >> 5; // reduce precision to make divide LUT cheaper + unsigned int pt = caloin[it].intPt(), pt2 = pt * pt; + dr2inv_t dr2inv = 1.0f / float(dr2short); + sumTerm_t term = std::min(pt2 >> 5, PTMAX2 >> 5) * dr2inv; sum += term; - //dbgPrintf(" pT cand %5.1f pT item %5.1f dR = %.3f term = %.1f [dbl] = %lu [int]\n", - // caloin[in].floatPt(), caloin[it].floatPt(), std::sqrt(dr2*LINPUPPI_DR2LSB), - // double(std::min(pt2 >> 5, 131071)<<15)/double(std::max(dr2,dR2Min_) >> 5), - // term); } } unsigned int ieta = find_ieta(region, caloin[in].hwEta); @@ -441,9 +412,7 @@ void l1ct::LinPuppiEmulator::linpuppi_ref(const PFRegionEmu ®ion, std::vector &outselne /*[nOut]*/) const { const unsigned int nIn = std::min(nIn_, pfallne.size()); const unsigned int nTrack = std::min(nTrack_, track.size()); - const int PTMAX2 = (iptMax_ * iptMax_); - - const int sum_bitShift = LINPUPPI_sum_bitShift; + const unsigned int PTMAX2 = (iptMax_ * iptMax_); outallne_nocut.resize(nIn); outallne.resize(nIn); @@ -452,7 +421,7 @@ void l1ct::LinPuppiEmulator::linpuppi_ref(const PFRegionEmu ®ion, outallne[in].clear(); if (pfallne[in].hwPt == 0) continue; - uint64_t sum = 0; // 2 ^ sum_bitShift times (int pt^2)/(int dr2) + sumTerm_t sum = 0; // (pt^2)/(dr2) for (unsigned int it = 0; it < nTrack; ++it) { if (track[it].hwPt == 0) continue; @@ -467,23 +436,23 @@ void l1ct::LinPuppiEmulator::linpuppi_ref(const PFRegionEmu ®ion, } if (std::abs(pZMin) > int(dzCut_)) continue; - unsigned int dr2 = dr2_int( - pfallne[in].hwEta, pfallne[in].hwPhi, track[it].hwEta, track[it].hwPhi); // if dr is inside puppi cone - if (dr2 <= dR2Max_) { - ap_uint<9> dr2short = (dr2 >= dR2Min_ ? 
dr2 : dR2Min_) >> 5; // reduce precision to make divide LUT cheaper - uint64_t pt2 = Scales::ptToInt(track[it].hwPt) * Scales::ptToInt(track[it].hwPt); - uint64_t term = std::min(pt2 >> 5, PTMAX2 >> 5) * ((1 << sum_bitShift) / int(dr2short)); - // dr2short >= (dR2Min_ >> 5) = 2 - // num <= (PTMAX2 >> 5) << sum_bitShift = (2^11) << 15 = 2^26 - // ==> term <= 2^25 - //dbgPrintf("ref term [%2d,%2d]: dr = %8d pt2_shift = %8lu term = %12lu\n", in, it, dr2, std::min(pt2 >> 5, PTMAX2 >> 5), term); - assert(uint64_t(PTMAX2 << (sum_bitShift - 5)) / (dR2Min_ >> 5) <= (1 << 25)); - assert(term < (1 << 25)); + unsigned int dr2 = dr2_int(pfallne[in].hwEta, pfallne[in].hwPhi, track[it].hwEta, track[it].hwPhi); + if (dr2 <= dR2Max_) { // if dr is inside puppi cone + unsigned int dr2short = (dr2 >= dR2Min_ ? dr2 : dR2Min_) >> 5; // reduce precision to make divide LUT cheaper + unsigned int pt = track[it].intPt(), pt2 = pt * pt; + dr2inv_t dr2inv = 1.0f / float(dr2short); + sumTerm_t term = std::min(pt2 >> 5, PTMAX2 >> 5) * dr2inv; + /* // printout useful for comparing internals steps of computation to the ones done in the firmware code + dbgPrintf("cand pT %5.1f eta %+6.2f ref term %2d %2d: dr = %8d pt2_shift = %8lu term = %12lu apterm = %12.6f\n", + pfallne[it].floatPt(), + region.floatGlbEtaOf(pfallne[it]), + in, + it, + dr2, + std::min(pt2 >> 5, PTMAX2 >> 5), + term.to_float()); + */ sum += term; - //dbgPrintf(" pT cand %5.1f pT item %5.1f dR = %.3f term = %.1f [dbl] = %lu [int]\n", - // pfallne[in].floatPt(), track[it].floatPt(), std::sqrt(dr2*LINPUPPI_DR2LSB), - // double(std::min(pt2 >> 5, 131071)<<15)/double(std::max(dr2,dR2Min_) >> 5), - // term); } } @@ -567,17 +536,15 @@ void l1ct::LinPuppiEmulator::fwdlinpuppi_flt(const PFRegionEmu ®ion, for (unsigned int it = 0; it < nIn; ++it) { if (it == in || caloin[it].hwPt == 0) continue; - unsigned int dr2 = dr2_int( - caloin[it].hwEta, caloin[it].hwPhi, caloin[in].hwEta, caloin[in].hwPhi); // if dr is inside puppi cone - if (dr2 
<= dR2Max_) { - sum += std::pow(std::min(caloin[it].floatPt(), f_ptMax), 2) / - (std::max(dr2, dR2Min_) * LINPUPPI_DR2LSB); + unsigned int dr2 = dr2_int(caloin[it].hwEta, caloin[it].hwPhi, caloin[in].hwEta, caloin[in].hwPhi); + if (dr2 <= dR2Max_) { // if dr is inside puppi cone + sum += std::pow(std::min(caloin[it].floatPt(), f_ptMax), 2) / (std::max(dr2, dR2Min_) * DR2_LSB); } } unsigned int ieta = find_ieta(region, caloin[in].hwEta); std::pair ptAndW = sum2puppiPt_flt(sum, caloin[in].floatPt(), ieta, caloin[in].hwIsEM(), in); - outallne_nocut[in].fill(region, caloin[in], Scales::makePtFromFloat(ptAndW.first), int(ptAndW.second * 256)); + outallne_nocut[in].fill(region, caloin[in], Scales::makePtFromFloat(ptAndW.first), l1ct::puppiWgt_t(ptAndW.second)); if (region.isFiducial(caloin[in]) && outallne_nocut[in].hwPt >= ptCut_[ieta]) { outallne[in] = outallne_nocut[in]; } @@ -610,26 +577,24 @@ void l1ct::LinPuppiEmulator::linpuppi_flt(const PFRegionEmu ®ion, continue; int pZMin = 99999; - for (unsigned int v = 0; v < nVtx_; ++v) { - if (v < pv.size()) { - int ppZMin = std::abs(int(track[it].hwZ0 - pv[v].hwZ0)); - if (pZMin > ppZMin) - pZMin = ppZMin; - } + for (unsigned int v = 0, nVtx = std::min(nVtx_, pv.size()); v < nVtx; ++v) { + int ppZMin = std::abs(int(track[it].hwZ0 - pv[v].hwZ0)); + if (pZMin > ppZMin) + pZMin = ppZMin; } if (std::abs(pZMin) > int(dzCut_)) continue; unsigned int dr2 = dr2_int( pfallne[in].hwEta, pfallne[in].hwPhi, track[it].hwEta, track[it].hwPhi); // if dr is inside puppi cone if (dr2 <= dR2Max_) { - sum += std::pow(std::min(track[it].floatPt(), f_ptMax), 2) / - (std::max(dr2, dR2Min_) * LINPUPPI_DR2LSB); + sum += std::pow(std::min(track[it].floatPt(), f_ptMax), 2) / (std::max(dr2, dR2Min_) * DR2_LSB); } } unsigned int ieta = find_ieta(region, pfallne[in].hwEta); bool isEM = pfallne[in].hwId.isPhoton(); std::pair ptAndW = sum2puppiPt_flt(sum, pfallne[in].floatPt(), ieta, isEM, in); - outallne_nocut[in].fill(region, pfallne[in], 
Scales::makePtFromFloat(ptAndW.first), int(ptAndW.second * 256)); + outallne_nocut[in].fill( + region, pfallne[in], Scales::makePtFromFloat(ptAndW.first), l1ct::puppiWgt_t(ptAndW.second)); if (region.isFiducial(pfallne[in]) && outallne_nocut[in].hwPt >= ptCut_[ieta]) { outallne[in] = outallne_nocut[in]; } diff --git a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/buffered_folded_multififo_regionizer_ref.cpp b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/buffered_folded_multififo_regionizer_ref.cpp index 9717416c15b0d..9f1e9ca3f8bcf 100644 --- a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/buffered_folded_multififo_regionizer_ref.cpp +++ b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/buffered_folded_multififo_regionizer_ref.cpp @@ -88,16 +88,17 @@ void l1ct::BufferedFoldedMultififoRegionizerEmulator::initSectorsAndRegions(cons l1ct::glbeta_t etaMin, etaMax; findEtaBounds_(fold_[ie].sectors.track[0].region, fold_[ie].regions, etaMin, etaMax); for (unsigned int isec = 0; ntk_ > 0 && isec < NTK_SECTORS; ++isec) { - tkBuffers_[2 * isec + ie] = l1ct::multififo_regionizer::EtaBuffer(nclocks_ / 2, etaMin, etaMax); + tkBuffers_[2 * isec + ie] = + l1ct::multififo_regionizer::EtaPhiBuffer(nclocks_ / 2, etaMin, etaMax); } findEtaBounds_(fold_[ie].sectors.hadcalo[0].region, fold_[ie].regions, etaMin, etaMax); for (unsigned int isec = 0; ncalo_ > 0 && isec < NCALO_SECTORS; ++isec) { caloBuffers_[2 * isec + ie] = - l1ct::multififo_regionizer::EtaBuffer(nclocks_ / 2, etaMin, etaMax); + l1ct::multififo_regionizer::EtaPhiBuffer(nclocks_ / 2, etaMin, etaMax); } findEtaBounds_(fold_[ie].sectors.muon.region, fold_[ie].regions, etaMin, etaMax); if (nmu_ > 0) { - muBuffers_[ie] = l1ct::multififo_regionizer::EtaBuffer(nclocks_ / 2, etaMin, etaMax); + muBuffers_[ie] = l1ct::multififo_regionizer::EtaPhiBuffer(nclocks_ / 2, etaMin, etaMax); } } } diff --git a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/middle_buffer_multififo_regionizer_ref.cpp 
b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/middle_buffer_multififo_regionizer_ref.cpp new file mode 100644 index 0000000000000..aa818bb139ab6 --- /dev/null +++ b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/middle_buffer_multififo_regionizer_ref.cpp @@ -0,0 +1,631 @@ +#include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/middle_buffer_multififo_regionizer_ref.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/dbgPrintf.h" +#include "L1Trigger/Phase2L1ParticleFlow/interface/regionizer/multififo_regionizer_elements_ref.icc" + +#include +#include +#include + +#ifdef CMSSW_GIT_HASH +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ParameterSet/interface/allowedValues.h" + +l1ct::MiddleBufferMultififoRegionizerEmulator::MiddleBufferMultififoRegionizerEmulator(const edm::ParameterSet& iConfig) + : MiddleBufferMultififoRegionizerEmulator(iConfig.getParameter("nClocks"), + iConfig.getParameter("nBuffers"), + iConfig.getParameter("etaBufferDepth"), + iConfig.getParameter("nTkLinks"), + iConfig.getParameter("nHCalLinks"), + iConfig.getParameter("nECalLinks"), + iConfig.getParameter("nTrack"), + iConfig.getParameter("nCalo"), + iConfig.getParameter("nEmCalo"), + iConfig.getParameter("nMu"), + /*streaming=*/true, + /*outii=*/2, + /*pauseii=*/1, + iConfig.getParameter("useAlsoVtxCoords")) { + debug_ = iConfig.getUntrackedParameter("debug", false); +} + +edm::ParameterSetDescription l1ct::MiddleBufferMultififoRegionizerEmulator::getParameterSetDescription() { + edm::ParameterSetDescription description; + description.add("nClocks", 162); + description.add("nBuffers", 27); + description.add("etaBufferDepth", 54); + description.add("nTkLinks", 1); + description.add("nHCalLinks", 1); + description.add("nECalLinks", 0); + description.add("nTrack", 22); + description.add("nCalo", 15); + description.add("nEmCalo", 12); + description.add("nMu", 2); + 
description.add("useAlsoVtxCoords", true); + description.addUntracked("debug", false); + return description; +} + +#endif + +l1ct::MiddleBufferMultififoRegionizerEmulator::MiddleBufferMultififoRegionizerEmulator(unsigned int nclocks, + unsigned int nbuffers, + unsigned int etabufferDepth, + unsigned int ntklinks, + unsigned int nHCalLinks, + unsigned int nECalLinks, + unsigned int ntk, + unsigned int ncalo, + unsigned int nem, + unsigned int nmu, + bool streaming, + unsigned int outii, + unsigned int pauseii, + bool useAlsoVtxCoords) + : RegionizerEmulator(useAlsoVtxCoords), + NTK_SECTORS(9), + NCALO_SECTORS(3), + NTK_LINKS(ntklinks), + HCAL_LINKS(nHCalLinks), + ECAL_LINKS(nECalLinks), + NMU_LINKS(1), + nclocks_(nclocks), + nbuffers_(nbuffers), + etabuffer_depth_(etabufferDepth), + ntk_(ntk), + ncalo_(ncalo), + nem_(nem), + nmu_(nmu), + outii_(outii), + pauseii_(pauseii), + nregions_pre_(27), + nregions_post_(54), + streaming_(streaming), + init_(false), + iclock_(0), + tkRegionizerPre_(ntk, ntk, false, outii, pauseii, useAlsoVtxCoords), + tkRegionizerPost_(ntk, (ntk + outii - 1) / outii, true, outii, pauseii, useAlsoVtxCoords), + hadCaloRegionizerPre_(ncalo, ncalo, false, outii, pauseii), + hadCaloRegionizerPost_(ncalo, (ncalo + outii - 1) / outii, true, outii, pauseii), + emCaloRegionizerPre_(nem, nem, false, outii, pauseii), + emCaloRegionizerPost_(nem, (nem + outii - 1) / outii, true, outii, pauseii), + muRegionizerPre_(nmu, nmu, false, outii, pauseii), + muRegionizerPost_(nmu, std::max(1u, (nmu + outii - 1) / outii), true, outii, pauseii), + tkBuffers_(ntk ? nbuffers_ : 0), + hadCaloBuffers_(ncalo ? nbuffers_ : 0), + emCaloBuffers_(nem ? nbuffers_ : 0), + muBuffers_(nmu ? 
nbuffers_ : 0) { + assert(nbuffers_ == nregions_post_ || nbuffers_ == nregions_pre_); + unsigned int phisectors = 9, etaslices = 3; + for (unsigned int ietaslice = 0; ietaslice < etaslices && ntk > 0; ++ietaslice) { + for (unsigned int ie = 0; ie < 2; ++ie) { // 0 = negative, 1 = positive + unsigned int nTFEtaSlices = ietaslice == 1 ? 2 : 1; + if ((ietaslice == 0 && ie == 1) || (ietaslice == 2 && ie == 0)) + continue; + unsigned int ireg0 = phisectors * ietaslice, il0 = 3 * NTK_LINKS * (nTFEtaSlices - 1) * ie; + for (unsigned int is = 0; is < NTK_SECTORS; ++is) { // 9 tf sectors + for (unsigned int il = 0; il < NTK_LINKS; ++il) { // max tracks per sector per clock + unsigned int isp = (is + 1) % NTK_SECTORS, ism = (is + NTK_SECTORS - 1) % NTK_SECTORS; + tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, is + ireg0, il0 + il); + tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, isp + ireg0, il0 + il + NTK_LINKS); + tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, ism + ireg0, il0 + il + 2 * NTK_LINKS); + } + } + } + } + // calo + for (unsigned int ie = 0; ie < etaslices; ++ie) { + for (unsigned int is = 0; is < NCALO_SECTORS; ++is) { // NCALO_SECTORS sectors + for (unsigned int j = 0; j < 3; ++j) { // 3 regions x sector + for (unsigned int il = 0; il < HCAL_LINKS; ++il) { + caloRoutes_.emplace_back(is, il, 3 * is + j + phisectors * ie, il); + if (j) { + caloRoutes_.emplace_back((is + 1) % 3, il, 3 * is + j + phisectors * ie, il + HCAL_LINKS); + } + } + for (unsigned int il = 0; il < ECAL_LINKS; ++il) { + emCaloRoutes_.emplace_back(is, il, 3 * is + j + phisectors * ie, il); + if (j) { + emCaloRoutes_.emplace_back((is + 1) % 3, il, 3 * is + j + phisectors * ie, il + ECAL_LINKS); + } + } + } + } + } + // mu + for (unsigned int il = 0; il < NMU_LINKS && nmu > 0; ++il) { + for (unsigned int j = 0; j < nregions_pre_; ++j) { + muRoutes_.emplace_back(0, il, j, il); + } + } +} + +l1ct::MiddleBufferMultififoRegionizerEmulator::~MiddleBufferMultififoRegionizerEmulator() {} + 
+void l1ct::MiddleBufferMultififoRegionizerEmulator::initSectorsAndRegions(const RegionizerDecodedInputs& in, + const std::vector& out) { + assert(!init_); + init_ = true; + assert(out.size() == nregions_post_); + + std::vector mergedRegions; + unsigned int neta = 3, nphi = 9; + mergedRegions.reserve(nregions_pre_); + mergedRegions_.reserve(nregions_pre_); + outputRegions_.reserve(nregions_post_); + for (unsigned int ieta = 0; ieta < neta; ++ieta) { + for (unsigned int iphi = 0; iphi < nphi; ++iphi) { + const PFRegionEmu& reg0 = out[(2 * ieta + 0) * nphi + iphi].region; + const PFRegionEmu& reg1 = out[(2 * ieta + 1) * nphi + iphi].region; + assert(reg0.hwPhiCenter == reg1.hwPhiCenter); + mergedRegions.emplace_back(reg0.floatEtaMin(), + reg1.floatEtaMax(), + reg0.floatPhiCenter(), + reg0.floatPhiHalfWidth() * 2, + reg0.floatEtaExtra(), + reg0.floatPhiExtra()); + mergedRegions_.push_back(mergedRegions.back().region); + outputRegions_.push_back(reg0); + outputRegions_.push_back(reg1); + if (debug_) { + dbgCout() << "Created region with etaCenter " << mergedRegions.back().region.hwEtaCenter.to_int() + << ", halfWidth " << mergedRegions.back().region.hwEtaHalfWidth.to_int() << "\n"; + } + if (nbuffers_ == nregions_post_) { + for (int i = 0; i < 2; ++i) { + unsigned int iout = (2 * ieta + i) * nphi + iphi; + const l1ct::PFRegionEmu& from = mergedRegions.back().region; + const l1ct::PFRegionEmu& to = out[iout].region; + l1ct::glbeta_t etaMin = to.hwEtaCenter - to.hwEtaHalfWidth - to.hwEtaExtra - from.hwEtaCenter; + l1ct::glbeta_t etaMax = to.hwEtaCenter + to.hwEtaHalfWidth + to.hwEtaExtra - from.hwEtaCenter; + l1ct::glbeta_t etaShift = from.hwEtaCenter - to.hwEtaCenter; + l1ct::glbphi_t phiMin = -to.hwPhiHalfWidth - to.hwPhiExtra; + l1ct::glbphi_t phiMax = +to.hwPhiHalfWidth + to.hwPhiExtra; + l1ct::glbphi_t phiShift = 0; + if (ntk_ > 0) + tkBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer( + etabuffer_depth_, etaMin, etaMax, etaShift, phiMin, phiMax, phiShift); 
+ if (ncalo_ > 0) + hadCaloBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer( + etabuffer_depth_, etaMin, etaMax, etaShift, phiMin, phiMax, phiShift); + if (nem_ > 0) + emCaloBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer( + etabuffer_depth_, etaMin, etaMax, etaShift, phiMin, phiMax, phiShift); + if (nmu_ > 0) + muBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer( + etabuffer_depth_, etaMin, etaMax, etaShift, phiMin, phiMax, phiShift); + } + } else if (nbuffers_ == nregions_pre_) { + unsigned int iout = ieta * nphi + iphi; + if (ntk_ > 0) + tkBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer(etabuffer_depth_); + if (ncalo_ > 0) + hadCaloBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer(etabuffer_depth_); + if (nem_ > 0) + emCaloBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer(etabuffer_depth_); + if (nmu_ > 0) + muBuffers_[iout] = l1ct::multififo_regionizer::EtaPhiBuffer(etabuffer_depth_); + } + } + } + if (ntk_) { + assert(in.track.size() == 2 * NTK_SECTORS); + tkRegionizerPre_.initSectors(in.track); + tkRegionizerPre_.initRegions(mergedRegions); + tkRegionizerPre_.initRouting(tkRoutes_); + tkRegionizerPost_.initRegions(out); + } + if (ncalo_) { + assert(in.hadcalo.size() == NCALO_SECTORS); + hadCaloRegionizerPre_.initSectors(in.hadcalo); + hadCaloRegionizerPre_.initRegions(mergedRegions); + hadCaloRegionizerPre_.initRouting(caloRoutes_); + hadCaloRegionizerPost_.initRegions(out); + } + if (nem_) { + assert(in.emcalo.size() == NCALO_SECTORS); + emCaloRegionizerPre_.initSectors(in.emcalo); + emCaloRegionizerPre_.initRegions(mergedRegions); + if (ECAL_LINKS) + emCaloRegionizerPre_.initRouting(emCaloRoutes_); + emCaloRegionizerPost_.initRegions(out); + } + if (nmu_) { + muRegionizerPre_.initSectors(in.muon); + muRegionizerPre_.initRegions(mergedRegions); + muRegionizerPre_.initRouting(muRoutes_); + muRegionizerPost_.initRegions(out); + } +} + +bool l1ct::MiddleBufferMultififoRegionizerEmulator::step(bool newEvent, + 
const std::vector& links_tk, + const std::vector& links_hadCalo, + const std::vector& links_emCalo, + const std::vector& links_mu, + std::vector& out_tk, + std::vector& out_hadCalo, + std::vector& out_emCalo, + std::vector& out_mu, + bool /*unused*/) { + iclock_ = (newEvent ? 0 : iclock_ + 1); + bool newRead = iclock_ == 2 * etabuffer_depth_; + + std::vector pre_out_tk; + std::vector pre_out_hadCalo; + std::vector pre_out_emCalo; + std::vector pre_out_mu; + bool ret = false; + if (ntk_) + ret = tkRegionizerPre_.step(newEvent, links_tk, pre_out_tk, false); + if (nmu_) + ret = muRegionizerPre_.step(newEvent, links_mu, pre_out_mu, false); + if (ncalo_) + ret = hadCaloRegionizerPre_.step(newEvent, links_hadCalo, pre_out_hadCalo, false); + if (nem_) { + if (ECAL_LINKS) { + ret = emCaloRegionizerPre_.step(newEvent, links_emCalo, pre_out_emCalo, false); + } else if (ncalo_) { + pre_out_emCalo.resize(pre_out_hadCalo.size()); + for (unsigned int i = 0, n = pre_out_hadCalo.size(); i < n; ++i) { + decode(pre_out_hadCalo[i], pre_out_emCalo[i]); + } + } + } + + // in the no-streaming case, we just output the pre-regionizer + if (!streaming_) { + out_tk.swap(pre_out_tk); + out_mu.swap(pre_out_mu); + out_hadCalo.swap(pre_out_hadCalo); + out_emCalo.swap(pre_out_emCalo); + return ret; + } + + // otherwise, we push into the eta buffers + if (newEvent) { + for (auto& b : tkBuffers_) + b.writeNewEvent(); + for (auto& b : hadCaloBuffers_) + b.writeNewEvent(); + for (auto& b : emCaloBuffers_) + b.writeNewEvent(); + for (auto& b : muBuffers_) + b.writeNewEvent(); + } + unsigned int neta = 3, nphi = 9; + for (unsigned int ieta = 0; ieta < neta; ++ieta) { + for (unsigned int iphi = 0; iphi < nphi; ++iphi) { + unsigned int iin = ieta * nphi + iphi; + for (int i = 0, n = nbuffers_ == nregions_pre_ ? 
1 : 2; i < n; ++i) { + unsigned int iout = (n * ieta + i) * nphi + iphi; + if (ntk_) + tkBuffers_[iout].maybe_push(pre_out_tk[iin]); + if (ncalo_) + hadCaloBuffers_[iout].maybe_push(pre_out_hadCalo[iin]); + if (nem_) + emCaloBuffers_[iout].maybe_push(pre_out_emCalo[iin]); + if (nmu_) + muBuffers_[iout].maybe_push(pre_out_mu[iin]); + } + } + } + + // and we read from eta buffers into muxes + if (newRead) { + for (auto& b : tkBuffers_) + b.readNewEvent(); + for (auto& b : hadCaloBuffers_) + b.readNewEvent(); + for (auto& b : emCaloBuffers_) + b.readNewEvent(); + for (auto& b : muBuffers_) + b.readNewEvent(); + } + std::vector bufferOut_tk(ntk_ ? nregions_post_ : 0); + std::vector bufferOut_hadCalo(ncalo_ ? nregions_post_ : 0); + std::vector bufferOut_emCalo(nem_ ? nregions_post_ : 0); + std::vector bufferOut_mu(nmu_ ? nregions_post_ : 0); + if (nbuffers_ == nregions_post_) { // just copy directly + for (unsigned int i = 0; i < nregions_post_; ++i) { + if (ntk_) + bufferOut_tk[i] = tkBuffers_[i].pop(); + if (ncalo_) + bufferOut_hadCalo[i] = hadCaloBuffers_[i].pop(); + if (nem_) + bufferOut_emCalo[i] = emCaloBuffers_[i].pop(); + if (nmu_) + bufferOut_mu[i] = muBuffers_[i].pop(); + } + } else if (nbuffers_ == nregions_pre_) { // propagate and copy + unsigned int neta = 3, nphi = 9; + for (unsigned int ieta = 0; ieta < neta; ++ieta) { + for (unsigned int iphi = 0; iphi < nphi; ++iphi) { + unsigned int iin = ieta * nphi + iphi; + const l1ct::PFRegionEmu& from = mergedRegions_[iin]; + l1ct::TkObjEmu tk = ntk_ ? tkBuffers_[iin].pop() : l1ct::TkObjEmu(); + l1ct::HadCaloObjEmu calo = ncalo_ ? hadCaloBuffers_[iin].pop() : l1ct::HadCaloObjEmu(); + l1ct::EmCaloObjEmu em = nem_ ? emCaloBuffers_[iin].pop() : l1ct::EmCaloObjEmu(); + l1ct::MuObjEmu mu = nmu_ ? 
muBuffers_[iin].pop() : l1ct::MuObjEmu(); + for (int i = 0; i < 2; ++i) { + const l1ct::PFRegionEmu& to = outputRegions_[2 * iin + i]; + unsigned int iout = (2 * ieta + i) * nphi + iphi; + l1ct::glbeta_t etaMin = to.hwEtaCenter - to.hwEtaHalfWidth - to.hwEtaExtra - from.hwEtaCenter; + l1ct::glbeta_t etaMax = to.hwEtaCenter + to.hwEtaHalfWidth + to.hwEtaExtra - from.hwEtaCenter; + l1ct::glbeta_t etaShift = from.hwEtaCenter - to.hwEtaCenter; + l1ct::glbphi_t phiMin = -to.hwPhiHalfWidth - to.hwPhiExtra; + l1ct::glbphi_t phiMax = +to.hwPhiHalfWidth + to.hwPhiExtra; + if (tk.hwPt > 0 && l1ct::multififo_regionizer::local_eta_phi_window(tk, etaMin, etaMax, phiMin, phiMax)) { + bufferOut_tk[iout] = tk; + bufferOut_tk[iout].hwEta += etaShift; + } + if (calo.hwPt > 0 && l1ct::multififo_regionizer::local_eta_phi_window(calo, etaMin, etaMax, phiMin, phiMax)) { + bufferOut_hadCalo[iout] = calo; + bufferOut_hadCalo[iout].hwEta += etaShift; + } + if (em.hwPt > 0 && l1ct::multififo_regionizer::local_eta_phi_window(em, etaMin, etaMax, phiMin, phiMax)) { + bufferOut_emCalo[iout] = em; + bufferOut_emCalo[iout].hwEta += etaShift; + } + if (mu.hwPt > 0 && l1ct::multififo_regionizer::local_eta_phi_window(mu, etaMin, etaMax, phiMin, phiMax)) { + bufferOut_mu[iout] = mu; + bufferOut_mu[iout].hwEta += etaShift; + } + } + } + } + } + if (ntk_) + tkRegionizerPost_.muxonly_step(newEvent, /*flush=*/true, bufferOut_tk, out_tk); + if (ncalo_) + hadCaloRegionizerPost_.muxonly_step(newEvent, /*flush=*/true, bufferOut_hadCalo, out_hadCalo); + if (nem_) + emCaloRegionizerPost_.muxonly_step(newEvent, /*flush=*/true, bufferOut_emCalo, out_emCalo); + if (nmu_) + muRegionizerPost_.muxonly_step(newEvent, /*flush=*/true, bufferOut_mu, out_mu); + + return newRead; +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::fillLinks(unsigned int iclock, + const l1ct::RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid) { + if (ntk_ == 0) + return; + assert(NTK_LINKS == 1); + 
links.resize(NTK_SECTORS * NTK_LINKS * 2); + valid.resize(links.size()); + // emulate reduced rate from 96b tracks on 64b links + unsigned int itkclock = 2 * (iclock / 3) + (iclock % 3) - 1; // will underflow for iclock == 0 but it doesn't matter + for (unsigned int is = 0, idx = 0; is < 2 * NTK_SECTORS; ++is, ++idx) { // tf sectors + const l1ct::DetectorSector& sec = in.track[is]; + unsigned int ntracks = sec.size(); + unsigned int nw64 = (ntracks * 3 + 1) / 2; + if (iclock % 3 == 0) { + links[idx].clear(); + valid[idx] = (iclock == 0) || (iclock < nw64); + } else if (itkclock < ntracks && itkclock < nclocks_ - 1) { + links[idx] = sec[itkclock]; + valid[idx] = true; + } else { + links[idx].clear(); + valid[idx] = false; + } + } +} + +template +void l1ct::MiddleBufferMultififoRegionizerEmulator::fillCaloLinks_(unsigned int iclock, + const std::vector>& in, + std::vector& links, + std::vector& valid) { + unsigned int NLINKS = (typeid(T) == typeid(l1ct::HadCaloObjEmu) ? HCAL_LINKS : ECAL_LINKS); + links.resize(NCALO_SECTORS * NLINKS); + valid.resize(links.size()); + for (unsigned int is = 0, idx = 0; is < NCALO_SECTORS; ++is) { + for (unsigned int il = 0; il < NLINKS; ++il, ++idx) { + unsigned int ioffs = iclock * NLINKS + il; + if (ioffs < in[is].size() && iclock < nclocks_ - 1) { + links[idx] = in[is][ioffs]; + valid[idx] = true; + } else { + links[idx].clear(); + valid[idx] = false; + } + } + } +} +void l1ct::MiddleBufferMultififoRegionizerEmulator::fillSharedCaloLinks( + unsigned int iclock, + const std::vector>& em_in, + const std::vector>& had_in, + std::vector& links, + std::vector& valid) { + assert(ECAL_LINKS == 0 && HCAL_LINKS == 1 && ncalo_ != 0 && nem_ != 0); + links.resize(NCALO_SECTORS); + valid.resize(links.size()); + // for the moment we assume the first 54 clocks are for EM, the rest for HAD + const unsigned int NCLK_EM = 54; + for (unsigned int is = 0; is < NCALO_SECTORS; ++is) { + links[is].clear(); + if (iclock < NCLK_EM) { + valid[is] = true; + 
if (iclock < em_in[is].size()) { + encode(em_in[is][iclock], links[is]); + } + } else { + if (iclock - NCLK_EM < had_in[is].size()) { + encode(had_in[is][iclock - NCLK_EM], links[is]); + valid[is] = true; + } else { + valid[is] = false; + } + } + } // sectors +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::fillLinks(unsigned int iclock, + const l1ct::RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid) { + if (ncalo_ == 0) + return; + if (nem_ != 0 && ECAL_LINKS == 0 && HCAL_LINKS == 1) + fillSharedCaloLinks(iclock, in.emcalo, in.hadcalo, links, valid); + else + fillCaloLinks_(iclock, in.hadcalo, links, valid); +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::fillLinks(unsigned int iclock, + const l1ct::RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid) { + if (nem_ == 0) + return; + fillCaloLinks_(iclock, in.emcalo, links, valid); +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::fillLinks(unsigned int iclock, + const l1ct::RegionizerDecodedInputs& in, + std::vector& links, + std::vector& valid) { + if (nmu_ == 0) + return; + assert(NMU_LINKS == 1); + links.resize(NMU_LINKS); + valid.resize(links.size()); + if (iclock < in.muon.size() && iclock < nclocks_ - 1) { + links[0] = in.muon[iclock]; + valid[0] = true; + } else { + links[0].clear(); + valid[0] = false; + } +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::destream(int iclock, + const std::vector& tk_out, + const std::vector& em_out, + const std::vector& calo_out, + const std::vector& mu_out, + PFInputRegion& out) { + if (ntk_) + tkRegionizerPost_.destream(iclock, tk_out, out.track); + if (ncalo_) + hadCaloRegionizerPost_.destream(iclock, calo_out, out.hadcalo); + if (nem_) + emCaloRegionizerPost_.destream(iclock, em_out, out.emcalo); + if (nmu_) + muRegionizerPost_.destream(iclock, mu_out, out.muon); +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::reset() { + tkRegionizerPre_.reset(); + emCaloRegionizerPre_.reset(); + 
hadCaloRegionizerPre_.reset(); + muRegionizerPre_.reset(); + tkRegionizerPost_.reset(); + emCaloRegionizerPost_.reset(); + hadCaloRegionizerPost_.reset(); + muRegionizerPost_.reset(); + for (auto& b : tkBuffers_) + b.reset(); + for (auto& b : hadCaloBuffers_) + b.reset(); + for (auto& b : emCaloBuffers_) + b.reset(); + for (auto& b : muBuffers_) + b.reset(); +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::run(const RegionizerDecodedInputs& in, + std::vector& out) { + assert(streaming_); // doesn't make sense otherwise + if (!init_) + initSectorsAndRegions(in, out); + reset(); + std::vector tk_links_in, tk_out; + std::vector em_links_in, em_out; + std::vector calo_links_in, calo_out; + std::vector mu_links_in, mu_out; + + // read and sort the inputs + for (unsigned int iclock = 0; iclock < nclocks_; ++iclock) { + fillLinks(iclock, in, tk_links_in); + fillLinks(iclock, in, em_links_in); + fillLinks(iclock, in, calo_links_in); + fillLinks(iclock, in, mu_links_in); + + bool newevt = (iclock == 0); + step(newevt, tk_links_in, calo_links_in, em_links_in, mu_links_in, tk_out, calo_out, em_out, mu_out, true); + } + + // set up an empty event + for (auto& l : tk_links_in) + l.clear(); + for (auto& l : em_links_in) + l.clear(); + for (auto& l : calo_links_in) + l.clear(); + for (auto& l : mu_links_in) + l.clear(); + + // read and put the inputs in the regions + assert(out.size() == nregions_post_); + for (unsigned int iclock = 0; iclock < nclocks_; ++iclock) { + bool newevt = (iclock == 0); + step(newevt, tk_links_in, calo_links_in, em_links_in, mu_links_in, tk_out, calo_out, em_out, mu_out, true); + + unsigned int ireg = (iclock / (outii_ + pauseii_)); + if ((iclock % (outii_ + pauseii_)) >= outii_) + continue; + if (ireg >= nregions_post_) + break; + + if (streaming_) { + destream(iclock, tk_out, em_out, calo_out, mu_out, out[ireg]); + } else { + if (iclock % outii_ == 0) { + out[ireg].track = tk_out; + out[ireg].emcalo = em_out; + out[ireg].hadcalo = calo_out; + 
out[ireg].muon = mu_out; + } + } + } + + reset(); +} + +void l1ct::MiddleBufferMultififoRegionizerEmulator::encode(const l1ct::EmCaloObjEmu& from, l1ct::HadCaloObjEmu& to) { + assert(!from.hwEmID[5]); + to.hwPt = from.hwPt; + to.hwEmPt = from.hwPtErr; + to.hwEta = from.hwEta; + to.hwPhi = from.hwPhi; + to.hwEmID[5] = true; + to.hwEmID(4, 0) = from.hwEmID(4, 0); + to.src = from.src; +} +void l1ct::MiddleBufferMultififoRegionizerEmulator::encode(const l1ct::HadCaloObjEmu& from, l1ct::HadCaloObjEmu& to) { + assert(!from.hwEmID[5]); + to = from; +} +void l1ct::MiddleBufferMultififoRegionizerEmulator::decode(l1ct::HadCaloObjEmu& had, l1ct::EmCaloObjEmu& em) { + if (had.hwPt && had.hwEmID[5]) { + em.hwPt = had.hwPt; + em.hwPtErr = had.hwEmPt; + em.hwEta = had.hwEta; + em.hwPhi = had.hwPhi; + em.hwEmID[5] = 0; + em.hwEmID(4, 0) = had.hwEmID(4, 0); + em.hwSrrTot = 0; + em.hwMeanZ = 0; + em.hwHoe = 0; + em.src = had.src; + had.clear(); + } else { + em.clear(); + } +} \ No newline at end of file diff --git a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/multififo_regionizer_ref.cpp b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/multififo_regionizer_ref.cpp index a9e9d3977dde4..ef509bb4ccaf1 100644 --- a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/multififo_regionizer_ref.cpp +++ b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/multififo_regionizer_ref.cpp @@ -34,6 +34,7 @@ l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(const edm::Parame l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(const std::string& barrelSetup, const edm::ParameterSet& iConfig) : MultififoRegionizerEmulator(parseBarrelSetup(barrelSetup), + iConfig.getParameter("nTkLinks"), iConfig.getParameter("nHCalLinks"), iConfig.getParameter("nECalLinks"), iConfig.getParameter("nClocks"), @@ -71,6 +72,7 @@ edm::ParameterSetDescription l1ct::MultififoRegionizerEmulator::getParameterSetD description.ifValue(edm::ParameterDescription("barrelSetup", "Full54", true), 
edm::allowedValues("Full54", "Full27")); description.add("nClocks", 54); + description.add("nTkLinks", 2); description.add("nHCalLinks", 2); description.add("nECalLinks", 1); description.add("nTrack", 22); @@ -154,6 +156,7 @@ l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(unsigned int nend } l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(BarrelSetup barrelSetup, + unsigned int ntklinks, unsigned int nHCalLinks, unsigned int nECalLinks, unsigned int nclocks, @@ -168,7 +171,7 @@ l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(BarrelSetup barre : RegionizerEmulator(useAlsoVtxCoords), NTK_SECTORS((barrelSetup == BarrelSetup::Phi18 || barrelSetup == BarrelSetup::Phi9) ? 5 : 9), NCALO_SECTORS((barrelSetup == BarrelSetup::Phi18 || barrelSetup == BarrelSetup::Phi9) ? 2 : 3), - NTK_LINKS(2), + NTK_LINKS(ntklinks), NCALO_LINKS(2), HCAL_LINKS(nHCalLinks), ECAL_LINKS(nECalLinks), @@ -237,13 +240,13 @@ l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(BarrelSetup barre } else if (barrelSetup == BarrelSetup::Central18 || barrelSetup == BarrelSetup::Central9) { nTFEtaSlices = 2; } - unsigned int ireg0 = phisectors * ietaslice, il0 = 6 * (nTFEtaSlices - 1) * ie; + unsigned int ireg0 = phisectors * ietaslice, il0 = 3 * NTK_LINKS * (nTFEtaSlices - 1) * ie; if (barrelSetup == BarrelSetup::Phi18 || barrelSetup == BarrelSetup::Phi9) { for (unsigned int iregphi = 0; iregphi < (nregions_ / etaslices); ++iregphi) { for (unsigned int il = 0; il < NTK_LINKS; ++il) { tkRoutes_.emplace_back((iregphi + 1) + NTK_SECTORS * ie, il, iregphi + ireg0, il0 + il); - tkRoutes_.emplace_back((iregphi + 0) + NTK_SECTORS * ie, il, iregphi + ireg0, il0 + il + 2); - tkRoutes_.emplace_back((iregphi + 2) + NTK_SECTORS * ie, il, iregphi + ireg0, il0 + il + 4); + tkRoutes_.emplace_back((iregphi + 0) + NTK_SECTORS * ie, il, iregphi + ireg0, il0 + il + NTK_LINKS); + tkRoutes_.emplace_back((iregphi + 2) + NTK_SECTORS * ie, il, iregphi + ireg0, il0 + il + 2 
* NTK_LINKS); } } } else { @@ -251,8 +254,8 @@ l1ct::MultififoRegionizerEmulator::MultififoRegionizerEmulator(BarrelSetup barre for (unsigned int il = 0; il < NTK_LINKS; ++il) { // max tracks per sector per clock unsigned int isp = (is + 1) % NTK_SECTORS, ism = (is + NTK_SECTORS - 1) % NTK_SECTORS; tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, is + ireg0, il0 + il); - tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, isp + ireg0, il0 + il + 2); - tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, ism + ireg0, il0 + il + 4); + tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, isp + ireg0, il0 + il + NTK_LINKS); + tkRoutes_.emplace_back(is + NTK_SECTORS * ie, il, ism + ireg0, il0 + il + 2 * NTK_LINKS); } } } diff --git a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/tdr_regionizer_ref.cpp b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/tdr_regionizer_ref.cpp index 79616c2753888..5d9d635dc8b02 100644 --- a/L1Trigger/Phase2L1ParticleFlow/src/regionizer/tdr_regionizer_ref.cpp +++ b/L1Trigger/Phase2L1ParticleFlow/src/regionizer/tdr_regionizer_ref.cpp @@ -75,9 +75,11 @@ void l1ct::TDRRegionizerEmulator::initSectorsAndRegions(const RegionizerDecodedI netaInBR_, nphiInBR_, nmu_, bigRegionEdges_[i], bigRegionEdges_[i + 1], nclocks_, 1, false); } - dbgCout() << "in.track.size() = " << in.track.size() << std::endl; - dbgCout() << "in.hadcalo.size() = " << in.hadcalo.size() << std::endl; - dbgCout() << "in.emcalo.size() = " << in.emcalo.size() << std::endl; + if (debug_) { + dbgCout() << "in.track.size() = " << in.track.size() << std::endl; + dbgCout() << "in.hadcalo.size() = " << in.hadcalo.size() << std::endl; + dbgCout() << "in.emcalo.size() = " << in.emcalo.size() << std::endl; + } if (ntk_) { for (unsigned int i = 0; i < nBigRegions_; i++) { diff --git a/L1Trigger/Phase2L1ParticleFlow/src/taus/TauNNIdHW.cc b/L1Trigger/Phase2L1ParticleFlow/src/taus/TauNNIdHW.cc index 4f87a9a56b760..f4e996f6a665e 100644 --- a/L1Trigger/Phase2L1ParticleFlow/src/taus/TauNNIdHW.cc +++ 
b/L1Trigger/Phase2L1ParticleFlow/src/taus/TauNNIdHW.cc @@ -12,6 +12,8 @@ void TauNNIdHW::initialize(const std::string &iInput, int iNParticles) { fId_ = std::make_unique(fNParticles_); fInput_ = iInput; } + +//Prepare the inputs for the Tau NN void TauNNIdHW::SetNNVectorVar() { NNvectorVar_.clear(); for (unsigned i0 = 0; i0 < fNParticles_; i0++) { @@ -35,35 +37,62 @@ void TauNNIdHW::SetNNVectorVar() { } } -result_t TauNNIdHW::EvaluateNN() { - input_t data[N_INPUTS]; +// Main architecture of the NN here +Tau_NN_Result TauNNIdHW::EvaluateNN() { + input_t model_input[N_INPUT_1_1]; for (unsigned int i = 0; i < NNvectorVar_.size(); i++) { - data[i] = input_t(NNvectorVar_[i]); + model_input[i] = input_t(NNvectorVar_[i]); } - layer1_t layer1_out[N_LAYER_1]; - layer1_t logits1[N_LAYER_1]; - nnet::compute_layer(data, logits1, w1, b1); - nnet::relu(logits1, layer1_out); - layer2_t layer2_out[N_LAYER_2]; - layer2_t logits2[N_LAYER_2]; - nnet::compute_layer(layer1_out, logits2, w2, b2); - nnet::relu(logits2, layer2_out); + nnet::dense(model_input, layer2_out, w2, b2); // Dense_1 + + layer4_t layer4_out[N_LAYER_2]; + nnet::relu(layer2_out, layer4_out); // relu_1 + + layer5_t layer5_out[N_LAYER_5]; + nnet::dense(layer4_out, layer5_out, w5, b5); // Dense_2 + + layer7_t layer7_out[N_LAYER_5]; + nnet::relu(layer5_out, layer7_out); // relu_2 + + layer8_t layer8_out[N_LAYER_8]; + nnet::dense(layer7_out, layer8_out, w8, b8); // Dense_3 + + layer10_t layer10_out[N_LAYER_8]; + nnet::relu(layer8_out, layer10_out); // relu_3 + + layer11_t layer11_out[N_LAYER_11]; + nnet::dense(layer10_out, layer11_out, w11, b11); // Dense_4 + + layer13_t layer13_out[N_LAYER_11]; + nnet::relu(layer11_out, layer13_out); // relu_4 + + layer14_t layer14_out[N_LAYER_14]; + nnet::dense(layer13_out, layer14_out, w14, b14); // Dense_5 + + layer16_t layer16_out[N_LAYER_14]; + nnet::relu(layer14_out, layer16_out); // relu_5 - layer3_t layer3_out[N_LAYER_3]; - layer3_t logits3[N_LAYER_3]; - 
nnet::compute_layer(layer2_out, logits3, w3, b3); - nnet::relu(logits3, layer3_out); + layer17_t layer17_out[N_LAYER_17]; + nnet::dense(layer16_out, layer17_out, w17, b17); // Dense_6 - result_t logits4[N_OUTPUTS]; - nnet::compute_layer(layer3_out, logits4, w4, b4); - result_t res[N_OUTPUTS]; - nnet::sigmoid(logits4, res); + result_t layer19_out[N_LAYER_17]; + nnet::sigmoid(layer17_out, layer19_out); // jetID_output - return res[0]; + result_t layer20_out[N_LAYER_20]; + nnet::dense(layer16_out, layer20_out, w20, b20); // pT_output + + // Return both pT correction and the NN ID + Tau_NN_Result nn_out; + nn_out.nn_pt_correction = layer20_out[0]; + nn_out.nn_id = layer19_out[0]; + + return nn_out; } + /* +// Uncomment for debugging purposes void TauNNIdHW::print() { for (unsigned i0 = 0; i0 < fNParticles_; i0++) { input_t pPt = input_t(fPt_.get()[i0]); @@ -78,30 +107,43 @@ void TauNNIdHW::print() { fprintf(file_, "\n"); } */ -result_t TauNNIdHW::compute(const l1t::PFCandidate &iSeed, std::vector &iParts) { + +Tau_NN_Result TauNNIdHW::compute(const l1t::PFCandidate &iSeed, std::vector &iParts) { + // Initialize the input vector for (unsigned i0 = 0; i0 < fNParticles_; i0++) { fPt_.get()[i0] = 0.; fEta_.get()[i0] = 0.; fPhi_.get()[i0] = 0.; fId_.get()[i0] = 0.; } + + // Sort the candidates by pT std::sort(iParts.begin(), iParts.end(), [](l1t::PFCandidate i, l1t::PFCandidate j) { return (pt_t(i.pt()) > pt_t(j.pt())); }); + + // Compute the values w.r.t to the seeds for (unsigned int i0 = 0; i0 < iParts.size(); i0++) { if (i0 >= fNParticles_) break; + fPt_.get()[i0] = pt_t(iParts[i0].pt()); fEta_.get()[i0] = etaphi_t(iSeed.eta() - iParts[i0].eta()); etaphi_t lDPhi = etaphi_t(iSeed.phi()) - etaphi_t(iParts[i0].phi()); etaphi_t lMPI = 3.1415; + if (lDPhi > lMPI) lDPhi = lDPhi - lMPI; if (lDPhi < -lMPI) lDPhi = lDPhi + lMPI; + fPhi_.get()[i0] = lDPhi; fId_.get()[i0] = id_t(iParts[i0].id()); } + + // Set the inputs SetNNVectorVar(); + + // Return the N outputs with the inputs 
return EvaluateNN(); } diff --git a/L1Trigger/Phase2L1ParticleFlow/test/make_l1ct_binaryFiles_cfg.py b/L1Trigger/Phase2L1ParticleFlow/test/make_l1ct_binaryFiles_cfg.py index 98fe13e76b47d..facae0439020f 100644 --- a/L1Trigger/Phase2L1ParticleFlow/test/make_l1ct_binaryFiles_cfg.py +++ b/L1Trigger/Phase2L1ParticleFlow/test/make_l1ct_binaryFiles_cfg.py @@ -37,7 +37,8 @@ "drop l1tPFClusters_*_*_*", "drop l1tPFTracks_*_*_*", "drop l1tPFCandidates_*_*_*", - "drop l1tTkPrimaryVertexs_*_*_*") + "drop l1tTkPrimaryVertexs_*_*_*"), + skipEvents = cms.untracked.uint32(0), ) process.load('Configuration.Geometry.GeometryExtended2026D88Reco_cff') @@ -51,6 +52,7 @@ process.load('L1Trigger.Phase2L1ParticleFlow.l1ctLayer1_cff') process.load('L1Trigger.Phase2L1ParticleFlow.l1ctLayer2EG_cff') +process.load('L1Trigger.Phase2L1ParticleFlow.l1pfJetMet_cff') process.load('L1Trigger.L1TTrackMatch.l1tGTTInputProducer_cfi') process.load('L1Trigger.L1TTrackMatch.l1tTrackSelectionProducer_cfi') process.l1tTrackSelectionProducer.processSimulatedTracks = False # these would need stubs, and are not used anyway @@ -58,15 +60,15 @@ from L1Trigger.Phase2L1GMT.gmt_cfi import l1tStandaloneMuons process.l1tSAMuonsGmt = l1tStandaloneMuons.clone() -from L1Trigger.Phase2L1ParticleFlow.l1SeedConePFJetEmulatorProducer_cfi import l1SeedConePFJetEmulatorProducer -from L1Trigger.Phase2L1ParticleFlow.l1tDeregionizerProducer_cfi import l1tDeregionizerProducer from L1Trigger.Phase2L1ParticleFlow.l1tJetFileWriter_cfi import l1tSeededConeJetFileWriter -process.l1tLayer2Deregionizer = l1tDeregionizerProducer.clone() -process.l1tLayer2SeedConeJetsCorrected = l1SeedConePFJetEmulatorProducer.clone(L1PFObjects = cms.InputTag('l1tLayer2Deregionizer', 'Puppi'), - doCorrections = cms.bool(True), - correctorFile = cms.string("L1Trigger/Phase2L1ParticleFlow/data/jecs/jecs_20220308.root"), - correctorDir = cms.string('L1PuppiSC4EmuJets')) -process.l1tLayer2SeedConeJetWriter = l1tSeededConeJetFileWriter.clone(jets = 
"l1tLayer2SeedConeJetsCorrected") +l1ctLayer2SCJetsProducts = cms.VPSet([cms.PSet(jets = cms.InputTag("l1tSC4PFL1PuppiCorrectedEmulator"), + nJets = cms.uint32(12), + mht = cms.InputTag("l1tSC4PFL1PuppiCorrectedEmulatorMHT"), + nSums = cms.uint32(2)), + cms.PSet(jets = cms.InputTag("l1tSC8PFL1PuppiCorrectedEmulator"), + nJets = cms.uint32(12)) + ]) +process.l1tLayer2SeedConeJetWriter = l1tSeededConeJetFileWriter.clone(collections = l1ctLayer2SCJetsProducts) process.l1tLayer1BarrelTDR = process.l1tLayer1Barrel.clone() process.l1tLayer1BarrelTDR.regionizerAlgo = cms.string("TDR") @@ -130,7 +132,9 @@ process.l1tLayer1HF + process.l1tLayer1 + process.l1tLayer2Deregionizer + - process.l1tLayer2SeedConeJetsCorrected + + process.l1tSC4PFL1PuppiCorrectedEmulator + + process.l1tSC4PFL1PuppiCorrectedEmulatorMHT + + process.l1tSC8PFL1PuppiCorrectedEmulator + # process.l1tLayer2SeedConeJetWriter + process.l1tLayer2EG ) @@ -149,7 +153,7 @@ ##################################################################################################################### ## Layer 2 seeded-cone jets if not args.patternFilesOFF: - process.runPF.insert(process.runPF.index(process.l1tLayer2SeedConeJetsCorrected)+1, process.l1tLayer2SeedConeJetWriter) + process.runPF.insert(process.runPF.index(process.l1tSC8PFL1PuppiCorrectedEmulator)+1, process.l1tLayer2SeedConeJetWriter) process.l1tLayer2SeedConeJetWriter.maxLinesPerFile = _eventsPerFile*54 if not args.dumpFilesOFF: @@ -171,13 +175,24 @@ del process.l1tLayer1HGCalNoTKTM18.regionizerAlgoParameters.nEndcaps del process.l1tLayer1HGCalNoTKTM18.regionizerAlgoParameters.nTkLinks del process.l1tLayer1HGCalNoTKTM18.regionizerAlgoParameters.nCaloLinks + process.l1tLayer1BarrelSerenityTM18 = process.l1tLayer1BarrelSerenity.clone() + process.l1tLayer1BarrelSerenityTM18.regionizerAlgo = "MiddleBufferMultififo" + process.l1tLayer1BarrelSerenityTM18.regionizerAlgoParameters = cms.PSet( + nTrack = process.l1tLayer1BarrelSerenity.regionizerAlgoParameters.nTrack, 
+ nCalo = process.l1tLayer1BarrelSerenity.regionizerAlgoParameters.nCalo, + nEmCalo = process.l1tLayer1BarrelSerenity.regionizerAlgoParameters.nEmCalo, + nMu = process.l1tLayer1BarrelSerenity.regionizerAlgoParameters.nMu, + ) + process.l1tLayer1BarrelSerenityTM18.boards = cms.VPSet(*[cms.PSet(regions = cms.vuint32(*range(18*i,18*i+18))) for i in range(3)]) process.runPF.insert(process.runPF.index(process.l1tLayer1HGCal)+1, process.l1tLayer1HGCalTM18) process.runPF.insert(process.runPF.index(process.l1tLayer1HGCalNoTK)+1, process.l1tLayer1HGCalNoTKTM18) + process.runPF.insert(process.runPF.index(process.l1tLayer1BarrelSerenity)+1, process.l1tLayer1BarrelSerenityTM18) if not args.patternFilesOFF: process.l1tLayer1HGCalTM18.patternWriters = cms.untracked.VPSet(*hgcalTM18WriterConfigs) process.l1tLayer1HGCalNoTKTM18.patternWriters = cms.untracked.VPSet(hgcalNoTKOutputTM18WriterConfig) + process.l1tLayer1BarrelSerenityTM18.patternWriters = cms.untracked.VPSet() if not args.dumpFilesOFF: - for det in "HGCalTM18", "HGCalNoTKTM18": + for det in "HGCalTM18", "HGCalNoTKTM18", "BarrelSerenityTM18": getattr(process, 'l1tLayer1'+det).dumpFileName = cms.untracked.string("TTbar_PU200_"+det+".dump") process.source.fileNames = [ '/store/cmst3/group/l1tr/gpetrucc/12_5_X/NewInputs125X/150223/TTbar_PU200/inputs125X_1.root' ] diff --git a/L1Trigger/RegionalCaloTrigger/plugins/L1RCTProducer.cc b/L1Trigger/RegionalCaloTrigger/plugins/L1RCTProducer.cc index bda67327d23f2..315e55c803ebb 100644 --- a/L1Trigger/RegionalCaloTrigger/plugins/L1RCTProducer.cc +++ b/L1Trigger/RegionalCaloTrigger/plugins/L1RCTProducer.cc @@ -4,6 +4,7 @@ #include "CondFormats/RunInfo/interface/RunInfo.h" #include "FWCore/Framework/interface/LuminosityBlock.h" #include "FWCore/Framework/interface/Run.h" +#include "FWCore/Utilities/interface/Exception.h" #include using std::vector; @@ -94,6 +95,13 @@ void L1RCTProducer::beginLuminosityBlock(edm::LuminosityBlock const &lumiSeg, co // LS, update the FED vector from 
OMDS can pass the flag as the bool?? but // only check LS number if flag is true anyhow if (getFedsFromOmds) { + throw cms::Exception("L1RCTProducer Configuration") + << "L1RCTProducer is being run with the configuration parameter getFedsFromOmds set true. " + << "Underlying Framework changes have broken the implementation of that option. " + << "It was not fixed because we believe this option is no longer used or needed. " + << "If you actually need this option, please report this failure to the Framework. " + << "For more details see GitHub Issue 43697."; + /* unsigned int nLumi = lumiSeg.luminosityBlock(); // doesn't even need the (unsigned int) cast // because LuminosityBlockNumber_t is already // an unsigned int @@ -117,6 +125,7 @@ void L1RCTProducer::beginLuminosityBlock(edm::LuminosityBlock const &lumiSeg, co } else if (queryIntervalInLS <= 0) { // don't do interval checking... cout message?? } + */ } } diff --git a/L1Trigger/TrackFindingTMTT/interface/L1fittedTrack.h b/L1Trigger/TrackFindingTMTT/interface/L1fittedTrack.h index 3461554157a5e..d9fcdb7f78ada 100644 --- a/L1Trigger/TrackFindingTMTT/interface/L1fittedTrack.h +++ b/L1Trigger/TrackFindingTMTT/interface/L1fittedTrack.h @@ -333,11 +333,18 @@ namespace tmtt { // Is the fitted track trajectory within the same (eta,phi) sector of the HT used to find it? bool consistentSector() const { - bool insidePhi = - (std::abs(reco::deltaPhi(this->phiAtChosenR(done_bcon_), secTmp_->phiCentre())) < secTmp_->sectorHalfWidth()); - bool insideEta = - (this->zAtChosenR() > secTmp_->zAtChosenR_Min() && this->zAtChosenR() < secTmp_->zAtChosenR_Max()); - return (insidePhi && insideEta); + if (settings_->hybrid()) { + float phiCentre = 2. 
* M_PI * iPhiSec() / settings_->numPhiSectors(); + float sectorHalfWidth = M_PI / settings_->numPhiSectors(); + bool insidePhi = (std::abs(reco::deltaPhi(this->phiAtChosenR(done_bcon_), phiCentre)) < sectorHalfWidth); + return insidePhi; + } else { + bool insidePhi = (std::abs(reco::deltaPhi(this->phiAtChosenR(done_bcon_), secTmp_->phiCentre())) < + secTmp_->sectorHalfWidth()); + bool insideEta = + (this->zAtChosenR() > secTmp_->zAtChosenR_Min() && this->zAtChosenR() < secTmp_->zAtChosenR_Max()); + return (insidePhi && insideEta); + } } // Digitize track and degrade helix parameter resolution according to effect of digitisation. diff --git a/L1Trigger/TrackFindingTMTT/plugins/BuildFile.xml b/L1Trigger/TrackFindingTMTT/plugins/BuildFile.xml index 6d68bf2342baa..02d08452e2a17 100644 --- a/L1Trigger/TrackFindingTMTT/plugins/BuildFile.xml +++ b/L1Trigger/TrackFindingTMTT/plugins/BuildFile.xml @@ -1,7 +1,7 @@ - - - - - - + + + + + + diff --git a/L1Trigger/TrackFindingTMTT/plugins/TMTrackProducer.cc b/L1Trigger/TrackFindingTMTT/plugins/TMTrackProducer.cc index e47691281030c..d55960e1399fd 100644 --- a/L1Trigger/TrackFindingTMTT/plugins/TMTrackProducer.cc +++ b/L1Trigger/TrackFindingTMTT/plugins/TMTrackProducer.cc @@ -28,7 +28,6 @@ namespace tmtt { namespace { std::once_flag printOnce; - std::once_flag callOnce; } // namespace std::unique_ptr TMTrackProducer::initializeGlobalCache(edm::ParameterSet const& iConfig) { diff --git a/L1Trigger/TrackFindingTMTT/test/tmtt_tf_analysis_cfg.py b/L1Trigger/TrackFindingTMTT/test/tmtt_tf_analysis_cfg.py index 59e9d0e9d2613..000187debb365 100644 --- a/L1Trigger/TrackFindingTMTT/test/tmtt_tf_analysis_cfg.py +++ b/L1Trigger/TrackFindingTMTT/test/tmtt_tf_analysis_cfg.py @@ -40,7 +40,7 @@ if GEOMETRY == "D88": # Read data from card files (defines getCMSdataFromCards()): - #from MCsamples.RelVal_1130_D76.PU200_TTbar_14TeV_cfi import * + #from MCsamples.RelVal_1260_D88.PU200_TTbar_14TeV_cfi import * #inputMC = getCMSdataFromCards() # Or read 
.root files from directory on local computer: @@ -48,11 +48,11 @@ #inputMC=getCMSlocaldata(dirName) # Or read specified dataset (accesses CMS DB, so use this method only occasionally): - #dataName="/RelValTTbar_14TeV/CMSSW_11_3_0_pre6-PU_113X_mcRun4_realistic_v6_2026D76PU200-v1/GEN-SIM-DIGI-RAW" + #dataName="/RelValTTbar_14TeV/CMSSW_12_6_0-PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/GEN-SIM-DIGI-RAW" #inputMC=getCMSdata(dataName) # Or read specified .root file: - inputMC = ["/store/relval/CMSSW_12_6_0_pre4/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v2_2026D88PU200-v1/2590000/00b3d04b-4c7b-4506-8d82-9538fb21ee19.root"] + inputMC = ["/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root"] else: print("this is not a valid geometry!!!") diff --git a/L1Trigger/TrackFindingTracklet/BuildFile.xml b/L1Trigger/TrackFindingTracklet/BuildFile.xml index f5666d006fdf3..2153ca6da6736 100644 --- a/L1Trigger/TrackFindingTracklet/BuildFile.xml +++ b/L1Trigger/TrackFindingTracklet/BuildFile.xml @@ -3,9 +3,9 @@ - + diff --git a/L1Trigger/TrackFindingTracklet/README.md b/L1Trigger/TrackFindingTracklet/README.md index a1aec2a9a969d..f7ef989634e98 100644 --- a/L1Trigger/TrackFindingTracklet/README.md +++ b/L1Trigger/TrackFindingTracklet/README.md @@ -1,19 +1,17 @@ -To run L1 tracking & create TTree of tracking performance: +To run the L1 tracking & create a TTree of tracking performance: cmsRun L1TrackNtupleMaker_cfg.py -By setting variable L1TRKALGO inside this script, you can change the -L1 tracking algo used. +By setting variable L1TRKALGO inside this script, you can change which +L1 tracking algo is used. It defaults to HYBRID. 
For the baseline HYBRID algo, which runs Tracklet pattern reco followed -by KF track fit, TrackFindingTracklet/interface/Settings.h configures the pattern reco, (although some -parameters there are overridden by l1tTTTracksFromTrackletEmulation_cfi.py). -The KF fit is configued by the constructor of TrackFindingTMTT/src/Settings.cc. +by KF track fit, TrackFindingTracklet/interface/Settings.h configures the pattern reco stage, (although some parameters there are overridden by l1tTTTracksFromTrackletEmulation_cfi.py). +The KF fit is configured by the constructor of TrackFindingTMTT/src/Settings.cc. The ROOT macros L1TrackNtuplePlot.C & L1TrackQualityPlot.C make tracking performance & BDT track quality performance plots from the TTree. Both can be run via makeHists.csh . -The optional "NewKF" track fit, (which is not yet baseline, as no duplicate -track removal is compatible with it), is configured via +The optional "NewKF" track fit can be run by changing L1TRKALGO=HYBRID_NEWKF. It corresponds to the current FW, but is not yet the default, as only a basic duplicate track removal is available for it. It is configured via TrackTrigger/python/ProducerSetup_cfi.py, (which also configures the DTC). 
diff --git a/L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h b/L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h index 15ffa038ef460..c4d644d41d56b 100644 --- a/L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h +++ b/L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h @@ -22,12 +22,28 @@ namespace trklet { ChannelAssignment() {} ChannelAssignment(const edm::ParameterSet& iConfig, const tt::Setup* setup); ~ChannelAssignment() {} - // sets channelId of given TTTrackRef from TrackBuilder or PurgeDuplicate (if enabled), return false if track outside pt range - bool channelId(const TTTrackRef& ttTrackRef, int& channelId); - // number of used channels for tracks + // returns channelId of given TTTrackRef from TrackBuilder + int channelId(const TTTrackRef& ttTrackRef) const; + // number of used TB channels for tracks int numChannelsTrack() const { return numChannelsTrack_; } - // number of used channels for stubs + // number of used TB channels for stubs int numChannelsStub() const { return numChannelsStub_; } + // number of bits used to represent layer id [barrel: 0-5, discs: 6-10] + int widthLayerId() const { return widthLayerId_; } + // number of bits used to represent stub id for projected stubs + int widthStubId() const { return widthStubId_; } + // number of bits used to represent stub id for seed stubs + int widthSeedStubId() const { return widthSeedStubId_; } + // number of bits used to distinguish between tilted and untilded barrel modules or 2S and PS endcap modules + int widthPSTilt() const { return widthPSTilt_; } + // depth of fifos within systolic array + int depthMemory() const { return depthMemory_; } + // number of comparison modules used in each DR node + int numComparisonModules() const { return numComparisonModules_; } + // min number of shared stubs to identify duplicates + int minIdenticalStubs() const { return minIdenticalStubs_; } + // number of DR nodes + int numNodesDR() const { return numNodesDR_; } // 
number of used seed types in tracklet algorithm int numSeedTypes() const { return numSeedTypes_; } // sets layerId (0-7 in sequence the seed type projects to) of given TTStubRef and seedType, returns false if seeed stub @@ -42,29 +58,49 @@ namespace trklet { int offsetStub(int channelTrack) const; // seed layers for given seed type id const std::vector& seedingLayers(int seedType) const { return seedTypesSeedLayers_.at(seedType); } - // + // returns SensorModule::Type for given TTStubRef tt::SensorModule::Type type(const TTStubRef& ttStubRef) const { return setup_->type(ttStubRef); } - // - int layerId(int seedType, int channel) const { return seedTypesProjectionLayers_.at(seedType).at(channel); } - // + // layers a seed types can project to using default layer id [barrel: 1-6, discs: 11-15] + int layerId(int seedType, int channel) const; + // returns TBout channel Id for given seed type and default layer id [barrel: 1-6, discs: 11-15], returns -1 if layerId and seedType are inconsistent int channelId(int seedType, int layerId) const; // max number of seeding layers int numSeedingLayers() const { return numSeedingLayers_; } + // return DR node for given ttTrackRef + int nodeDR(const TTTrackRef& ttTrackRef) const; private: // helper class to store configurations const tt::Setup* setup_; - // use tracklet seed type as channel id if False, binned track pt used if True - bool useDuplicateRemoval_; - // pt Boundaries in GeV, last boundary is infinity - std::vector boundaries_; + // DRin parameter + edm::ParameterSet pSetDRin_; + // number of bits used to represent layer id [barrel: 0-5, discs: 6-10] + int widthLayerId_; + // number of bits used to represent stub id for projected stubs + int widthStubId_; + // number of bits used to represent stub id for seed stubs + int widthSeedStubId_; + // number of bits used to distinguish between tilted and untilded barrel modules or 2S and PS endcap modules + int widthPSTilt_; + // depth of fifos within systolic array + int 
depthMemory_; + // positive pt Boundaries in GeV (symmetric negatives are assumed), first boundary is pt cut, last boundary is infinity, defining ot bins used by DR + std::vector ptBoundaries_; + // DRin parameter + edm::ParameterSet pSetDR_; + // number of comparison modules used in each DR node + int numComparisonModules_; + // min number of shared stubs to identify duplicates [default: 3] + int minIdenticalStubs_; + // number of DR nodes + int numNodesDR_; // seed type names std::vector seedTypeNames_; // number of used seed types in tracklet algorithm int numSeedTypes_; - // number of used channels for tracks + // number of used TB channels for tracks int numChannelsTrack_; - // number of used channels for stubs + // number of used TB channels for stubs int numChannelsStub_; // seeding layers of seed types using default layer id [barrel: 1-6, discs: 11-15] std::vector> seedTypesSeedLayers_; diff --git a/L1Trigger/TrackFindingTracklet/interface/DR.h b/L1Trigger/TrackFindingTracklet/interface/DR.h new file mode 100644 index 0000000000000..b3956fb0744ec --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/interface/DR.h @@ -0,0 +1,75 @@ +#ifndef L1Trigger_TrackFindingTracklet_DR_h +#define L1Trigger_TrackFindingTracklet_DR_h + +#include "L1Trigger/TrackTrigger/interface/Setup.h" +#include "L1Trigger/TrackerTFP/interface/DataFormats.h" +#include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" + +#include + +namespace trklet { + + /*! \class trklet::DR + * \brief Class to bit- and clock-accurate emulate duplicate removal + * DR identifies duplicates based on pairs of tracks that share stubs in at least 3 layers. + * It keeps the first such track in each pair. 
+ * \author Thomas Schuh + * \date 2023, Feb + */ + class DR { + public: + DR(const edm::ParameterSet& iConfig, + const tt::Setup* setup_, + const trackerTFP::DataFormats* dataFormats, + const ChannelAssignment* channelAssignment, + int region); + ~DR() {} + // read in and organize input tracks and stubs + void consume(const tt::StreamsTrack& streamsTrack, const tt::StreamsStub& streamsStub); + // fill output products + void produce(tt::StreamsStub& accpetedStubs, + tt::StreamsTrack& acceptedTracks, + tt::StreamsStub& lostStubs, + tt::StreamsTrack& lostTracks); + + private: + struct Stub { + Stub(const tt::FrameStub& frame, int stubId, int channel) : frame_(frame), stubId_(stubId), channel_(channel) {} + bool operator==(const Stub& s) const { return s.stubId_ == stubId_; } + tt::FrameStub frame_; + // all stubs id + int stubId_; + // kf layer id + int channel_; + }; + struct Track { + // max number of stubs a track may formed of (we allow only one stub per layer) + static constexpr int max_ = 7; + Track() { stubs_.reserve(max_); } + Track(const tt::FrameTrack& frame, const std::vector& stubs) : frame_(frame), stubs_(stubs) {} + tt::FrameTrack frame_; + std::vector stubs_; + }; + // compares two tracks, returns true if those are considered duplicates + bool equalEnough(Track* t0, Track* t1) const; + // true if truncation is enbaled + bool enableTruncation_; + // provides run-time constants + const tt::Setup* setup_; + // provides dataformats + const trackerTFP::DataFormats* dataFormats_; + // helper class to assign tracks to channel + const ChannelAssignment* channelAssignment_; + // processing region (0 - 8) aka processing phi nonant + const int region_; + // storage of input tracks + std::vector tracks_; + // storage of input stubs + std::vector stubs_; + // h/w liked organized pointer to input tracks + std::vector> input_; + }; + +} // namespace trklet + +#endif diff --git a/L1Trigger/TrackFindingTracklet/interface/DRin.h 
b/L1Trigger/TrackFindingTracklet/interface/DRin.h new file mode 100644 index 0000000000000..f18d985405823 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/interface/DRin.h @@ -0,0 +1,169 @@ +#ifndef L1Trigger_TrackFindingTracklet_DRin_h +#define L1Trigger_TrackFindingTracklet_DRin_h + +#include "L1Trigger/TrackTrigger/interface/Setup.h" +#include "L1Trigger/TrackerTFP/interface/DataFormats.h" +#include "L1Trigger/TrackerTFP/interface/LayerEncoding.h" +#include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" +#include "L1Trigger/TrackFindingTracklet/interface/Settings.h" + +#include +#include + +namespace trklet { + + /*! \class trklet::DRin + * \brief Class to emulate transformation of tracklet tracks and stubs into TMTT format + * and routing of seed type streams into inv2R streams + * \author Thomas Schuh + * \date 2023, Jan + */ + class DRin { + public: + DRin(const edm::ParameterSet& iConfig, + const tt::Setup* setup_, + const trackerTFP::DataFormats* dataFormats, + const trackerTFP::LayerEncoding* layerEncoding, + const ChannelAssignment* channelAssignment, + const Settings* settings, + int region); + ~DRin() {} + // read in and organize input tracks and stubs + void consume(const tt::StreamsTrack& streamsTrack, const tt::StreamsStub& streamsStub); + // fill output products + void produce(tt::StreamsStub& accpetedStubs, + tt::StreamsTrack& acceptedTracks, + tt::StreamsStub& lostStubs, + tt::StreamsTrack& lostTracks); + + private: + // truncates double precision of val into base precision, +1.e-12 restores robustness of addition of 2 digitised values + double digi(double val, double base) const { return (floor(val / base + 1.e-12) + .5) * base; } + // basetransformation of val from baseLow into baseHigh using widthMultiplier bit multiplication + double redigi(double val, double baseLow, double baseHigh, int widthMultiplier) const; + struct Stub { + Stub(const TTStubRef& ttStubRef, + int layer, + int layerDet, + bool seed, + int stubId, + double 
r, + double phi, + double z, + bool psTilt) + : valid_(true), + ttStubRef_(ttStubRef), + layer_(layer), + layerDet_(layerDet), + layerKF_(-1), + seed_(seed), + stubId_(stubId), + r_(r), + phi_(phi), + z_(z), + psTilt_(psTilt) {} + bool valid_; + TTStubRef ttStubRef_; + // layers a seed types can project to using default layer id [barrel: 1-6, discs: 11-15] + int layer_; + // layer id [0-5] barrel [6-10] end cap discs + int layerDet_; + // layer id [0-6] counted from inside-out along track + int layerKF_; + // true if stub was part of the seed + bool seed_; + // traclet stub id + int stubId_; + // radius w.r.t. chosenRofPhi in cm + double r_; + // phi residual in rad + double phi_; + // z residual in cm + double z_; + // true if barrel tilted module or encap PS module + bool psTilt_; + }; + struct Track { + static constexpr int max_ = 8; + Track() { stubs_.reserve(max_); } + Track(const TTTrackRef& ttTrackRef, + bool valid, + double inv2R, + double phiT, + double cot, + double zT, + const std::vector& stubs) + : ttTrackRef_(ttTrackRef), + valid_(valid), + sector_(-1), + inv2R_(inv2R), + phiT_(phiT), + cot_(cot), + zT_(zT), + stubs_(stubs) {} + TTTrackRef ttTrackRef_; + bool valid_; + TTBV maybe_; + int sector_; + double inv2R_; + double phiT_; + double cot_; + double zT_; + std::vector stubs_; + }; + // remove and return first element of deque, returns nullptr if empty + template + T* pop_front(std::deque& ts) const; + // true if truncation is enbaled + bool enableTruncation_; + // stub residuals are recalculated from seed parameter and TTStub position + bool useTTStubResiduals_; + // provides run-time constants + const tt::Setup* setup_; + // provides dataformats + const trackerTFP::DataFormats* dataFormats_; + // helper class to encode layer + const trackerTFP::LayerEncoding* layerEncoding_; + // helper class to assign tracks to channel + const ChannelAssignment* channelAssignment_; + // provides tracklet constants + const Settings* settings_; + // processing 
region (0 - 8) aka processing phi nonant + const int region_; + // storage of input tracks + std::vector tracks_; + // storage of input stubs + std::vector stubs_; + // h/w liked organized pointer to input tracks + std::vector> input_; + // unified tracklet digitisation granularity + double baseUinv2R_; + double baseUphiT_; + double baseUcot_; + double baseUzT_; + double baseUr_; + double baseUphi_; + double baseUz_; + // KF input format digitisation granularity (identical to TMTT) + double baseLinv2R_; + double baseLphiT_; + double baseLcot_; + double baseLzT_; + double baseLr_; + double baseLphi_; + double baseLz_; + // Finer granularity (by powers of 2) than the TMTT one. Used to transform from Tracklet to TMTT base. + double baseHinv2R_; + double baseHphiT_; + double baseHcot_; + double baseHzT_; + double baseHr_; + double baseHphi_; + double baseHz_; + // digitisation granularity used for inverted cot(theta) + double baseInvCot_; + }; + +} // namespace trklet + +#endif diff --git a/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculator.h b/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculator.h index 4a14ef1740ec8..15814a40643eb 100644 --- a/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculator.h +++ b/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculator.h @@ -147,9 +147,9 @@ namespace trklet { VarParam plus2{globals_, "plus2", 2., 10}; VarParam minus1{globals_, "minus1", -1., 10}; - VarParam r1mean{globals_, "r1mean", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarParam r2mean{globals_, "r2mean", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarParam r12mean{globals_, "r12mean", "Kr", 2 * settings_.rmax(trklet::N_DISK - 1), settings_.kr()}; + VarParam r1mean{globals_, "r1mean", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarParam r2mean{globals_, "r2mean", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarParam r12mean{globals_, "r12mean", "Kr", 2 * 
settings_.rmax(N_DISK - 1), settings_.kr()}; //inputs VarDef r1{globals_, "r1", "Kr", settings_.drmax(), settings_.kr()}; @@ -161,22 +161,22 @@ namespace trklet { VarDef phi1{globals_, "phi1", "Kphi", settings_.dphisector() / 0.75, settings_.kphi1()}; VarDef phi2{globals_, "phi2", "Kphi", settings_.dphisector() / 0.75, settings_.kphi1()}; - VarDef rproj0{globals_, "rproj0", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef rproj1{globals_, "rproj1", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef rproj2{globals_, "rproj2", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef rproj3{globals_, "rproj3", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; + VarDef rproj0{globals_, "rproj0", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj1{globals_, "rproj1", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj2{globals_, "rproj2", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj3{globals_, "rproj3", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; - VarDef zproj0{globals_, "zproj0", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj1{globals_, "zproj1", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj2{globals_, "zproj2", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj3{globals_, "zproj3", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj4{globals_, "zproj4", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; + VarDef zproj0{globals_, "zproj0", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj1{globals_, "zproj1", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj2{globals_, "zproj2", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj3{globals_, "zproj3", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj4{globals_, "zproj4", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; 
//calculations //tracklet - VarAdd r1abs{globals_, "r1abs", &r1, &r1mean, settings_.rmax(trklet::N_LAYER - 1)}; - VarAdd r2abs{globals_, "r2abs", &r2, &r2mean, settings_.rmax(trklet::N_LAYER - 1)}; + VarAdd r1abs{globals_, "r1abs", &r1, &r1mean, settings_.rmax(N_LAYER - 1)}; + VarAdd r2abs{globals_, "r2abs", &r2, &r2mean, settings_.rmax(N_LAYER - 1)}; VarSubtract dr{globals_, "dr", &r2, &r1}; diff --git a/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorDisk.h b/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorDisk.h index 351521be5ccea..cb80405b2344b 100644 --- a/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorDisk.h +++ b/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorDisk.h @@ -136,31 +136,31 @@ namespace trklet { VarParam plus1{globals_, "plus1", 1., 10}; VarParam minus1{globals_, "minus1", -1, 10}; - VarParam z1mean{globals_, "z1mean", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarParam z2mean{globals_, "z2mean", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; + VarParam z1mean{globals_, "z1mean", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarParam z2mean{globals_, "z2mean", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; //inputs - VarDef r1{globals_, "r1", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef r2{globals_, "r2", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; + VarDef r1{globals_, "r1", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef r2{globals_, "r2", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; VarDef z1{globals_, "z1", "Kz", settings_.dzmax(), settings_.kz()}; VarDef z2{globals_, "z2", "Kz", settings_.dzmax(), settings_.kz()}; VarDef phi1{globals_, "phi1", "Kphi", settings_.dphisector() / 0.75, settings_.kphi1()}; VarDef phi2{globals_, "phi2", "Kphi", settings_.dphisector() / 0.75, settings_.kphi1()}; - VarDef rproj0{globals_, "rproj0", "Kr", settings_.rmax(trklet::N_LAYER - 1), 
settings_.kr()}; - VarDef rproj1{globals_, "rproj1", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef rproj2{globals_, "rproj2", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; + VarDef rproj0{globals_, "rproj0", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj1{globals_, "rproj1", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj2{globals_, "rproj2", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; - VarDef zproj0{globals_, "zproj0", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj1{globals_, "zproj1", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj2{globals_, "zproj2", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; + VarDef zproj0{globals_, "zproj0", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj1{globals_, "zproj1", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj2{globals_, "zproj2", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; //calculations //tracklet - VarAdd z1abs{globals_, "z1abs", &z1, &z1mean, settings_.zmax(trklet::N_DISK - 1)}; - VarAdd z2abs{globals_, "z2abs", &z2, &z2mean, settings_.zmax(trklet::N_DISK - 1)}; + VarAdd z1abs{globals_, "z1abs", &z1, &z1mean, settings_.zmax(N_DISK - 1)}; + VarAdd z2abs{globals_, "z2abs", &z2, &z2mean, settings_.zmax(N_DISK - 1)}; VarSubtract dr{globals_, "dr", &r2, &r1, dr_max}; diff --git a/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorOverlap.h b/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorOverlap.h index 835ada017311a..f6b77ca73f8f6 100644 --- a/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorOverlap.h +++ b/L1Trigger/TrackFindingTracklet/interface/IMATH_TrackletCalculatorOverlap.h @@ -141,32 +141,32 @@ namespace trklet { VarParam minus1{globals_, "minus1", -1, 10}; // // - VarParam r1mean{globals_, "r1mean", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarParam 
z2mean{globals_, "z2mean", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; + VarParam r1mean{globals_, "r1mean", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarParam z2mean{globals_, "z2mean", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; //inputs VarDef r1{globals_, "r1", "Kr", settings_.drmax(), settings_.kr()}; - VarDef r2{globals_, "r2", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; + VarDef r2{globals_, "r2", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; VarDef z1{globals_, "z1", "Kz", settings_.zlength(), settings_.kz()}; VarDef z2{globals_, "z2", "Kz", settings_.dzmax(), settings_.kz()}; VarDef phi1{globals_, "phi1", "Kphi", settings_.dphisector() / 0.75, settings_.kphi1()}; VarDef phi2{globals_, "phi2", "Kphi", settings_.dphisector() / 0.75, settings_.kphi1()}; - VarDef rproj0{globals_, "rproj0", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef rproj1{globals_, "rproj1", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; - VarDef rproj2{globals_, "rproj2", "Kr", settings_.rmax(trklet::N_LAYER - 1), settings_.kr()}; + VarDef rproj0{globals_, "rproj0", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj1{globals_, "rproj1", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; + VarDef rproj2{globals_, "rproj2", "Kr", settings_.rmax(N_LAYER - 1), settings_.kr()}; - VarDef zproj0{globals_, "zproj0", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj1{globals_, "zproj1", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj2{globals_, "zproj2", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; - VarDef zproj3{globals_, "zproj3", "Kz", settings_.zmax(trklet::N_DISK - 1), settings_.kz()}; + VarDef zproj0{globals_, "zproj0", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj1{globals_, "zproj1", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; + VarDef zproj2{globals_, "zproj2", "Kz", settings_.zmax(N_DISK - 
1), settings_.kz()}; + VarDef zproj3{globals_, "zproj3", "Kz", settings_.zmax(N_DISK - 1), settings_.kz()}; //calculations //tracklet - VarAdd r1abs{globals_, "r1abs", &r1, &r1mean, settings_.rmax(trklet::N_LAYER - 1)}; - VarAdd z2abs{globals_, "z2abs", &z2, &z2mean, settings_.zmax(trklet::N_DISK - 1)}; + VarAdd r1abs{globals_, "r1abs", &r1, &r1mean, settings_.rmax(N_LAYER - 1)}; + VarAdd z2abs{globals_, "z2abs", &z2, &z2mean, settings_.zmax(N_DISK - 1)}; VarSubtract dr{globals_, "dr", &r2, &r1abs, dr_max}; diff --git a/L1Trigger/TrackFindingTracklet/interface/KFin.h b/L1Trigger/TrackFindingTracklet/interface/KFin.h index 63d9a05a216d1..9408c83e23a38 100644 --- a/L1Trigger/TrackFindingTracklet/interface/KFin.h +++ b/L1Trigger/TrackFindingTracklet/interface/KFin.h @@ -5,16 +5,15 @@ #include "L1Trigger/TrackerTFP/interface/DataFormats.h" #include "L1Trigger/TrackerTFP/interface/LayerEncoding.h" #include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" -#include "L1Trigger/TrackFindingTracklet/interface/Settings.h" #include namespace trklet { /*! 
\class trklet::KFin - * \brief Class to emulate transformation of tracklet tracks and stubs into TMTT format + * \brief Class to emulate the data transformation happening betwwen DR and KF * \author Thomas Schuh - * \date 2021, Dec + * \date 2023, Feb */ class KFin { public: @@ -23,10 +22,8 @@ namespace trklet { const trackerTFP::DataFormats* dataFormats, const trackerTFP::LayerEncoding* layerEncoding, const ChannelAssignment* channelAssignment, - const Settings* settings, int region); ~KFin() {} - // read in and organize input tracks and stubs void consume(const tt::StreamsTrack& streamsTrack, const tt::StreamsStub& streamsStub); // fill output products @@ -38,60 +35,43 @@ namespace trklet { private: // truncates double precision of val into base precision, +1.e-12 restores robustness of addition of 2 digitised values double digi(double val, double base) const { return (floor(val / base + 1.e-12) + .5) * base; } - // basetransformation of val from baseLow into baseHigh using widthMultiplier bit multiplication - double redigi(double val, double baseLow, double baseHigh, int widthMultiplier) const; struct Stub { - Stub(const TTStubRef& ttStubRef, int layer, double r, double phi, double z, bool psTilt) - : valid_(true), ttStubRef_(ttStubRef), layer_(layer), r_(r), phi_(phi), z_(z), psTilt_(psTilt) {} - bool valid_; + Stub(const TTStubRef& ttStubRef, double r, double phi, double z, int layerId, bool psTilt, int channel) + : ttStubRef_(ttStubRef), r_(r), phi_(phi), z_(z), layerId_(layerId), psTilt_(psTilt), channel_(channel) {} TTStubRef ttStubRef_; - int layer_; - // radius w.r.t. 
chosenRofPhi in cm double r_; - // phi residual in rad double phi_; - // z residual in cm double z_; + int layerId_; + bool psTilt_; + int channel_; // phi uncertainty * sqrt(12) + additional terms in rad double dPhi_; // z uncertainty * sqrt(12) + additional terms in cm double dZ_; - // true if barrel tilted module or encap PS module - bool psTilt_; }; struct Track { - static constexpr int max_ = 8; + static constexpr int max_ = 7; Track() { stubs_.reserve(max_); } - Track(const TTTrackRef& ttTrackRef, - bool valid, - double inv2R, - double phiT, + Track(const tt::FrameTrack& frame, + const std::vector& stubs, double cot, double zT, - const std::vector& stubs) - : ttTrackRef_(ttTrackRef), - valid_(valid), - sector_(-1), - inv2R_(inv2R), - phiT_(phiT), - cot_(cot), - zT_(zT), - stubs_(stubs) {} - TTTrackRef ttTrackRef_; - bool valid_; - TTBV maybe_; - int sector_; - double inv2R_; - double phiT_; + double inv2R, + int sectorEta) + : frame_(frame), stubs_(stubs), cot_(cot), zT_(zT), inv2R_(inv2R), sectorEta_(sectorEta) {} + tt::FrameTrack frame_; + std::vector stubs_; double cot_; double zT_; - std::vector stubs_; + double inv2R_; + int sectorEta_; }; - + // remove and return first element of deque, returns nullptr if empty + template + T* pop_front(std::deque& ts) const; // true if truncation is enbaled bool enableTruncation_; - // stub residuals are recalculated from seed parameter and TTStub position - bool useTTStubResiduals_; // provides run-time constants const tt::Setup* setup_; // provides dataformats @@ -100,8 +80,6 @@ namespace trklet { const trackerTFP::LayerEncoding* layerEncoding_; // helper class to assign tracks to channel const ChannelAssignment* channelAssignment_; - // provides tracklet constants - const Settings* settings_; // processing region (0 - 8) aka processing phi nonant const int region_; // storage of input tracks @@ -110,32 +88,6 @@ namespace trklet { std::vector stubs_; // h/w liked organized pointer to input tracks std::vector> input_; 
- // unified tracklet digitisation granularity - double baseUinv2R_; - double baseUphiT_; - double baseUcot_; - double baseUzT_; - double baseUr_; - double baseUphi_; - double baseUz_; - // KF input format digitisation granularity (identical to TMTT) - double baseLinv2R_; - double baseLphiT_; - double baseLcot_; - double baseLzT_; - double baseLr_; - double baseLphi_; - double baseLz_; - // Finer granularity (by powers of 2) than the TMTT one. Used to transform from Tracklet to TMTT base. - double baseHinv2R_; - double baseHphiT_; - double baseHcot_; - double baseHzT_; - double baseHr_; - double baseHphi_; - double baseHz_; - // digitisation granularity used for inverted cot(theta) - double baseInvCot_; }; } // namespace trklet diff --git a/L1Trigger/TrackFindingTracklet/interface/MatchEngineUnit.h b/L1Trigger/TrackFindingTracklet/interface/MatchEngineUnit.h index 2fbd253507b51..e4b5bf3cd7bc5 100644 --- a/L1Trigger/TrackFindingTracklet/interface/MatchEngineUnit.h +++ b/L1Trigger/TrackFindingTracklet/interface/MatchEngineUnit.h @@ -56,7 +56,7 @@ namespace trklet { bool idle() const { return idle_; } - bool active() const { return !idle_ || good__ || good___ || !empty(); } + bool active() const { return !idle_ || good__ || good____ || !empty(); } void setAlmostFull(); @@ -80,7 +80,7 @@ namespace trklet { VMStubsMEMemory* vmstubsmemory_; unsigned int nrzbins_; - unsigned int rzbin_; + unsigned int rzbin_, rzbin__, rzbin____, rzbin___; unsigned int phibin_; int shift_; @@ -99,6 +99,9 @@ namespace trklet { unsigned int layerdisk_; + //The minimum radius for 2s disks in projection bins + unsigned int ir2smin_; + //Save state at the start of istep bool almostfullsave_; @@ -107,14 +110,14 @@ namespace trklet { //Various manually pipelined variables //Each _ represents a layer of pipelining - //e.g., good__ is set and one iteration later good___ is updated - VMStubME vmstub__, vmstub__t, vmstub___; - bool isPSseed__, isPSseed__t, isPSseed___; - bool good__, good__t, 
good___; - int projfinerz__, projfinerz__t, projfinerz___; - int projfinephi__, projfinephi__t, projfinephi___; - int projrinv__, projrinv__t, projrinv___; - Tracklet *proj__, *proj__t, *proj___; + //e.g., good__ is set and one iteration later good____ is updated + VMStubME vmstub__, vmstub___, vmstub____; + bool isPSseed__, isPSseed___, isPSseed____; + bool good__, good___, good____; + int projfinerz__, projfinerz___, projfinerz____; + int projfinephi__, projfinephi___, projfinephi____; + int projrinv__, projrinv___, projrinv____; + Tracklet *proj__, *proj___, *proj____; //save the candidate matches CircularBuffer> candmatches_; diff --git a/L1Trigger/TrackFindingTracklet/interface/PurgeDuplicate.h b/L1Trigger/TrackFindingTracklet/interface/PurgeDuplicate.h index 03a4c510d4698..26dcda9d9e7e5 100644 --- a/L1Trigger/TrackFindingTracklet/interface/PurgeDuplicate.h +++ b/L1Trigger/TrackFindingTracklet/interface/PurgeDuplicate.h @@ -33,7 +33,7 @@ namespace trklet { void addOutput(MemoryBase* memory, std::string output) override; void addInput(MemoryBase* memory, std::string input) override; - void execute(std::vector& outputtracks_, unsigned int iSector); + void execute(std::vector& outputtracks, unsigned int iSector); private: double getPhiRes(Tracklet* curTracklet, const Stub* curStub) const; @@ -48,10 +48,14 @@ namespace trklet { std::vector getInventedSeedingStub(unsigned int, const Tracklet*, const std::vector&) const; - // return the regular rinvbins which contain the input tracklet - unsigned int findVarRInvBin(const Tracklet* trk) const; - // return the overlap rinvbins which contain the input tracklet - std::vector findOverlapRInvBins(const Tracklet* trk) const; + // return the regular rinv bins which contain the input tracklet + unsigned int findRinvBin(const Tracklet* trk) const; + // return the regular phi bins which contain the input tracklet + unsigned int findPhiBin(const Tracklet* trk) const; + // return the overlap rinv bins which contain the input 
tracklet + std::vector findOverlapRinvBins(const Tracklet* trk) const; + // return the overlap phi bins which contain the input tracklet + std::vector findOverlapPhiBins(const Tracklet* trk) const; // sort the tracklets into the correct bin by comparing the overlap rinv bin(s) the tracklets are in to the current bin bool isTrackInBin(const std::vector& vec, unsigned int num) const; diff --git a/L1Trigger/TrackFindingTracklet/interface/Settings.h b/L1Trigger/TrackFindingTracklet/interface/Settings.h index b5542ef6ae03a..eb72ad0b6e9a8 100644 --- a/L1Trigger/TrackFindingTracklet/interface/Settings.h +++ b/L1Trigger/TrackFindingTracklet/interface/Settings.h @@ -58,9 +58,11 @@ namespace trklet { Settings() { //Comment out to run tracklet-only algorithm #ifdef CMSSW_GIT_HASH +#ifndef CMS_DICT_IMPL // Don't print message if genreflex being run. #ifndef USEHYBRID #pragma message "USEHYBRID is undefined, so Hybrid L1 tracking disabled." #endif +#endif #endif } @@ -290,12 +292,16 @@ namespace trklet { void setStripLength_2S(double stripLength_2S) { stripLength_2S_ = stripLength_2S; } //Following functions are used for duplicate removal - //Function which gets the value corresponding to the overlap size for the overlap rinv bins in DR - double overlapSize() const { return overlapSize_; } - //Function which gets the value corresponding to the number of tracks that are compared to all the other tracks per rinv bin + //Function which returns the value corresponding to the overlap size for the overlap rinv bins in DR + double rinvOverlapSize() const { return rinvOverlapSize_; } + //Function which returns the value corresponding to the overlap size for the overlap phi bins in DR + double phiOverlapSize() const { return phiOverlapSize_; } + //Function which returns the value corresponding to the number of tracks that are compared to all the other tracks per rinv bin unsigned int numTracksComparedPerBin() const { return numTracksComparedPerBin_; } - //Grabs the bin edges you need 
for duplicate removal bins - const std::vector varRInvBins() const { return varRInvBins_; } + //Returns the rinv bin edges you need for duplicate removal bins + const std::vector& rinvBins() const { return rinvBins_; } + //Returns the phi bin edges you need for duplicate removal bins + const std::vector& phiBins() const { return phiBins_; } std::string skimfile() const { return skimfile_; } void setSkimfile(std::string skimfile) { skimfile_ = skimfile; } @@ -1041,12 +1047,17 @@ namespace trklet { double stripLength_2S_{5.0250}; //Following values are used for duplicate removal - //Variable bin edges for 6 bins. - std::vector varRInvBins_{-rinvcut(), -0.004968, -0.003828, 0, 0.003828, 0.004968, rinvcut()}; + //Rinv bins were optimised to ensure a similar number of tracks in each bin prior to DR + //Rinv bin edges for 6 bins. + std::vector rinvBins_{-rinvcut(), -0.004968, -0.003828, 0, 0.003828, 0.004968, rinvcut()}; + //Phi bin edges for 2 bins. + std::vector phiBins_{0, dphisectorHG() / 2, dphisectorHG()}; //Overlap size for the overlap rinv bins in DR - double overlapSize_{0.0004}; + double rinvOverlapSize_{0.0004}; + //Overlap size for the overlap phi bins in DR + double phiOverlapSize_{M_PI / 360}; //The maximum number of tracks that are compared to all the other tracks per rinv bin - int numTracksComparedPerBin_{64}; + int numTracksComparedPerBin_{32}; double sensorSpacing_2S_{0.18}; }; diff --git a/L1Trigger/TrackFindingTracklet/interface/Stub.h b/L1Trigger/TrackFindingTracklet/interface/Stub.h index 90c8f7f1dbd80..6a2dc6bd21f6f 100644 --- a/L1Trigger/TrackFindingTracklet/interface/Stub.h +++ b/L1Trigger/TrackFindingTracklet/interface/Stub.h @@ -43,13 +43,18 @@ namespace trklet { unsigned int nbitsfinephi = 8; FPGAWord finephi( phicorr_.bits(phicorr_.nbits() - nbitsfinephi, nbitsfinephi), nbitsfinephi, true, __LINE__, __FILE__); - return str() + "|" + stubindex_.str() + "|" + finephi.str(); + if (layer_.value() == -1) { + return str() + "|" + negdisk_.str() + 
"|" + stubindex_.str() + "|" + finephi.str(); + } else { + return str() + "|" + stubindex_.str() + "|" + finephi.str(); + } } FPGAWord allStubIndex() const { return stubindex_; } unsigned int phiregionaddress() const; std::string phiregionaddressstr() const; + std::string phiregionstr() const; void setAllStubIndex(int nstub); //should migrate away from using this method @@ -59,6 +64,7 @@ namespace trklet { const FPGAWord& r() const { return r_; } const FPGAWord& z() const { return z_; } + const FPGAWord& negdisk() const { return negdisk_; } const FPGAWord& phi() const { return phi_; } const FPGAWord& phicorr() const { return phicorr_; } const FPGAWord& alpha() const { return alpha_; } @@ -87,6 +93,7 @@ namespace trklet { FPGAWord disk_; FPGAWord r_; FPGAWord z_; + FPGAWord negdisk_; FPGAWord phi_; FPGAWord alpha_; diff --git a/L1Trigger/TrackFindingTracklet/interface/StubKiller.h b/L1Trigger/TrackFindingTracklet/interface/StubKiller.h new file mode 100644 index 0000000000000..8cc656adcf2e6 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/interface/StubKiller.h @@ -0,0 +1,60 @@ +#ifndef __STUBKILLER_H__ +#define __STUBKILLER_H__ + +#include "DataFormats/L1TrackTrigger/interface/TTTypes.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "Geometry/CommonTopologies/interface/PixelGeomDetUnit.h" +#include "Geometry/CommonTopologies/interface/PixelTopology.h" +#include "TRandom3.h" +#include "TMath.h" + +class StubKiller { +public: + StubKiller(); + ~StubKiller() {} + + void initialise(unsigned int killScenario, + const TrackerTopology* trackerTopology, + const TrackerGeometry* trackerGeometry); + + bool killStub(const TTStub* stub, + const std::vector layersToKill, + const double minPhiToKill, + const double maxPhiToKill, + const double minZToKill, + const double maxZToKill, + const double minRToKill, + const double maxRToKill, + const double 
fractionOfStubsToKillInLayers, + const double fractionOfStubsToKillEverywhere); + + bool killStub(const TTStub* stub); + + bool killStubInDeadModule(const TTStub* stub); + + std::map getListOfDeadModules() { return deadModules_; } + +private: + void chooseModulesToKill(); + void addDeadLayerModulesToDeadModuleList(); + + unsigned int killScenario_; + const TrackerTopology* trackerTopology_; + const TrackerGeometry* trackerGeometry_; + + std::vector layersToKill_; + double minPhiToKill_; + double maxPhiToKill_; + double minZToKill_; + double maxZToKill_; + double minRToKill_; + double maxRToKill_; + double fractionOfStubsToKillInLayers_; + double fractionOfStubsToKillEverywhere_; + double fractionOfModulesToKillEverywhere_; + + std::map deadModules_; +}; + +#endif diff --git a/L1Trigger/TrackFindingTracklet/interface/Tracklet.h b/L1Trigger/TrackFindingTracklet/interface/Tracklet.h index 2e31286e775fe..3aa11e765d602 100644 --- a/L1Trigger/TrackFindingTracklet/interface/Tracklet.h +++ b/L1Trigger/TrackFindingTracklet/interface/Tracklet.h @@ -58,11 +58,11 @@ namespace trklet { bool stubtruthmatch(const L1TStub* stub); - const Stub* innerFPGAStub() { return innerFPGAStub_; } + const Stub* innerFPGAStub() const { return innerFPGAStub_; } - const Stub* middleFPGAStub() { return middleFPGAStub_; } + const Stub* middleFPGAStub() const { return middleFPGAStub_; } - const Stub* outerFPGAStub() { return outerFPGAStub_; } + const Stub* outerFPGAStub() const { return outerFPGAStub_; } std::string addressstr(); diff --git a/L1Trigger/TrackFindingTracklet/plugins/BuildFile.xml b/L1Trigger/TrackFindingTracklet/plugins/BuildFile.xml index df534e5516622..f0c02190bf428 100644 --- a/L1Trigger/TrackFindingTracklet/plugins/BuildFile.xml +++ b/L1Trigger/TrackFindingTracklet/plugins/BuildFile.xml @@ -1,9 +1,6 @@ - - - diff --git a/L1Trigger/TrackFindingTracklet/plugins/L1FPGATrackProducer.cc b/L1Trigger/TrackFindingTracklet/plugins/L1FPGATrackProducer.cc index c67565196e501..5f130d07dead1 
100644 --- a/L1Trigger/TrackFindingTracklet/plugins/L1FPGATrackProducer.cc +++ b/L1Trigger/TrackFindingTracklet/plugins/L1FPGATrackProducer.cc @@ -87,6 +87,7 @@ #include "L1Trigger/TrackFindingTracklet/interface/Tracklet.h" #include "L1Trigger/TrackFindingTracklet/interface/Residual.h" #include "L1Trigger/TrackFindingTracklet/interface/Stub.h" +#include "L1Trigger/TrackFindingTracklet/interface/StubKiller.h" #include "L1Trigger/TrackFindingTracklet/interface/StubStreamData.h" #include "L1Trigger/TrackFindingTracklet/interface/HitPatternHelper.h" @@ -170,6 +171,10 @@ class L1FPGATrackProducer : public edm::one::EDProducer { // event processor for the tracklet track finding trklet::TrackletEventProcessor eventProcessor; + // used to "kill" stubs from a selected area of the detector + StubKiller* stubKiller_; + int failScenario_; + unsigned int nHelixPar_; bool extended_; bool reduced_; @@ -255,6 +260,8 @@ L1FPGATrackProducer::L1FPGATrackProducer(edm::ParameterSet const& iConfig) memoryModulesFile = iConfig.getParameter("memoryModulesFile"); wiresFile = iConfig.getParameter("wiresFile"); + failScenario_ = iConfig.getUntrackedParameter("FailScenario", 0); + extended_ = iConfig.getParameter("Extended"); reduced_ = iConfig.getParameter("Reduced"); nHelixPar_ = iConfig.getParameter("Hnpar"); @@ -398,6 +405,17 @@ void L1FPGATrackProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe const TrackerTopology* const tTopo = &iSetup.getData(esGetTokenTTopo_); const TrackerGeometry* const theTrackerGeom = &iSetup.getData(esGetTokenTGeom_); + // check killing stubs for detector degradation studies + // if failType = 0, StubKiller does not kill any modules + int failType = 0; + if (failScenario_ < 0 || failScenario_ > 9) { + edm::LogVerbatim("Tracklet") << "Invalid fail scenario! 
Ignoring input"; + } else + failType = failScenario_; + + stubKiller_ = new StubKiller(); + stubKiller_->initialise(failType, tTopo, theTrackerGeom); + //////////////////////// // GET THE PRIMITIVES // edm::Handle handleDTC; @@ -602,28 +620,33 @@ void L1FPGATrackProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe const unsigned int intDetId = innerDetId.rawId(); - ev.addStub(dtcname, - region, - layerdisk, - stubwordhex, - setup_->psModule(dtcId), - isFlipped, - tiltedBarrel, - tiltedRingId, - endcapRingId, - intDetId, - ttPos.x(), - ttPos.y(), - ttPos.z(), - stubbend, - stubRef->innerClusterPosition(), - assocTPs, - theStubIndex); - - const trklet::L1TStub& lastStub = ev.lastStub(); - stubMap[lastStub] = stubRef; - stubIndexMap[lastStub.uniqueIndex()] = stub.first; - theStubIndex++; + // check killing stubs for detector degredation studies + const TTStub* theStub = &(*stubRef); + bool killThisStub = stubKiller_->killStub(theStub); + if (!killThisStub) { + ev.addStub(dtcname, + region, + layerdisk, + stubwordhex, + setup_->psModule(dtcId), + isFlipped, + tiltedBarrel, + tiltedRingId, + endcapRingId, + intDetId, + ttPos.x(), + ttPos.y(), + ttPos.z(), + stubbend, + stubRef->innerClusterPosition(), + assocTPs, + theStubIndex); + + const trklet::L1TStub& lastStub = ev.lastStub(); + stubMap[lastStub] = stubRef; + stubIndexMap[lastStub.uniqueIndex()] = stub.first; + theStubIndex++; + } } } } @@ -637,13 +660,18 @@ void L1FPGATrackProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe const std::vector& tracks = eventProcessor.tracks(); + // max number of projection layers const unsigned int maxNumProjectionLayers = channelAssignment_->maxNumProjectionLayers(); // number of track channels const unsigned int numStreamsTrack = N_SECTOR * channelAssignment_->numChannelsTrack(); // number of stub channels const unsigned int numStreamsStub = N_SECTOR * channelAssignment_->numChannelsStub(); + // number of seeding layers + const unsigned int 
numSeedingLayers = channelAssignment_->numSeedingLayers(); + // max number of stub channel per track + const unsigned int numStubChannel = maxNumProjectionLayers + numSeedingLayers; // number of stub channels if all seed types streams padded to have same number of stub channels (for coding simplicity) - const unsigned int numStreamsStubRaw = numStreamsTrack * maxNumProjectionLayers; + const unsigned int numStreamsStubRaw = numStreamsTrack * numStubChannel; // Streams formatted to allow this code to run outside CMSSW. vector> streamsTrackRaw(numStreamsTrack); @@ -744,23 +772,23 @@ void L1FPGATrackProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe int iSeed = chanTrk % channelAssignment_->numChannelsTrack(); // seed type streamsTrack[chanTrk].emplace_back(bitsTrk); - const unsigned int chanStubOffsetIn = chanTrk * maxNumProjectionLayers; + const unsigned int chanStubOffsetIn = chanTrk * numStubChannel; const unsigned int chanStubOffsetOut = channelAssignment_->offsetStub(chanTrk); const unsigned int numProjLayers = channelAssignment_->numProjectionLayers(iSeed); - TTBV hitMap(0, numProjLayers); + TTBV hitMap(0, numProjLayers + numSeedingLayers); // remove padding from stub stream - for (unsigned int iproj = 0; iproj < maxNumProjectionLayers; iproj++) { + for (unsigned int iproj = 0; iproj < numStubChannel; iproj++) { // FW current has one (perhaps invalid) stub per layer per track. 
const StubStreamData& stubdata = streamsStubRaw[chanStubOffsetIn + iproj][itk]; const L1TStub& stub = stubdata.stub(); - if (stubdata.valid()) { - const TTStubRef ttStubRef = stubMap[stub]; - int layerId(-1); - if (!channelAssignment_->layerId(stubdata.iSeed(), ttStubRef, layerId)) - continue; - hitMap.set(layerId); - streamsStub[chanStubOffsetOut + layerId].emplace_back(ttStubRef, stubdata.dataBits()); - } + if (!stubdata.valid()) + continue; + const TTStubRef& ttStubRef = stubMap[stub]; + const int seedType = stubdata.iSeed(); + const int layerId = setup_->layerId(ttStubRef); + const int channelId = channelAssignment_->channelId(seedType, layerId); + hitMap.set(channelId); + streamsStub[chanStubOffsetOut + channelId].emplace_back(ttStubRef, stubdata.dataBits()); } for (int layerId : hitMap.ids(false)) { // invalid stubs streamsStub[chanStubOffsetOut + layerId].emplace_back(tt::FrameStub()); diff --git a/L1Trigger/TrackFindingTracklet/plugins/ProducerDR.cc b/L1Trigger/TrackFindingTracklet/plugins/ProducerDR.cc new file mode 100644 index 0000000000000..c2bfb2e7ebfee --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/plugins/ProducerDR.cc @@ -0,0 +1,140 @@ +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Run.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/EDPutToken.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "DataFormats/Common/interface/Handle.h" + +#include "L1Trigger/TrackTrigger/interface/Setup.h" +#include "L1Trigger/TrackerTFP/interface/DataFormats.h" +#include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" +#include "L1Trigger/TrackFindingTracklet/interface/DR.h" +#include 
"SimTracker/TrackTriggerAssociation/interface/TTTypes.h" + +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace edm; +using namespace trackerTFP; +using namespace tt; + +namespace trklet { + + /*! \class trklet::ProducerDR + * \brief Emulates removal of duplicated TTTracks f/w + * \author Thomas Schuh + * \date 2023, Feb + */ + class ProducerDR : public stream::EDProducer<> { + public: + explicit ProducerDR(const ParameterSet&); + ~ProducerDR() override {} + + private: + void beginRun(const Run&, const EventSetup&) override; + void produce(Event&, const EventSetup&) override; + virtual void endJob() {} + // ED input token of Tracks + EDGetTokenT edGetTokenTracks_; + // ED input token of Stubs + EDGetTokenT edGetTokenStubs_; + // ED output token for stubs + EDPutTokenT edPutTokenAcceptedStubs_; + EDPutTokenT edPutTokenLostStubs_; + // ED output token for tracks + EDPutTokenT edPutTokenAcceptedTracks_; + EDPutTokenT edPutTokenLostTracks_; + // Setup token + ESGetToken esGetTokenSetup_; + // DataFormats token + ESGetToken esGetTokenDataFormats_; + // ChannelAssignment token + ESGetToken esGetTokenChannelAssignment_; + // configuration + ParameterSet iConfig_; + // helper class to store configurations + const Setup* setup_ = nullptr; + // helper class to extract structured data from tt::Frames + const DataFormats* dataFormats_ = nullptr; + // helper class to assign tracks to channel + const ChannelAssignment* channelAssignment_ = nullptr; + }; + + ProducerDR::ProducerDR(const ParameterSet& iConfig) : iConfig_(iConfig) { + const string& label = iConfig.getParameter("LabelDRin"); + const string& branchAcceptedStubs = iConfig.getParameter("BranchAcceptedStubs"); + const string& branchAcceptedTracks = iConfig.getParameter("BranchAcceptedTracks"); + const string& branchLostStubs = iConfig.getParameter("BranchLostStubs"); + const string& branchLostTracks = iConfig.getParameter("BranchLostTracks"); + // book in- and output ED 
products + edGetTokenTracks_ = consumes(InputTag(label, branchAcceptedTracks)); + edGetTokenStubs_ = consumes(InputTag(label, branchAcceptedStubs)); + edPutTokenAcceptedStubs_ = produces(branchAcceptedStubs); + edPutTokenAcceptedTracks_ = produces(branchAcceptedTracks); + edPutTokenLostStubs_ = produces(branchLostStubs); + edPutTokenLostTracks_ = produces(branchLostTracks); + // book ES products + esGetTokenSetup_ = esConsumes(); + esGetTokenDataFormats_ = esConsumes(); + esGetTokenChannelAssignment_ = esConsumes(); + } + + void ProducerDR::beginRun(const Run& iRun, const EventSetup& iSetup) { + // helper class to store configurations + setup_ = &iSetup.getData(esGetTokenSetup_); + if (!setup_->configurationSupported()) + return; + // check process history if desired + if (iConfig_.getParameter("CheckHistory")) + setup_->checkHistory(iRun.processHistory()); + // helper class to extract structured data from tt::Frames + dataFormats_ = &iSetup.getData(esGetTokenDataFormats_); + // helper class to assign tracks to channel + channelAssignment_ = &iSetup.getData(esGetTokenChannelAssignment_); + } + + void ProducerDR::produce(Event& iEvent, const EventSetup& iSetup) { + // empty DR products + const int numStreamsTracks = channelAssignment_->numNodesDR() * setup_->numRegions(); + const int numStreamsStubs = numStreamsTracks * setup_->numLayers(); + StreamsStub acceptedStubs(numStreamsStubs); + StreamsTrack acceptedTracks(numStreamsTracks); + StreamsStub lostStubs(numStreamsStubs); + StreamsTrack lostTracks(numStreamsTracks); + // read in TBout Product and produce KFin product + if (setup_->configurationSupported()) { + Handle handleStubs; + iEvent.getByToken(edGetTokenStubs_, handleStubs); + const StreamsStub& stubs = *handleStubs; + Handle handleTracks; + iEvent.getByToken(edGetTokenTracks_, handleTracks); + const StreamsTrack& tracks = *handleTracks; + for (int region = 0; region < setup_->numRegions(); region++) { + // object to remove duplicated tracks in a processing 
region + DR dr(iConfig_, setup_, dataFormats_, channelAssignment_, region); + // read in and organize input tracks and stubs + dr.consume(tracks, stubs); + // fill output products + dr.produce(acceptedStubs, acceptedTracks, lostStubs, lostTracks); + } + } + // store products + iEvent.emplace(edPutTokenAcceptedStubs_, std::move(acceptedStubs)); + iEvent.emplace(edPutTokenAcceptedTracks_, std::move(acceptedTracks)); + iEvent.emplace(edPutTokenLostStubs_, std::move(lostStubs)); + iEvent.emplace(edPutTokenLostTracks_, std::move(lostTracks)); + } + +} // namespace trklet + +DEFINE_FWK_MODULE(trklet::ProducerDR); diff --git a/L1Trigger/TrackFindingTracklet/plugins/ProducerDRin.cc b/L1Trigger/TrackFindingTracklet/plugins/ProducerDRin.cc new file mode 100644 index 0000000000000..f67f2c0cad3c2 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/plugins/ProducerDRin.cc @@ -0,0 +1,152 @@ +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Run.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/EDPutToken.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "DataFormats/Common/interface/Handle.h" + +#include "L1Trigger/TrackTrigger/interface/Setup.h" +#include "L1Trigger/TrackerTFP/interface/DataFormats.h" +#include "L1Trigger/TrackerTFP/interface/LayerEncoding.h" +#include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" +#include "L1Trigger/TrackFindingTracklet/interface/Settings.h" +#include "L1Trigger/TrackFindingTracklet/interface/DRin.h" +#include "SimTracker/TrackTriggerAssociation/interface/TTTypes.h" + +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace 
edm; +using namespace trackerTFP; +using namespace tt; + +namespace trklet { + + /*! \class trklet::ProducerDRin + * \brief Transforms format of TBout into that expected by DR input. + * \author Thomas Schuh + * \date 2023, Jan + */ + class ProducerDRin : public stream::EDProducer<> { + public: + explicit ProducerDRin(const ParameterSet&); + ~ProducerDRin() override {} + + private: + void beginRun(const Run&, const EventSetup&) override; + void produce(Event&, const EventSetup&) override; + virtual void endJob() {} + + // ED input token of Tracks + EDGetTokenT edGetTokenTracks_; + // ED input token of Stubs + EDGetTokenT edGetTokenStubs_; + // ED output token for stubs + EDPutTokenT edPutTokenAcceptedStubs_; + EDPutTokenT edPutTokenLostStubs_; + // ED output token for tracks + EDPutTokenT edPutTokenAcceptedTracks_; + EDPutTokenT edPutTokenLostTracks_; + // Setup token + ESGetToken esGetTokenSetup_; + // DataFormats token + ESGetToken esGetTokenDataFormats_; + // LayerEncoding token + ESGetToken esGetTokenLayerEncoding_; + // ChannelAssignment token + ESGetToken esGetTokenChannelAssignment_; + // configuration + ParameterSet iConfig_; + // helper class to store configurations + const Setup* setup_ = nullptr; + // helper class to extract structured data from tt::Frames + const DataFormats* dataFormats_ = nullptr; + // helper class to encode layer + const LayerEncoding* layerEncoding_ = nullptr; + // helper class to assign tracks to channel + const ChannelAssignment* channelAssignment_ = nullptr; + // helper class to store tracklet configurations + Settings settings_; + }; + + ProducerDRin::ProducerDRin(const ParameterSet& iConfig) : iConfig_(iConfig) { + const string& label = iConfig.getParameter("LabelTBout"); + const string& branchAcceptedStubs = iConfig.getParameter("BranchAcceptedStubs"); + const string& branchAcceptedTracks = iConfig.getParameter("BranchAcceptedTracks"); + const string& branchLostStubs = iConfig.getParameter("BranchLostStubs"); + const string& 
branchLostTracks = iConfig.getParameter("BranchLostTracks"); + // book in- and output ED products + edGetTokenTracks_ = consumes(InputTag(label, branchAcceptedTracks)); + edGetTokenStubs_ = consumes(InputTag(label, branchAcceptedStubs)); + edPutTokenAcceptedStubs_ = produces(branchAcceptedStubs); + edPutTokenAcceptedTracks_ = produces(branchAcceptedTracks); + edPutTokenLostStubs_ = produces(branchLostStubs); + edPutTokenLostTracks_ = produces(branchLostTracks); + // book ES products + esGetTokenSetup_ = esConsumes(); + esGetTokenDataFormats_ = esConsumes(); + esGetTokenLayerEncoding_ = esConsumes(); + esGetTokenChannelAssignment_ = esConsumes(); + } + + void ProducerDRin::beginRun(const Run& iRun, const EventSetup& iSetup) { + // helper class to store configurations + setup_ = &iSetup.getData(esGetTokenSetup_); + if (!setup_->configurationSupported()) + return; + // check process history if desired + if (iConfig_.getParameter("CheckHistory")) + setup_->checkHistory(iRun.processHistory()); + // helper class to extract structured data from tt::Frames + dataFormats_ = &iSetup.getData(esGetTokenDataFormats_); + // helper class to encode layer + layerEncoding_ = &iSetup.getData(esGetTokenLayerEncoding_); + // helper class to assign tracks to channel + channelAssignment_ = &iSetup.getData(esGetTokenChannelAssignment_); + } + + void ProducerDRin::produce(Event& iEvent, const EventSetup& iSetup) { + // empty KFin products + const int numStreamsTracks = channelAssignment_->numNodesDR() * setup_->numRegions(); + const int numStreamsStubs = numStreamsTracks * setup_->numLayers(); + StreamsStub acceptedStubs(numStreamsStubs); + StreamsTrack acceptedTracks(numStreamsTracks); + StreamsStub lostStubs(numStreamsStubs); + StreamsTrack lostTracks(numStreamsTracks); + // read in TBout Product and produce KFin product + if (setup_->configurationSupported()) { + Handle handleStubs; + iEvent.getByToken(edGetTokenStubs_, handleStubs); + const StreamsStub& stubs = *handleStubs; + Handle 
handleTracks; + iEvent.getByToken(edGetTokenTracks_, handleTracks); + const StreamsTrack& tracks = *handleTracks; + for (int region = 0; region < setup_->numRegions(); region++) { + // object to reformat tracks from tracklet fromat to TMTT format in a processing region + DRin drin(iConfig_, setup_, dataFormats_, layerEncoding_, channelAssignment_, &settings_, region); + // read in and organize input tracks and stubs + drin.consume(tracks, stubs); + // fill output products + drin.produce(acceptedStubs, acceptedTracks, lostStubs, lostTracks); + } + } + // store products + iEvent.emplace(edPutTokenAcceptedStubs_, std::move(acceptedStubs)); + iEvent.emplace(edPutTokenAcceptedTracks_, std::move(acceptedTracks)); + iEvent.emplace(edPutTokenLostStubs_, std::move(lostStubs)); + iEvent.emplace(edPutTokenLostTracks_, std::move(lostTracks)); + } + +} // namespace trklet + +DEFINE_FWK_MODULE(trklet::ProducerDRin); diff --git a/L1Trigger/TrackFindingTracklet/plugins/ProducerKFin.cc b/L1Trigger/TrackFindingTracklet/plugins/ProducerKFin.cc index f65d0667f29e6..cad15606af4bd 100644 --- a/L1Trigger/TrackFindingTracklet/plugins/ProducerKFin.cc +++ b/L1Trigger/TrackFindingTracklet/plugins/ProducerKFin.cc @@ -12,9 +12,7 @@ #include "L1Trigger/TrackTrigger/interface/Setup.h" #include "L1Trigger/TrackerTFP/interface/DataFormats.h" -#include "L1Trigger/TrackerTFP/interface/LayerEncoding.h" #include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" -#include "L1Trigger/TrackFindingTracklet/interface/Settings.h" #include "L1Trigger/TrackFindingTracklet/interface/KFin.h" #include "SimTracker/TrackTriggerAssociation/interface/TTTypes.h" @@ -33,9 +31,9 @@ using namespace tt; namespace trklet { /*! \class trklet::ProducerKFin - * \brief Transforms format of TBout into that expected by KF input. + * \brief Transforms format of DR into that expected by KF input. 
* \author Thomas Schuh - * \date 2020, Oct; updated 2021, Dec + * \date 2023, Feb */ class ProducerKFin : public stream::EDProducer<> { public: @@ -46,7 +44,6 @@ namespace trklet { void beginRun(const Run&, const EventSetup&) override; void produce(Event&, const EventSetup&) override; virtual void endJob() {} - // ED input token of Tracks EDGetTokenT edGetTokenTracks_; // ED input token of Stubs @@ -75,12 +72,10 @@ namespace trklet { const LayerEncoding* layerEncoding_ = nullptr; // helper class to assign tracks to channel const ChannelAssignment* channelAssignment_ = nullptr; - // helper class to store tracklet configurations - Settings settings_; }; ProducerKFin::ProducerKFin(const ParameterSet& iConfig) : iConfig_(iConfig) { - const string& label = iConfig.getParameter("LabelTBout"); + const string& label = iConfig.getParameter("LabelDR"); const string& branchAcceptedStubs = iConfig.getParameter("BranchAcceptedStubs"); const string& branchAcceptedTracks = iConfig.getParameter("BranchAcceptedTracks"); const string& branchLostStubs = iConfig.getParameter("BranchLostStubs"); @@ -117,10 +112,12 @@ namespace trklet { void ProducerKFin::produce(Event& iEvent, const EventSetup& iSetup) { // empty KFin products - StreamsStub acceptedStubs(dataFormats_->numStreamsStubs(Process::kfin)); - StreamsTrack acceptedTracks(dataFormats_->numStreamsTracks(Process::kfin)); - StreamsStub lostStubs(dataFormats_->numStreamsStubs(Process::kfin)); - StreamsTrack lostTracks(dataFormats_->numStreamsTracks(Process::kfin)); + const int numStreamsTracks = setup_->kfNumWorker() * setup_->numRegions(); + const int numStreamsStubs = numStreamsTracks * setup_->numLayers(); + StreamsStub acceptedStubs(numStreamsStubs); + StreamsTrack acceptedTracks(numStreamsTracks); + StreamsStub lostStubs(numStreamsStubs); + StreamsTrack lostTracks(numStreamsTracks); // read in TBout Product and produce KFin product if (setup_->configurationSupported()) { Handle handleStubs; @@ -130,8 +127,8 @@ namespace trklet 
{ iEvent.getByToken(edGetTokenTracks_, handleTracks); const StreamsTrack& tracks = *handleTracks; for (int region = 0; region < setup_->numRegions(); region++) { - // object to reformat tracks from tracklet fromat to TMTT format in a processing region - KFin kfin(iConfig_, setup_, dataFormats_, layerEncoding_, channelAssignment_, &settings_, region); + // object to reformat tracks from DR fromat to KF format in a processing region + KFin kfin(iConfig_, setup_, dataFormats_, layerEncoding_, channelAssignment_, region); // read in and organize input tracks and stubs kfin.consume(tracks, stubs); // fill output products diff --git a/L1Trigger/TrackFindingTracklet/plugins/ProducerKFout.cc b/L1Trigger/TrackFindingTracklet/plugins/ProducerKFout.cc index 13f1a11dfe7b4..7e2b4d44ee2c6 100644 --- a/L1Trigger/TrackFindingTracklet/plugins/ProducerKFout.cc +++ b/L1Trigger/TrackFindingTracklet/plugins/ProducerKFout.cc @@ -12,7 +12,7 @@ #include "L1Trigger/TrackTrigger/interface/Setup.h" #include "L1Trigger/TrackerTFP/interface/DataFormats.h" -#include "L1Trigger/TrackerTFP/interface/DistServer.h" +#include "L1Trigger/TrackTrigger/interface/L1TrackQuality.h" #include #include @@ -26,6 +26,10 @@ namespace trklet { /*! \class trklet::ProducerKFout * \brief Converts KF output into TFP output + * A bit accurate emulation of the track transformation, the + * eta routing and splitting of the 96-bit track words into 64-bit + * packets. Also run is a bit accurate emulation of the track quality + * BDT, whose output is also added to the track word. 
* \author Christopher Brown * \date 2021, Aug */ @@ -33,8 +37,6 @@ namespace trklet { public: explicit ProducerKFout(const ParameterSet&); ~ProducerKFout() override {} - template - int digitise(const vector Bins, T Value, T factor = 1); private: void beginRun(const Run&, const EventSetup&) override; @@ -58,18 +60,34 @@ namespace trklet { // configuration ParameterSet iConfig_; // helper class to store configurations - const Setup* setup_ = nullptr; + const Setup* setup_; // helper class to extract structured data from tt::Frames - const DataFormats* dataFormats_ = nullptr; + const DataFormats* dataFormats_; // Bins for dPhi/dZ use to create weight LUT vector dPhiBins_; vector dZBins_; - // Constant used throughout for partial ttrack words - int partialTrackWordBits_; + std::unique_ptr trackQualityModel_; + vector tqBins_; + double tqTanlScale_; + double tqZ0Scale_; + static constexpr double ap_fixed_rescale = 32.0; // For convenience and keeping readable code, accessed many times int numWorkers_; + + int partialTrackWordBits_; + + // Helper function to convert floating chi2 to chi2 bin + template + unsigned int digitise(const T& bins, double value, double factor) { + unsigned int bin = 0; + for (unsigned int i = 0; i < bins.size() - 1; i++) { + if (value * factor > bins[i] && value * factor <= bins[i + 1]) + bin = i; + } + return bin; + } }; ProducerKFout::ProducerKFout(const ParameterSet& iConfig) : iConfig_(iConfig) { @@ -87,6 +105,15 @@ namespace trklet { // book ES products esGetTokenSetup_ = esConsumes(); esGetTokenDataFormats_ = esConsumes(); + // initial ES products + setup_ = nullptr; + dataFormats_ = nullptr; + + trackQualityModel_ = std::make_unique(iConfig.getParameter("TrackQualityPSet")); + edm::ParameterSet trackQualityPSset = iConfig.getParameter("TrackQualityPSet"); + tqBins_ = trackQualityPSset.getParameter>("tqemu_bins"); + tqTanlScale_ = trackQualityPSset.getParameter("tqemu_TanlScale"); + tqZ0Scale_ = 
trackQualityPSset.getParameter("tqemu_Z0Scale"); } void ProducerKFout::beginRun(const Run& iRun, const EventSetup& iSetup) { @@ -101,30 +128,28 @@ namespace trklet { dataFormats_ = &iSetup.getData(esGetTokenDataFormats_); // Calculate 1/dz**2 and 1/dphi**2 bins for v0 and v1 weightings + + float temp_dphi = 0.0; + float temp_dz = 0.0; for (int i = 0; i < pow(2, dataFormats_->width(Variable::dPhi, Process::kfin)) / pow(2, setup_->weightBinFraction()); - i++) - dPhiBins_.push_back( - pow(dataFormats_->base(Variable::dPhi, Process::kfin) * (i + 1) * pow(2, setup_->weightBinFraction()), -2)); - + i++) { + temp_dphi = + pow(dataFormats_->base(Variable::dPhi, Process::kfin) * (i + 1) * pow(2, setup_->weightBinFraction()), -2); + temp_dphi = temp_dphi / setup_->dphiTruncation(); + temp_dphi = std::floor(temp_dphi); + dPhiBins_.push_back(temp_dphi * setup_->dphiTruncation()); + } for (int i = 0; i < pow(2, dataFormats_->width(Variable::dZ, Process::kfin)) / pow(2, setup_->weightBinFraction()); - i++) - dZBins_.push_back( - pow(dataFormats_->base(Variable::dZ, Process::kfin) * (i + 1) * pow(2, setup_->weightBinFraction()), -2)); - - partialTrackWordBits_ = TTBV::S_ / 2; - numWorkers_ = setup_->kfNumWorker(); - } - - // Helper function to convert floating chi2 to chi2 bin - template - int ProducerKFout::digitise(const vector Bins, T Value, T factor) { - for (int i = 0; i < (int)Bins.size(); i++) { - if (Value * factor > Bins[i] && Value * factor <= Bins[i + 1]) { - return i; - } + i++) { + temp_dz = + pow(dataFormats_->base(Variable::dZ, Process::kfin) * (i + 1) * pow(2, setup_->weightBinFraction()), -2); + temp_dz = temp_dz * setup_->dzTruncation(); + temp_dz = std::ceil(temp_dz); + dZBins_.push_back(temp_dz / setup_->dzTruncation()); } - return -1; + numWorkers_ = setup_->kfNumWorker(); + partialTrackWordBits_ = TTBV::S_ / 2; } void ProducerKFout::produce(Event& iEvent, const EventSetup& iSetup) { @@ -143,12 +168,12 @@ namespace trklet { 
iEvent.getByToken(edGetTokenTTTrackRefMap_, handleTTTrackRefMap); const TTTrackRefMap& ttTrackRefMap = *handleTTTrackRefMap.product(); // 18 Output Links (First Vector) each has a vector of tracks per event (second vector) each track is 3 32 bit TTBV partial tracks - vector> SortedPartialTracks(setup_->numRegions() * setup_->tfpNumChannel(), vector(0)); + vector> sortedPartialTracks(setup_->numRegions() * setup_->tfpNumChannel(), vector(0)); - TrackKFOutSAPtrCollectionss InTrackStreams; - TrackKFOutSAPtrCollectionss OutTrackStreams; + TrackKFOutSAPtrCollectionss inTrackStreams; + TrackKFOutSAPtrCollectionss outTrackStreams; - // Setup empty collections for input tracks to distribution server + // Setup empty collections for input tracks to be routed for (int iRegion = 0; iRegion < setup_->numRegions(); iRegion++) { TrackKFOutSAPtrCollections temp_collection; for (int iLink = 0; iLink < setup_->tfpNumChannel(); iLink++) { @@ -157,10 +182,10 @@ namespace trklet { temp.emplace_back(std::make_shared()); temp_collection.push_back(temp); } - OutTrackStreams.push_back(temp_collection); + outTrackStreams.push_back(temp_collection); } - // Setup empty collections for oiutpu tracks from distribution server + // Setup empty collections for output tracks from routing for (int iRegion = 0; iRegion < setup_->numRegions(); iRegion++) { TrackKFOutSAPtrCollections temp_collection; for (int iLink = 0; iLink < numWorkers_; iLink++) { @@ -169,45 +194,60 @@ namespace trklet { temp.emplace_back(std::make_shared()); temp_collection.push_back(temp); } - InTrackStreams.push_back(temp_collection); + inTrackStreams.push_back(temp_collection); } - StreamsTrack OutputStreamsTracks(setup_->numRegions() * setup_->tfpNumChannel()); + StreamsTrack outputStreamsTracks(setup_->numRegions() * setup_->tfpNumChannel()); + + // Setup containers for track quality + float tempTQMVA = 0.0; + // Due to ap_fixed implementation in CMSSW this 10,5 must be specified at compile time, TODO make this a changeable 
parameter + std::vector> trackQuality_inputs = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; for (int iLink = 0; iLink < (int)streamsTracks.size(); iLink++) { for (int iTrack = 0; iTrack < (int)streamsTracks[iLink].size(); iTrack++) { const auto& track = streamsTracks[iLink].at(iTrack); - TrackKF InTrack(track, dataFormats_); + TrackKF inTrack(track, dataFormats_); - double temp_z0 = InTrack.zT() - ((InTrack.cot() * setup_->chosenRofZ())); + double temp_z0 = inTrack.zT() - ((inTrack.cot() * setup_->chosenRofZ())); // Correction to Phi calcuation depending if +ve/-ve phi sector - const double baseSectorCorr = InTrack.sectorPhi() ? -setup_->baseSector() : setup_->baseSector(); + const double baseSectorCorr = inTrack.sectorPhi() ? -setup_->baseSector() : setup_->baseSector(); - double temp_phi0 = InTrack.phiT() - ((InTrack.inv2R()) * setup_->hybridChosenRofPhi()) + baseSectorCorr; + double temp_phi0 = inTrack.phiT() - ((inTrack.inv2R()) * setup_->hybridChosenRofPhi()) + baseSectorCorr; - double temp_tanL = InTrack.cotGlobal(); + double temp_tanL = inTrack.cotGlobal(); - TTBV HitPattern(0, setup_->numLayers()); + TTBV hitPattern(0, setup_->numLayers()); double tempchi2rphi = 0; double tempchi2rz = 0; + int temp_nstub = 0; + int temp_ninterior = 0; + bool counter = false; + for (int iStub = 0; iStub < setup_->numLayers() - 1; iStub++) { const auto& stub = streamsStubs[setup_->numLayers() * iLink + iStub].at(iTrack); - StubKF InStub(stub, dataFormats_, iStub); + StubKF inStub(stub, dataFormats_, iStub); - if (!stub.first.isNonnull()) + if (!stub.first.isNonnull()) { + if (counter) + temp_ninterior += 1; continue; + } + + counter = true; - HitPattern.set(iStub); - double phiSquared = pow(InStub.phi(), 2); - double zSquared = pow(InStub.z(), 2); + hitPattern.set(iStub); + temp_nstub += 1; + double phiSquared = pow(inStub.phi(), 2); + double zSquared = pow(inStub.z(), 2); - double tempv0 = dPhiBins_[(InStub.dPhi() / (dataFormats_->base(Variable::dPhi, Process::kfin) * + double 
tempv0 = dPhiBins_[(inStub.dPhi() / (dataFormats_->base(Variable::dPhi, Process::kfin) * pow(2, setup_->weightBinFraction())))]; double tempv1 = dZBins_[( - InStub.dZ() / (dataFormats_->base(Variable::dZ, Process::kfin) * pow(2, setup_->weightBinFraction())))]; + inStub.dZ() / (dataFormats_->base(Variable::dZ, Process::kfin) * pow(2, setup_->weightBinFraction())))]; double tempRphi = phiSquared * tempv0; double tempRz = zSquared * tempv1; @@ -216,108 +256,138 @@ namespace trklet { tempchi2rz += tempRz; } // Iterate over track stubs - // TODO extract TTTrack bit widths from TTTrack word pending update to the TTTrack_word class - TTBV TrackValid(1, 1, false); - TTBV extraMVA(0, 6, false); - TTBV TQMVA(0, 3, false); - TTBV BendChi2(0, 3, false); - TTBV Chi2rphi( - digitise(setup_->kfoutchi2rphiBins(), tempchi2rphi, (double)setup_->kfoutchi2rphiConv()), 4, false); - TTBV Chi2rz(digitise(setup_->kfoutchi2rzBins(), tempchi2rz, (double)setup_->kfoutchi2rzConv()), 4, false); - TTBV D0(0, 13, false); - TTBV z0(temp_z0, dataFormats_->base(Variable::zT, Process::kf), 12, true); - TTBV TanL(temp_tanL, dataFormats_->base(Variable::cot, Process::kf), 16, true); - TTBV phi0(temp_phi0, dataFormats_->base(Variable::phiT, Process::kf), 12, true); - TTBV InvR(-InTrack.inv2R(), dataFormats_->base(Variable::inv2R, Process::kf), 16, true); - InvR.resize(15); - // 13 + 3 + 7 + 3 + 3 - TTBV PartialTrack3((D0 + BendChi2 + HitPattern + TQMVA + extraMVA), partialTrackWordBits_, false); - // 16 + 12 + 4 - TTBV PartialTrack2((TanL + z0 + Chi2rz), partialTrackWordBits_, false); - // 1 + 15 + 12 + 4 - TTBV PartialTrack1((TrackValid + InvR + phi0 + Chi2rphi), partialTrackWordBits_, false); - - int sortKey = (InTrack.sectorEta() < (int)(setup_->numSectorsEta() / 2)) ? 
0 : 1; + // Create bit vectors for eacch output, including digitisation of chi2 + // TODO implement extraMVA, bendChi2, d0 + TTBV trackValid(1, TTTrack_TrackWord::TrackBitWidths::kValidSize, false); + TTBV extraMVA(0, TTTrack_TrackWord::TrackBitWidths::kMVAOtherSize, false); + TTBV bendChi2(0, TTTrack_TrackWord::TrackBitWidths::kBendChi2Size, false); + TTBV chi2rphi(digitise(TTTrack_TrackWord::chi2RPhiBins, tempchi2rphi, (double)setup_->kfoutchi2rphiConv()), + TTTrack_TrackWord::TrackBitWidths::kChi2RPhiSize, + false); + TTBV chi2rz(digitise(TTTrack_TrackWord::chi2RZBins, tempchi2rz, (double)setup_->kfoutchi2rzConv()), + TTTrack_TrackWord::TrackBitWidths::kChi2RZSize, + false); + TTBV d0(0, TTTrack_TrackWord::TrackBitWidths::kD0Size, false); + TTBV z0( + temp_z0, dataFormats_->base(Variable::zT, Process::kf), TTTrack_TrackWord::TrackBitWidths::kZ0Size, true); + TTBV tanL(temp_tanL, + dataFormats_->base(Variable::cot, Process::kf), + TTTrack_TrackWord::TrackBitWidths::kTanlSize, + true); + TTBV phi0(temp_phi0, + dataFormats_->base(Variable::phiT, Process::kf), + TTTrack_TrackWord::TrackBitWidths::kPhiSize, + true); + TTBV invR(-inTrack.inv2R(), + dataFormats_->base(Variable::inv2R, Process::kf), + TTTrack_TrackWord::TrackBitWidths::kRinvSize + 1, + true); + invR.resize(TTTrack_TrackWord::TrackBitWidths::kRinvSize); + + // Create input vector for BDT + trackQuality_inputs = { + (std::trunc(tanL.val() / tqTanlScale_)) / ap_fixed_rescale, + (std::trunc(z0.val() / tqZ0Scale_)) / ap_fixed_rescale, + 0, + temp_nstub, + temp_ninterior, + digitise(TTTrack_TrackWord::chi2RPhiBins, tempchi2rphi, (double)setup_->kfoutchi2rphiConv()), + digitise(TTTrack_TrackWord::chi2RZBins, tempchi2rz, (double)setup_->kfoutchi2rzConv())}; + + // Run BDT emulation and package output into 3 bits + + tempTQMVA = trackQualityModel_->runEmulatedTQ(trackQuality_inputs); + tempTQMVA = std::trunc(tempTQMVA * ap_fixed_rescale); + TTBV tqMVA(digitise(tqBins_, tempTQMVA, 1.0), 
TTTrack_TrackWord::TrackBitWidths::kMVAQualitySize, false); + + // Build 32 bit partial tracks for outputting in 64 bit packets + // 12 + 3 + 7 + 3 + 6 + TTBV partialTrack3((d0 + bendChi2 + hitPattern + tqMVA + extraMVA), partialTrackWordBits_, false); + // 16 + 12 + 4 + TTBV partialTrack2((tanL + z0 + chi2rz), partialTrackWordBits_, false); + // 1 + 15 + 12 + 4 + TTBV partialTrack1((trackValid + invR + phi0 + chi2rphi), partialTrackWordBits_, false); + + int sortKey = (inTrack.sectorEta() < (int)(setup_->numSectorsEta() / 2)) ? 0 : 1; // Set correct bit to valid for track valid - TrackKFOut Temp_track( - PartialTrack1.set(31), PartialTrack2, PartialTrack3, sortKey, track, iTrack, iLink, true); - - InTrackStreams[iLink / numWorkers_][iLink % numWorkers_][iTrack] = (std::make_shared(Temp_track)); - + TrackKFOut temp_track(partialTrack1.set((partialTrackWordBits_ - 1)), + partialTrack2, + partialTrack3, + sortKey, + track, + iTrack, + iLink, + true); + + inTrackStreams[iLink / setup_->kfNumWorker()][iLink % setup_->kfNumWorker()][iTrack] = + (std::make_shared(temp_track)); } // Iterate over Tracks - - //Pad out input streams to Dist server with extra null track if odd number of tracks on a stream - int iFinTrack = (int)streamsTracks[iLink].size(); - if (iFinTrack % numWorkers_ != 0) { - TrackKFOut null_track(0, 0, 0, iLink % numWorkers_, tt::FrameTrack(), iFinTrack + 1, iLink, true); - InTrackStreams[iLink / numWorkers_][iLink % numWorkers_][iFinTrack] = - (std::make_shared(null_track)); - } - } // Iterate over Links - // Fill products and match up tracks - - // One distribution server for every region, num inputs = num KF workers, num outputs = num output links - vector distServers( - setup_->numRegions(), - DistServer(numWorkers_, setup_->tfpNumChannel(), 2)); //Magic number for interleaving in dist server - + } // Iterate over Links + // Route Tracks in eta based on their sort key for (int iRegion = 0; iRegion < setup_->numRegions(); iRegion++) { - for (int iTrack 
= 0; iTrack < setup_->numFramesIO() * ((double)TTBV::S_ / setup_->tttrackBits()); iTrack++) { - TrackKFOutSAPtrCollection DistIn; - for (int iWorker = 0; iWorker < numWorkers_; iWorker++) - DistIn.push_back(InTrackStreams[iRegion][iWorker][iTrack]); // Reorganise input to distribution server - TrackKFOutSAPtrCollection DistOut = distServers[iRegion].clock(DistIn); // Run dist server - for (int iLink = 0; iLink < setup_->tfpNumChannel(); iLink++) - OutTrackStreams[iRegion][iLink][iTrack] = - DistOut[iLink]; // Reorganise output of distribution server in output streams + int buffered_tracks[] = {0, 0}; + for (int iTrack = 0; + iTrack < setup_->numFramesIO() * ((double)TTBV::S_ / TTTrack_TrackWord::TrackBitWidths::kTrackWordSize); + iTrack++) { + for (int iWorker = 0; iWorker < setup_->kfNumWorker(); iWorker++) { + for (int iLink = 0; iLink < setup_->tfpNumChannel(); iLink++) { + if ((inTrackStreams[iRegion][iWorker][iTrack]->sortKey() == iLink) && + (inTrackStreams[iRegion][iWorker][iTrack]->dataValid() == true)) { + outTrackStreams[iRegion][iLink][buffered_tracks[iLink]] = inTrackStreams[iRegion][iWorker][iTrack]; + buffered_tracks[iLink] = buffered_tracks[iLink] + 1; + } + } + } } } - // Pack output of distribution server onto each link, with correct partial tracks in correct places + // Pack output of router onto each link, with correct partial tracks in correct places for (int iRegion = 0; iRegion < setup_->numRegions(); iRegion++) { for (int iLink = 0; iLink < setup_->tfpNumChannel(); iLink++) { - for (int iTrack = 0; iTrack < (int)OutTrackStreams[iRegion][iLink].size(); iTrack++) { - SortedPartialTracks[2 * iRegion + iLink].push_back( - OutTrackStreams[iRegion][iLink][iTrack]->PartialTrack1()); - SortedPartialTracks[2 * iRegion + iLink].push_back( - OutTrackStreams[iRegion][iLink][iTrack]->PartialTrack2()); - SortedPartialTracks[2 * iRegion + iLink].push_back( - OutTrackStreams[iRegion][iLink][iTrack]->PartialTrack3()); - OutputStreamsTracks[2 * iRegion + 
iLink].emplace_back(OutTrackStreams[iRegion][iLink][iTrack]->track()); + for (int iTrack = 0; iTrack < (int)outTrackStreams[iRegion][iLink].size(); iTrack++) { + sortedPartialTracks[2 * iRegion + iLink].push_back( + outTrackStreams[iRegion][iLink][iTrack]->PartialTrack1()); + sortedPartialTracks[2 * iRegion + iLink].push_back( + outTrackStreams[iRegion][iLink][iTrack]->PartialTrack2()); + sortedPartialTracks[2 * iRegion + iLink].push_back( + outTrackStreams[iRegion][iLink][iTrack]->PartialTrack3()); + outputStreamsTracks[2 * iRegion + iLink].emplace_back(outTrackStreams[iRegion][iLink][iTrack]->track()); } } } - - const TTBV NullBitTrack(0, partialTrackWordBits_, false); - for (int iLink = 0; iLink < (int)OutputStreamsTracks.size(); iLink++) { + // Fill products and match up tracks + // store products + const TTBV nullBitTrack(0, partialTrackWordBits_, false); + for (int iLink = 0; iLink < (int)outputStreamsTracks.size(); iLink++) { // Iterate through partial tracks - int numLinkTracks = (int)OutputStreamsTracks[iLink].size(); + int numLinkTracks = (int)outputStreamsTracks[iLink].size(); if (numLinkTracks == 0) continue; // Don't fill links if no tracks if ((numLinkTracks % 2 != 0)) { - SortedPartialTracks[iLink].push_back(NullBitTrack); //Pad out final set of bits - OutputStreamsTracks[iLink].emplace_back( - OutputStreamsTracks[iLink][numLinkTracks++]); //Pad out with final repeated track + sortedPartialTracks[iLink].push_back(nullBitTrack); //Pad out final set of bits + outputStreamsTracks[iLink].emplace_back( + outputStreamsTracks[iLink][numLinkTracks++]); //Pad out with final repeated track } //If there is an odd number of tracks - for (int iTrack = 0; iTrack < (int)(SortedPartialTracks[iLink].size()); iTrack++) { + for (int iTrack = 0; iTrack < (int)(sortedPartialTracks[iLink].size()); iTrack++) { if (iTrack % 2 != 1) // Write to links every other partial track, 3 partial tracks per full TTTrack continue; - TTTrackRef TrackRef; + TTTrackRef trackRef; for 
(auto& it : ttTrackRefMap) { //Iterate through ttTrackRefMap to find TTTrackRef Key by a TTTrack Value - if (it.second == OutputStreamsTracks[iLink][(int)(iTrack - 1) / 3].first) - TrackRef = it.first; + if (it.second == outputStreamsTracks[iLink][(int)(iTrack - 1) / 3].first) + trackRef = it.first; } - if ((int)iTrack / 3 <= setup_->numFramesIO() * ((double)TTBV::S_ / setup_->tttrackBits())) + if ((int)iTrack / 3 <= + setup_->numFramesIO() * ((double)TTBV::S_ / TTTrack_TrackWord::TrackBitWidths::kTrackWordSize)) accepted[iLink].emplace_back( - std::make_pair(TrackRef, - (SortedPartialTracks[iLink][iTrack - 1].slice(partialTrackWordBits_) + - SortedPartialTracks[iLink][iTrack].slice(partialTrackWordBits_)) + std::make_pair(trackRef, + (sortedPartialTracks[iLink][iTrack - 1].slice(partialTrackWordBits_) + + sortedPartialTracks[iLink][iTrack].slice(partialTrackWordBits_)) .bs())); else lost[iLink].emplace_back( - std::make_pair(TrackRef, - (SortedPartialTracks[iLink][iTrack - 1].slice(partialTrackWordBits_) + - SortedPartialTracks[iLink][iTrack].slice(partialTrackWordBits_)) + std::make_pair(trackRef, + (sortedPartialTracks[iLink][iTrack - 1].slice(partialTrackWordBits_) + + sortedPartialTracks[iLink][iTrack].slice(partialTrackWordBits_)) .bs())); } //Iterate through sorted partial tracks } // Iterate through links diff --git a/L1Trigger/TrackFindingTracklet/plugins/ProducerTBout.cc b/L1Trigger/TrackFindingTracklet/plugins/ProducerTBout.cc index 5e2cf2e20ed65..981ed2ba75c6b 100644 --- a/L1Trigger/TrackFindingTracklet/plugins/ProducerTBout.cc +++ b/L1Trigger/TrackFindingTracklet/plugins/ProducerTBout.cc @@ -130,8 +130,8 @@ namespace trklet { int channelId(-1); for (int i = 0; i < (int)handleTTTracks->size(); i++) { const TTTrackRef ttTrackRef(handleTTTracks, i); - if (channelAssignment_->channelId(ttTrackRef, channelId)) - ttTrackRefs[channelId].push_back(ttTrackRef); + const int channelId = channelAssignment_->channelId(ttTrackRef); + 
ttTrackRefs[channelId].push_back(ttTrackRef); } // get and trunacte tracks Handle handleTracks; diff --git a/L1Trigger/TrackFindingTracklet/python/Analyzer_cff.py b/L1Trigger/TrackFindingTracklet/python/Analyzer_cff.py index 5fd6ea8441bbe..13635fe458ca2 100644 --- a/L1Trigger/TrackFindingTracklet/python/Analyzer_cff.py +++ b/L1Trigger/TrackFindingTracklet/python/Analyzer_cff.py @@ -5,6 +5,8 @@ TrackFindingTrackletAnalyzerTBout = cms.EDAnalyzer( 'trklet::AnalyzerTBout', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) TrackFindingTrackletAnalyzerTracklet = cms.EDAnalyzer( 'trklet::AnalyzerTracklet', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) +TrackFindingTrackletAnalyzerDRin = cms.EDAnalyzer( 'trklet::AnalyzerDRin', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) +TrackFindingTrackletAnalyzerDR = cms.EDAnalyzer( 'trklet::AnalyzerDR', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) TrackFindingTrackletAnalyzerKFin = cms.EDAnalyzer( 'trklet::AnalyzerKFin', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) TrackFindingTrackletAnalyzerKF = cms.EDAnalyzer( 'trackerTFP::AnalyzerKF', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) TrackFindingTrackletAnalyzerKFout = cms.EDAnalyzer( 'trklet::AnalyzerKFout', TrackFindingTrackletAnalyzer_params, TrackFindingTrackletProducer_params ) diff --git a/L1Trigger/TrackFindingTracklet/python/ChannelAssignment_cfi.py b/L1Trigger/TrackFindingTracklet/python/ChannelAssignment_cfi.py index 2e05d43acdfcb..64bee96e8f6bf 100644 --- a/L1Trigger/TrackFindingTracklet/python/ChannelAssignment_cfi.py +++ b/L1Trigger/TrackFindingTracklet/python/ChannelAssignment_cfi.py @@ -3,8 +3,21 @@ ChannelAssignment_params = cms.PSet ( - UseDuplicateRemoval = cms.bool ( True ), # use tracklet seed type as channel id if False, binned track pt used if True - PtBoundaries = cms.vdouble( 1.34 ), # 
positive pt Boundaries in GeV (symmetric negatives are assumed), last boundary is infinity, defining ot bins used by DR + # DRin parameter + DRin = cms.PSet ( + WidthLayerId = cms.int32( 4 ), # number of bits used to represent layer id [barrel: 0-5, discs: 6-10] + WidthStubId = cms.int32( 10 ), # number of bits used to represent stub id for projected stubs + WidthSeedStubId = cms.int32( 7 ), # number of bits used to represent stub id for seed stubs + WidthPSTilt = cms.int32( 1 ), # number of bits used to distinguish between tilted and untilded barrel modules or 2S and PS endcap modules + DepthMemory = cms.int32( 32 ), # depth of fifos within systolic array + PtBoundaries = cms.vdouble( 3.0, 5.0, 8.0, 12.0, 24.0 ) # positive pt Boundaries in GeV (symmetric negatives are assumed), first boundary is pt cut, last boundary is infinity, defining pt bins used by DR + ), + + # DR parameter + DR = cms.PSet ( + NumComparisonModules = cms.int32( 16 ), # number of comparison modules used in each DR node + MinIdenticalStubs = cms.int32( 3 ) # min number of shared stubs to identify duplicates + ), SeedTypes = cms.vstring( "L1L2", "L2L3", "L3L4", "L5L6", "D1D2", "D3D4", "L1D1", "L2D1" ), # seed types used in tracklet algorithm (position gives int value) diff --git a/L1Trigger/TrackFindingTracklet/python/Customize_cff.py b/L1Trigger/TrackFindingTracklet/python/Customize_cff.py index 192a65beef491..bf714fb8c26c4 100644 --- a/L1Trigger/TrackFindingTracklet/python/Customize_cff.py +++ b/L1Trigger/TrackFindingTracklet/python/Customize_cff.py @@ -11,8 +11,6 @@ def fwConfig(process): process.l1tTTTracksFromTrackletEmulation.RemovalType = "" process.l1tTTTracksFromTrackletEmulation.DoMultipleMatches = False process.l1tTTTracksFromTrackletEmulation.StoreTrackBuilderOutput = True - process.ChannelAssignment.UseDuplicateRemoval = False - process.TrackTriggerSetup.KalmanFilter.NumWorker = 8 # configures track finding s/w to behave as a subchain of processing steps def reducedConfig(process): 
diff --git a/L1Trigger/TrackFindingTracklet/python/Demonstrator_cfi.py b/L1Trigger/TrackFindingTracklet/python/Demonstrator_cfi.py index 755325c8114b0..cd104644742ec 100644 --- a/L1Trigger/TrackFindingTracklet/python/Demonstrator_cfi.py +++ b/L1Trigger/TrackFindingTracklet/python/Demonstrator_cfi.py @@ -3,9 +3,9 @@ TrackTriggerDemonstrator_params = cms.PSet ( - LabelIn = cms.string( "TrackFindingTrackletProducerIRin" ), # - LabelOut = cms.string( "TrackFindingTrackletProducerTBout" ), # - DirIPBB = cms.string( "/heplnw039/tschuh/work/proj/IRinTBout/" ), # path to ipbb proj area - RunTime = cms.double( 8.0 ) # runtime in us + LabelIn = cms.string( "TrackFindingTrackletProducerDRin" ), # + LabelOut = cms.string( "TrackFindingTrackletProducerDR" ), # + DirIPBB = cms.string( "/heplnw039/tschuh/work/proj/DRinDR/" ), # path to ipbb proj area + RunTime = cms.double( 4.5 ) # runtime in us ) \ No newline at end of file diff --git a/L1Trigger/TrackFindingTracklet/python/Producer_cff.py b/L1Trigger/TrackFindingTracklet/python/Producer_cff.py index a2b4a4548fd27..181908f36a455 100644 --- a/L1Trigger/TrackFindingTracklet/python/Producer_cff.py +++ b/L1Trigger/TrackFindingTracklet/python/Producer_cff.py @@ -10,6 +10,8 @@ TrackFindingTrackletProducerIRin = cms.EDProducer( 'trklet::ProducerIRin', TrackFindingTrackletProducer_params ) TrackFindingTrackletProducerTBout = cms.EDProducer( 'trklet::ProducerTBout', TrackFindingTrackletProducer_params ) +TrackFindingTrackletProducerDRin = cms.EDProducer( 'trklet::ProducerDRin', TrackFindingTrackletProducer_params ) +TrackFindingTrackletProducerDR = cms.EDProducer( 'trklet::ProducerDR', TrackFindingTrackletProducer_params ) TrackFindingTrackletProducerKFin = cms.EDProducer( 'trklet::ProducerKFin', TrackFindingTrackletProducer_params ) TrackFindingTrackletProducerKF = cms.EDProducer( 'trackerTFP::ProducerKF', TrackFindingTrackletProducer_params ) TrackFindingTrackletProducerTT = cms.EDProducer( 'trklet::ProducerTT', 
TrackFindingTrackletProducer_params ) diff --git a/L1Trigger/TrackFindingTracklet/python/Producer_cfi.py b/L1Trigger/TrackFindingTracklet/python/Producer_cfi.py index 929af970fa0b1..5e2128b2ac70a 100644 --- a/L1Trigger/TrackFindingTracklet/python/Producer_cfi.py +++ b/L1Trigger/TrackFindingTracklet/python/Producer_cfi.py @@ -1,10 +1,13 @@ import FWCore.ParameterSet.Config as cms +from L1Trigger.TrackTrigger.TrackQualityParams_cfi import * TrackFindingTrackletProducer_params = cms.PSet ( InputTag = cms.InputTag( "l1tTTTracksFromTrackletEmulation", "Level1TTTracks"), # InputTagDTC = cms.InputTag( "TrackerDTCProducer", "StubAccepted"), # LabelTBout = cms.string ( "TrackFindingTrackletProducerTBout" ), # + LabelDRin = cms.string ( "TrackFindingTrackletProducerDRin" ), # + LabelDR = cms.string ( "TrackFindingTrackletProducerDR" ), # LabelKFin = cms.string ( "TrackFindingTrackletProducerKFin" ), # LabelKF = cms.string ( "TrackFindingTrackletProducerKF" ), # LabelTT = cms.string ( "TrackFindingTrackletProducerTT" ), # @@ -18,5 +21,7 @@ EnableTruncation = cms.bool ( True ), # enable emulation of truncation for TBout, KF, KFin, lost stubs are filled in BranchLost PrintKFDebug = cms.bool ( False ), # print end job internal unused MSB UseTTStubResiduals = cms.bool ( False ), # stub residuals are recalculated from seed parameter and TTStub position + TrackQualityPSet = cms.PSet ( TrackQualityParams ), + ) diff --git a/L1Trigger/TrackFindingTracklet/python/l1tTTTracksFromTrackletEmulation_cfi.py b/L1Trigger/TrackFindingTracklet/python/l1tTTTracksFromTrackletEmulation_cfi.py index 5779fce86d024..e3dcf9d8c904c 100644 --- a/L1Trigger/TrackFindingTracklet/python/l1tTTTracksFromTrackletEmulation_cfi.py +++ b/L1Trigger/TrackFindingTracklet/python/l1tTTTracksFromTrackletEmulation_cfi.py @@ -11,6 +11,7 @@ TrackingParticleInputTag = cms.InputTag("mix", "MergedTrackTruth"), BeamSpotSource = cms.InputTag("offlineBeamSpot"), asciiFileName = cms.untracked.string(""), + FailScenario = 
cms.untracked.int32(0), Extended = cms.bool(False), Reduced = cms.bool(False), Hnpar = cms.uint32(4), diff --git a/L1Trigger/TrackFindingTracklet/src/ChannelAssignment.cc b/L1Trigger/TrackFindingTracklet/src/ChannelAssignment.cc index 6dd3d20b2e29e..a040bbc8125c3 100644 --- a/L1Trigger/TrackFindingTracklet/src/ChannelAssignment.cc +++ b/L1Trigger/TrackFindingTracklet/src/ChannelAssignment.cc @@ -16,11 +16,20 @@ namespace trklet { ChannelAssignment::ChannelAssignment(const edm::ParameterSet& iConfig, const Setup* setup) : setup_(setup), - useDuplicateRemoval_(iConfig.getParameter("UseDuplicateRemoval")), - boundaries_(iConfig.getParameter>("PtBoundaries")), + pSetDRin_(iConfig.getParameter("DRin")), + widthLayerId_(pSetDRin_.getParameter("WidthLayerId")), + widthStubId_(pSetDRin_.getParameter("WidthStubId")), + widthSeedStubId_(pSetDRin_.getParameter("WidthSeedStubId")), + widthPSTilt_(pSetDRin_.getParameter("WidthPSTilt")), + depthMemory_(pSetDRin_.getParameter("DepthMemory")), + ptBoundaries_(pSetDRin_.getParameter>("PtBoundaries")), + pSetDR_(iConfig.getParameter("DR")), + numComparisonModules_(pSetDR_.getParameter("NumComparisonModules")), + minIdenticalStubs_(pSetDR_.getParameter("MinIdenticalStubs")), + numNodesDR_(2 * (ptBoundaries_.size() + 1)), seedTypeNames_(iConfig.getParameter>("SeedTypes")), numSeedTypes_(seedTypeNames_.size()), - numChannelsTrack_(useDuplicateRemoval_ ? 
2 * boundaries_.size() : numSeedTypes_), + numChannelsTrack_(numSeedTypes_), channelEncoding_(iConfig.getParameter>("IRChannelsIn")) { const ParameterSet& pSetSeedTypesSeedLayers = iConfig.getParameter("SeedTypesSeedLayers"); const ParameterSet& pSetSeedTypesProjectionLayers = iConfig.getParameter("SeedTypesProjectionLayers"); @@ -30,13 +39,6 @@ namespace trklet { seedTypesSeedLayers_.emplace_back(pSetSeedTypesSeedLayers.getParameter>(s)); seedTypesProjectionLayers_.emplace_back(pSetSeedTypesProjectionLayers.getParameter>(s)); } - auto acc = [](int sum, vector ints) { return sum + (int)ints.size(); }; - numChannelsStub_ = accumulate(seedTypesProjectionLayers_.begin(), seedTypesProjectionLayers_.end(), 0, acc); - offsetsStubs_.reserve(numSeedTypes_); - for (int seed = 0; seed < numSeedTypes_; seed++) { - const auto it = next(seedTypesProjectionLayers_.begin(), seed); - offsetsStubs_.emplace_back(accumulate(seedTypesProjectionLayers_.begin(), it, 0, acc)); - } // consistency check const int offsetBarrel = setup_->offsetLayerId(); const int numBarrelLayer = setup_->numBarrelLayer(); @@ -112,37 +114,29 @@ namespace trklet { numSeedingLayers_ = max_element(seedTypesSeedLayers_.begin(), seedTypesSeedLayers_.end(), bigger)->size(); maxNumProjectionLayers_ = max_element(seedTypesProjectionLayers_.begin(), seedTypesProjectionLayers_.end(), bigger)->size(); + auto acc = [](int sum, vector ints) { return sum += (int)ints.size(); }; + offsetsStubs_.reserve(numSeedTypes_); + numChannelsStub_ = accumulate( + seedTypesProjectionLayers_.begin(), seedTypesProjectionLayers_.end(), numSeedingLayers_ * numSeedTypes_, acc); + for (int seed = 0; seed < numSeedTypes_; seed++) { + const auto it = next(seedTypesProjectionLayers_.begin(), seed); + offsetsStubs_.emplace_back(accumulate(seedTypesProjectionLayers_.begin(), it, numSeedingLayers_ * seed, acc)); + } } - // sets channelId of given TTTrackRef, return false if track outside pt range - bool ChannelAssignment::channelId(const 
TTTrackRef& ttTrackRef, int& channelId) { - if (!useDuplicateRemoval_) { - const int seedType = ttTrackRef->trackSeedType(); - if (seedType >= numSeedTypes_) { - cms::Exception exception("logic_error"); - exception << "TTTracks form seed type" << seedType << " not in supported list: ("; - for (const auto& s : seedTypeNames_) - exception << s << " "; - exception << ")."; - exception.addContext("trklet:ChannelAssignment:channelId"); - throw exception; - } - channelId = ttTrackRef->phiSector() * numSeedTypes_ + seedType; - return true; - } - const double pt = ttTrackRef->momentum().perp(); - channelId = -1; - for (double boundary : boundaries_) { - if (pt < boundary) - break; - else - channelId++; + // returns channelId of given TTTrackRef + int ChannelAssignment::channelId(const TTTrackRef& ttTrackRef) const { + const int seedType = ttTrackRef->trackSeedType(); + if (seedType >= numSeedTypes_) { + cms::Exception exception("logic_error"); + exception << "TTTracks form seed type" << seedType << " not in supported list: ("; + for (const auto& s : seedTypeNames_) + exception << s << " "; + exception << ")."; + exception.addContext("trklet:ChannelAssignment:channelId"); + throw exception; } - if (channelId == -1) - return false; - channelId = ttTrackRef->rInv() < 0. ? 
channelId : numChannelsTrack_ - channelId - 1; - channelId += ttTrackRef->phiSector() * numChannelsTrack_; - return true; + return ttTrackRef->phiSector() * numSeedTypes_ + seedType; } // sets layerId of given TTStubRef and seedType, returns false if seeed stub @@ -174,20 +168,45 @@ namespace trklet { // index of first stub channel belonging to given track channel int ChannelAssignment::offsetStub(int channelTrack) const { - return channelTrack / numChannelsTrack_ * numChannelsStub_ + offsetsStubs_[channelTrack % numChannelsTrack_]; + const int region = channelTrack / numChannelsTrack_; + const int channel = channelTrack % numChannelsTrack_; + return region * numChannelsStub_ + offsetsStubs_[channel]; } - // + // returns TBout channel Id int ChannelAssignment::channelId(int seedType, int layerId) const { const vector& projections = seedTypesProjectionLayers_.at(seedType); - const vector& seeds = seedTypesSeedLayers_.at(seedType); const auto itp = find(projections.begin(), projections.end(), layerId); + if (itp != projections.end()) + return distance(projections.begin(), itp); + const vector& seeds = seedTypesSeedLayers_.at(seedType); const auto its = find(seeds.begin(), seeds.end(), layerId); if (its != seeds.end()) return (int)projections.size() + distance(seeds.begin(), its); - if (itp != projections.end()) - return distance(projections.begin(), itp); return -1; } + // return DR node for given ttTrackRef + int ChannelAssignment::nodeDR(const TTTrackRef& ttTrackRef) const { + const double pt = ttTrackRef->momentum().perp(); + int bin(0); + for (double b : ptBoundaries_) { + if (pt < b) + break; + bin++; + } + if (ttTrackRef->rInv() >= 0.) 
+ bin += numNodesDR_ / 2; + else + bin = numNodesDR_ / 2 - 1 - bin; + return bin; + } + + // layers a seed types can project to using default layer id [barrel: 1-6, discs: 11-15] + int ChannelAssignment::layerId(int seedType, int channel) const { + if (channel < numProjectionLayers(seedType)) + return seedTypesProjectionLayers_.at(seedType).at(channel); + return seedTypesSeedLayers_.at(seedType).at(channel - numProjectionLayers(seedType)); + } + } // namespace trklet diff --git a/L1Trigger/TrackFindingTracklet/src/DR.cc b/L1Trigger/TrackFindingTracklet/src/DR.cc new file mode 100644 index 0000000000000..14afa089a247e --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/src/DR.cc @@ -0,0 +1,160 @@ +#include "L1Trigger/TrackFindingTracklet/interface/DR.h" + +#include +#include +#include + +using namespace std; +using namespace edm; +using namespace tt; +using namespace trackerTFP; + +namespace trklet { + + DR::DR(const ParameterSet& iConfig, + const Setup* setup, + const DataFormats* dataFormats, + const ChannelAssignment* channelAssignment, + int region) + : enableTruncation_(iConfig.getParameter("EnableTruncation")), + setup_(setup), + dataFormats_(dataFormats), + channelAssignment_(channelAssignment), + region_(region), + input_(channelAssignment_->numNodesDR()) {} + + // read in and organize input tracks and stubs + void DR::consume(const StreamsTrack& streamsTrack, const StreamsStub& streamsStub) { + const int offsetTrack = region_ * channelAssignment_->numNodesDR(); + auto nonNullTrack = [](int sum, const FrameTrack& frame) { return sum + (frame.first.isNonnull() ? 1 : 0); }; + auto nonNullStub = [](int sum, const FrameStub& frame) { return sum + (frame.first.isNonnull() ? 
1 : 0); }; + // count tracks and stubs and reserve corresponding vectors + int sizeTracks(0); + int sizeStubs(0); + for (int channel = 0; channel < channelAssignment_->numNodesDR(); channel++) { + const int streamTrackId = offsetTrack + channel; + const int offsetStub = streamTrackId * setup_->numLayers(); + const StreamTrack& streamTrack = streamsTrack[streamTrackId]; + input_[channel].reserve(streamTrack.size()); + sizeTracks += accumulate(streamTrack.begin(), streamTrack.end(), 0, nonNullTrack); + for (int layer = 0; layer < setup_->numLayers(); layer++) { + const StreamStub& streamStub = streamsStub[offsetStub + layer]; + sizeStubs += accumulate(streamStub.begin(), streamStub.end(), 0, nonNullStub); + } + } + tracks_.reserve(sizeTracks); + stubs_.reserve(sizeStubs); + // transform input data into handy structs + for (int channel = 0; channel < channelAssignment_->numNodesDR(); channel++) { + vector& input = input_[channel]; + const int streamTrackId = offsetTrack + channel; + const int offsetStub = streamTrackId * setup_->numLayers(); + const StreamTrack& streamTrack = streamsTrack[streamTrackId]; + for (int frame = 0; frame < (int)streamTrack.size(); frame++) { + const FrameTrack& frameTrack = streamTrack[frame]; + if (frameTrack.first.isNull()) { + input.push_back(nullptr); + continue; + } + vector stubs; + stubs.reserve(setup_->numLayers()); + for (int layer = 0; layer < setup_->numLayers(); layer++) { + const FrameStub& frameStub = streamsStub[offsetStub + layer][frame]; + if (frameStub.first.isNull()) + continue; + TTBV ttBV = frameStub.second; + const TTBV z(ttBV, dataFormats_->format(Variable::z, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::z, Process::kfin).width(); + const TTBV phi(ttBV, dataFormats_->format(Variable::phi, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::phi, Process::kfin).width(); + const TTBV r(ttBV, dataFormats_->format(Variable::r, Process::kfin).width(), 0, true); + ttBV 
>>= dataFormats_->format(Variable::r, Process::kfin).width(); + const TTBV stubId(ttBV, channelAssignment_->widthSeedStubId(), 0); + ttBV >>= channelAssignment_->widthSeedStubId(); + const TTBV layerId(ttBV, channelAssignment_->widthLayerId(), 0); + ttBV >>= channelAssignment_->widthLayerId(); + const TTBV tilt(ttBV, channelAssignment_->widthPSTilt(), 0); + const FrameStub frame(frameStub.first, + Frame("1" + tilt.str() + layerId.str() + r.str() + phi.str() + z.str())); + stubs_.emplace_back(frame, stubId.val(), layer); + stubs.push_back(&stubs_.back()); + } + tracks_.emplace_back(frameTrack, stubs); + input.push_back(&tracks_.back()); + } + // remove all gaps between end and last track + for (auto it = input.end(); it != input.begin();) + it = (*--it) ? input.begin() : input.erase(it); + } + } + + // fill output products + void DR::produce(StreamsStub& accpetedStubs, + StreamsTrack& acceptedTracks, + StreamsStub& lostStubs, + StreamsTrack& lostTracks) { + const int offsetTrack = region_ * channelAssignment_->numNodesDR(); + for (int node = 0; node < channelAssignment_->numNodesDR(); node++) { + const int channelTrack = offsetTrack + node; + const int offsetStub = channelTrack * setup_->numLayers(); + // remove duplicated tracks, no merge of stubs, one stub per layer expected + vector cms(channelAssignment_->numComparisonModules(), nullptr); + vector& tracks = input_[node]; + for (Track*& track : tracks) { + if (!track) + // gaps propagate trough chain and appear in output stream + continue; + for (Track*& trackCM : cms) { + if (!trackCM) { + // tracks used in CMs propagate trough chain and appear in output stream unaltered + trackCM = track; + break; + } + if (equalEnough(track, trackCM)) { + // tracks compared in CMs propagate trough chain and appear in output stream as gap if identified as duplicate or unaltered elsewise + track = nullptr; + break; + } + } + } + // remove all gaps between end and last track + for (auto it = tracks.end(); it != tracks.begin();) + 
it = (*--it) ? tracks.begin() : tracks.erase(it); + // store output + StreamTrack& streamTrack = acceptedTracks[channelTrack]; + streamTrack.reserve(tracks.size()); + for (int layer = 0; layer < setup_->numLayers(); layer++) + accpetedStubs[offsetStub + layer].reserve(tracks.size()); + for (Track* track : tracks) { + if (!track) { + streamTrack.emplace_back(FrameTrack()); + for (int layer = 0; layer < setup_->numLayers(); layer++) + accpetedStubs[offsetStub + layer].emplace_back(FrameStub()); + continue; + } + streamTrack.push_back(track->frame_); + TTBV hitPattern(0, setup_->numLayers()); + for (Stub* stub : track->stubs_) { + hitPattern.set(stub->channel_); + accpetedStubs[offsetStub + stub->channel_].push_back(stub->frame_); + } + for (int layer : hitPattern.ids(false)) + accpetedStubs[offsetStub + layer].emplace_back(FrameStub()); + } + } + } + + // compares two tracks, returns true if those are considered duplicates + bool DR::equalEnough(Track* t0, Track* t1) const { + int same(0); + for (int layer = 0; layer < setup_->numLayers(); layer++) { + auto onLayer = [layer](Stub* stub) { return stub->channel_ == layer; }; + const auto s0 = find_if(t0->stubs_.begin(), t0->stubs_.end(), onLayer); + const auto s1 = find_if(t1->stubs_.begin(), t1->stubs_.end(), onLayer); + if (s0 != t0->stubs_.end() && s1 != t1->stubs_.end() && **s0 == **s1) + same++; + } + return same >= channelAssignment_->minIdenticalStubs(); + } + +} // namespace trklet diff --git a/L1Trigger/TrackFindingTracklet/src/DRin.cc b/L1Trigger/TrackFindingTracklet/src/DRin.cc new file mode 100644 index 0000000000000..9196ff1fee994 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/src/DRin.cc @@ -0,0 +1,459 @@ +#include "L1Trigger/TrackFindingTracklet/interface/DRin.h" + +#include +#include +#include +#include + +using namespace std; +using namespace edm; +using namespace tt; +using namespace trackerTFP; + +namespace trklet { + + DRin::DRin(const ParameterSet& iConfig, + const Setup* setup, + const 
DataFormats* dataFormats, + const LayerEncoding* layerEncoding, + const ChannelAssignment* channelAssignment, + const Settings* settings, + int region) + : enableTruncation_(iConfig.getParameter("EnableTruncation")), + useTTStubResiduals_(iConfig.getParameter("UseTTStubResiduals")), + setup_(setup), + dataFormats_(dataFormats), + layerEncoding_(layerEncoding), + channelAssignment_(channelAssignment), + settings_(settings), + region_(region), + input_(channelAssignment_->numChannelsTrack()), + // unified tracklet digitisation granularity + baseUinv2R_(.5 * settings_->kphi1() / settings_->kr() * pow(2, settings_->rinv_shift())), + baseUphiT_(settings_->kphi1() * pow(2, settings_->phi0_shift())), + baseUcot_(settings_->kz() / settings_->kr() * pow(2, settings_->t_shift())), + baseUzT_(settings_->kz() * pow(2, settings_->z0_shift())), + baseUr_(settings_->kr()), + baseUphi_(settings_->kphi1()), + baseUz_(settings_->kz()), + // KF input format digitisation granularity (identical to TMTT) + baseLinv2R_(dataFormats->base(Variable::inv2R, Process::kfin)), + baseLphiT_(dataFormats->base(Variable::phiT, Process::kfin)), + baseLcot_(dataFormats->base(Variable::cot, Process::kfin)), + baseLzT_(dataFormats->base(Variable::zT, Process::kfin)), + baseLr_(dataFormats->base(Variable::r, Process::kfin)), + baseLphi_(dataFormats->base(Variable::phi, Process::kfin)), + baseLz_(dataFormats->base(Variable::z, Process::kfin)), + // Finer granularity (by powers of 2) than the TMTT one. Used to transform from Tracklet to TMTT base. 
+ baseHinv2R_(baseLinv2R_ * pow(2, floor(log2(baseUinv2R_ / baseLinv2R_)))), + baseHphiT_(baseLphiT_ * pow(2, floor(log2(baseUphiT_ / baseLphiT_)))), + baseHcot_(baseLcot_ * pow(2, floor(log2(baseUcot_ / baseLcot_)))), + baseHzT_(baseLzT_ * pow(2, floor(log2(baseUzT_ / baseLzT_)))), + baseHr_(baseLr_ * pow(2, floor(log2(baseUr_ / baseLr_)))), + baseHphi_(baseLphi_ * pow(2, floor(log2(baseUphi_ / baseLphi_)))), + baseHz_(baseLz_ * pow(2, floor(log2(baseUz_ / baseLz_)))) { + // calculate digitisation granularity used for inverted cot(theta) + const int baseShiftInvCot = ceil(log2(setup_->outerRadius() / setup_->hybridRangeR())) - setup_->widthDSPbu(); + baseInvCot_ = pow(2, baseShiftInvCot); + } + + // read in and organize input tracks and stubs + void DRin::consume(const StreamsTrack& streamsTrack, const StreamsStub& streamsStub) { + static const double maxCot = sinh(setup_->maxEta()) + setup_->beamWindowZ() / setup_->chosenRofZ(); + static const int unusedMSBcot = floor(log2(baseUcot_ * pow(2, settings_->nbitst()) / (2. 
* maxCot))); + static const double baseCot = + baseUcot_ * pow(2, settings_->nbitst() - unusedMSBcot - 1 - setup_->widthAddrBRAM18()); + const int offsetTrack = region_ * channelAssignment_->numChannelsTrack(); + // count tracks and stubs to reserve container + int nTracks(0); + int nStubs(0); + for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { + const int channelTrack = offsetTrack + channel; + const int offsetStub = channelAssignment_->offsetStub(channelTrack); + const StreamTrack& streamTrack = streamsTrack[channelTrack]; + input_[channel].reserve(streamTrack.size()); + for (int frame = 0; frame < (int)streamTrack.size(); frame++) { + if (streamTrack[frame].first.isNull()) + continue; + nTracks++; + for (int layer = 0; layer < channelAssignment_->numProjectionLayers(channel); layer++) + if (streamsStub[offsetStub + layer][frame].first.isNonnull()) + nStubs++; + } + } + stubs_.reserve(nStubs + nTracks * channelAssignment_->numSeedingLayers()); + tracks_.reserve(nTracks); + // store tracks and stubs + for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { + const int channelTrack = offsetTrack + channel; + const int offsetStub = channelAssignment_->offsetStub(channelTrack); + const StreamTrack& streamTrack = streamsTrack[channelTrack]; + vector& input = input_[channel]; + for (int frame = 0; frame < (int)streamTrack.size(); frame++) { + const TTTrackRef& ttTrackRef = streamTrack[frame].first; + if (ttTrackRef.isNull()) { + input.push_back(nullptr); + continue; + } + //convert track parameter + const double r2Inv = digi(-ttTrackRef->rInv() / 2., baseUinv2R_); + const double phi0U = + digi(tt::deltaPhi(ttTrackRef->phi() - region_ * setup_->baseRegion() + setup_->hybridRangePhi() / 2.), + baseUphiT_); + const double phi0S = digi(phi0U - setup_->hybridRangePhi() / 2., baseUphiT_); + const double cot = digi(ttTrackRef->tanL(), baseUcot_); + const double z0 = digi(ttTrackRef->z0(), baseUzT_); + const 
double phiT = digi(phi0S + r2Inv * digi(dataFormats_->chosenRofPhi(), baseUr_), baseUphiT_); + const double zT = digi(z0 + cot * digi(setup_->chosenRofZ(), baseUr_), baseUzT_); + // kill tracks outside of fiducial range + if (abs(phiT) > setup_->baseRegion() / 2. || abs(zT) > setup_->hybridMaxCot() * setup_->chosenRofZ() || + abs(z0) > setup_->beamWindowZ()) { + input.push_back(nullptr); + continue; + } + // convert stubs + vector stubs; + stubs.reserve(channelAssignment_->numProjectionLayers(channel) + channelAssignment_->numSeedingLayers()); + for (int layer = 0; layer < channelAssignment_->numProjectionLayers(channel); layer++) { + const FrameStub& frameStub = streamsStub[offsetStub + layer][frame]; + const TTStubRef& ttStubRef = frameStub.first; + if (ttStubRef.isNull()) + continue; + const int layerId = channelAssignment_->layerId(channel, layer); + // parse residuals from tt::Frame and take r and layerId from tt::TTStubRef + const bool barrel = setup_->barrel(ttStubRef); + const int layerIdTracklet = setup_->trackletLayerId(ttStubRef); + const double basePhi = barrel ? settings_->kphi1() : settings_->kphi(layerIdTracklet); + const double baseRZ = barrel ? settings_->kz(layerIdTracklet) : settings_->kz(); + const int widthRZ = barrel ? settings_->zresidbits() : settings_->rresidbits(); + TTBV hw(frameStub.second); + const TTBV hwRZ(hw, widthRZ, 0, true); + hw >>= widthRZ; + const TTBV hwPhi(hw, settings_->phiresidbits(), 0, true); + hw >>= settings_->phiresidbits(); + const int indexLayerId = setup_->indexLayerId(ttStubRef); + const SensorModule::Type type = setup_->type(ttStubRef); + const int widthR = setup_->tbWidthR(type); + const double baseR = setup_->hybridBaseR(type); + const TTBV hwR(hw, widthR, 0, barrel); + hw >>= widthR; + double r = hwR.val(baseR) + (barrel ? 
setup_->hybridLayerR(indexLayerId) : 0.); + if (type == SensorModule::Disk2S) + r = setup_->disk2SR(indexLayerId, r); + r = digi(r - dataFormats_->chosenRofPhi(), baseUr_); + double phi = hwPhi.val(basePhi); + if (basePhi > baseUphi_) + phi += baseUphi_ / 2.; + const double z = digi(hwRZ.val(baseRZ) * (barrel ? 1. : -cot), baseUz_); + const TTBV hwStubId(hw, channelAssignment_->widthSeedStubId(), 0, false); + const int stubId = hwStubId.val(); + // determine module type + bool psTilt; + if (barrel) { + const double posZ = (r + digi(dataFormats_->chosenRofPhi(), baseUr_)) * cot + z0 + z; + const int indexLayerId = setup_->indexLayerId(ttStubRef); + const double limit = setup_->tiltedLayerLimitZ(indexLayerId); + psTilt = abs(posZ) < limit; + } else + psTilt = setup_->psModule(ttStubRef); + if (useTTStubResiduals_) { + const GlobalPoint gp = setup_->stubPos(ttStubRef); + const double ttR = r; + const double ttZ = gp.z() - (z0 + (ttR + dataFormats_->chosenRofPhi()) * cot); + stubs_.emplace_back(ttStubRef, layerId, layerIdTracklet, false, stubId, ttR, phi, ttZ, psTilt); + } else + stubs_.emplace_back(ttStubRef, layerId, layerIdTracklet, false, stubId, r, phi, z, psTilt); + stubs.push_back(&stubs_.back()); + } + // create fake seed stubs, since TrackBuilder doesn't output these stubs, required by the KF. 
+ for (int seedingLayer = 0; seedingLayer < channelAssignment_->numSeedingLayers(); seedingLayer++) { + const int channelStub = channelAssignment_->numProjectionLayers(channel) + seedingLayer; + const FrameStub& frameStub = streamsStub[offsetStub + channelStub][frame]; + const TTStubRef& ttStubRef = frameStub.first; + if (ttStubRef.isNull()) + continue; + const int layerId = channelAssignment_->layerId(channel, channelStub); + const int layerIdTracklet = setup_->trackletLayerId(ttStubRef); + const int stubId = TTBV(frameStub.second).val(channelAssignment_->widthSeedStubId()); + const bool barrel = setup_->barrel(ttStubRef); + double r; + if (barrel) + r = digi(setup_->hybridLayerR(layerId - setup_->offsetLayerId()) - dataFormats_->chosenRofPhi(), baseUr_); + else { + r = (z0 + + digi(setup_->hybridDiskZ(layerId - setup_->offsetLayerId() - setup_->offsetLayerDisks()), baseUzT_)) * + digi(1. / digi(abs(cot), baseCot), baseInvCot_); + r = digi(r - digi(dataFormats_->chosenRofPhi(), baseUr_), baseUr_); + } + static constexpr double phi = 0.; + static constexpr double z = 0.; + // determine module type + bool psTilt; + if (barrel) { + const double posZ = + digi(digi(setup_->hybridLayerR(layerId - setup_->offsetLayerId()), baseUr_) * cot + z0, baseUz_); + const int indexLayerId = setup_->indexLayerId(ttStubRef); + const double limit = digi(setup_->tiltedLayerLimitZ(indexLayerId), baseUz_); + psTilt = abs(posZ) < limit; + } else + psTilt = true; + const GlobalPoint gp = setup_->stubPos(ttStubRef); + const double ttR = gp.perp() - dataFormats_->chosenRofPhi(); + const double ttZ = gp.z() - (z0 + (ttR + dataFormats_->chosenRofPhi()) * cot); + if (useTTStubResiduals_) + stubs_.emplace_back(ttStubRef, layerId, layerIdTracklet, true, stubId, ttR, phi, ttZ, psTilt); + else + stubs_.emplace_back(ttStubRef, layerId, layerIdTracklet, true, stubId, r, phi, z, psTilt); + stubs.push_back(&stubs_.back()); + } + const bool valid = frame < setup_->numFrames() ? 
true : enableTruncation_; + tracks_.emplace_back(ttTrackRef, valid, r2Inv, phiT, cot, zT, stubs); + input.push_back(&tracks_.back()); + } + } + } + + // fill output products + void DRin::produce(StreamsStub& accpetedStubs, + StreamsTrack& acceptedTracks, + StreamsStub& lostStubs, + StreamsTrack& lostTracks) { + // base transform into high precision TMTT format + for (Track& track : tracks_) { + track.inv2R_ = redigi(track.inv2R_, baseUinv2R_, baseHinv2R_, setup_->widthDSPbu()); + track.phiT_ = redigi(track.phiT_, baseUphiT_, baseHphiT_, setup_->widthDSPbu()); + track.cot_ = redigi(track.cot_, baseUcot_, baseHcot_, setup_->widthDSPbu()); + track.zT_ = redigi(track.zT_, baseUzT_, baseHzT_, setup_->widthDSPbu()); + for (Stub* stub : track.stubs_) { + stub->r_ = redigi(stub->r_, baseUr_, baseHr_, setup_->widthDSPbu()); + stub->phi_ = redigi(stub->phi_, baseUphi_, baseHphi_, setup_->widthDSPbu()); + stub->z_ = redigi(stub->z_, baseUz_, baseHz_, setup_->widthDSPbu()); + } + } + // find sector + for (Track& track : tracks_) { + const int sectorPhi = track.phiT_ < 0. ? 
0 : 1; + track.phiT_ -= (sectorPhi - .5) * setup_->baseSector(); + int sectorEta(-1); + for (; sectorEta < setup_->numSectorsEta(); sectorEta++) + if (track.zT_ < digi(setup_->chosenRofZ() * sinh(setup_->boundarieEta(sectorEta + 1)), baseHzT_)) + break; + if (sectorEta >= setup_->numSectorsEta() || sectorEta <= -1) { + track.valid_ = false; + continue; + } + track.cot_ = track.cot_ - digi(setup_->sectorCot(sectorEta), baseHcot_); + track.zT_ = track.zT_ - digi(setup_->chosenRofZ() * setup_->sectorCot(sectorEta), baseHzT_); + track.sector_ = sectorPhi * setup_->numSectorsEta() + sectorEta; + } + // base transform into TMTT format + for (Track& track : tracks_) { + if (!track.valid_) + continue; + // store track parameter shifts + const double dinv2R = digi(track.inv2R_ - digi(track.inv2R_, baseLinv2R_), baseHinv2R_); + const double dphiT = digi(track.phiT_ - digi(track.phiT_, baseLphiT_), baseHphiT_); + const double dcot = digi(track.cot_ - digi(track.cot_, baseLcot_), baseHcot_); + const double dzT = digi(track.zT_ - digi(track.zT_, baseLzT_), baseHzT_); + // shift track parameter; + track.inv2R_ = digi(track.inv2R_, baseLinv2R_); + track.phiT_ = digi(track.phiT_, baseLphiT_); + track.cot_ = digi(track.cot_, baseLcot_); + track.zT_ = digi(track.zT_, baseLzT_); + // range checks + if (!dataFormats_->format(Variable::inv2R, Process::kfin).inRange(track.inv2R_, true)) + track.valid_ = false; + if (!dataFormats_->format(Variable::phiT, Process::kfin).inRange(track.phiT_, true)) + track.valid_ = false; + if (!dataFormats_->format(Variable::cot, Process::kfin).inRange(track.cot_, true)) + track.valid_ = false; + if (!dataFormats_->format(Variable::zT, Process::kfin).inRange(track.zT_, true)) + track.valid_ = false; + if (!track.valid_) + continue; + // adjust stub residuals by track parameter shifts + for (Stub* stub : track.stubs_) { + const double dphi = digi(dphiT + stub->r_ * dinv2R, baseHphi_); + const double r = stub->r_ + digi(dataFormats_->chosenRofPhi() - 
setup_->chosenRofZ(), baseHr_); + const double dz = digi(dzT + r * dcot, baseHz_); + stub->phi_ = digi(stub->phi_ + dphi, baseLphi_); + stub->z_ = digi(stub->z_ + dz, baseLz_); + // range checks + if (!dataFormats_->format(Variable::phi, Process::kfin).inRange(stub->phi_)) + stub->valid_ = false; + if (!dataFormats_->format(Variable::z, Process::kfin).inRange(stub->z_)) + stub->valid_ = false; + } + } + // encode layer id + for (Track& track : tracks_) { + if (!track.valid_) + continue; + const int sectorEta = track.sector_ % setup_->numSectorsEta(); + const int zT = dataFormats_->format(Variable::zT, Process::kfin).toUnsigned(track.zT_); + const int cot = dataFormats_->format(Variable::cot, Process::kfin).toUnsigned(track.cot_); + for (Stub* stub : track.stubs_) { + if (!stub->valid_) + continue; + // store encoded layerId + stub->layerKF_ = layerEncoding_->layerIdKF(sectorEta, zT, cot, stub->layer_); + // kill stubs from layers which can't be crossed by track + if (stub->layerKF_ == -1) + stub->valid_ = false; + } + TTBV hitPattern(0, setup_->numLayers()); + // kill multiple stubs from same kf layer + for (Stub* stub : track.stubs_) { + if (!stub->valid_) + continue; + if (hitPattern[stub->layerKF_]) + stub->valid_ = false; + else + hitPattern.set(stub->layerKF_); + } + // lookup maybe layers + track.maybe_ = layerEncoding_->maybePattern(sectorEta, zT, cot); + } + // kill tracks with not enough layer + for (Track& track : tracks_) { + if (!track.valid_) + continue; + TTBV hits(0, setup_->numLayers()); + for (const Stub* stub : track.stubs_) + if (stub->valid_) + hits.set(stub->layerKF_); + if (hits.count() < setup_->kfMinLayers()) + track.valid_ = false; + } + // store helper + auto frameTrack = [this](Track* track) { + const TTBV sectorPhi( + dataFormats_->format(Variable::sectorPhi, Process::kfin).ttBV(track->sector_ / setup_->numSectorsEta())); + const TTBV sectorEta( + dataFormats_->format(Variable::sectorEta, Process::kfin).ttBV(track->sector_ % 
setup_->numSectorsEta())); + const TTBV inv2R(dataFormats_->format(Variable::inv2R, Process::kfin).ttBV(track->inv2R_)); + const TTBV phiT(dataFormats_->format(Variable::phiT, Process::kfin).ttBV(track->phiT_)); + const TTBV cot(dataFormats_->format(Variable::cot, Process::kfin).ttBV(track->cot_)); + const TTBV zT(dataFormats_->format(Variable::zT, Process::kfin).ttBV(track->zT_)); + return FrameTrack( + track->ttTrackRef_, + Frame("1" + sectorPhi.str() + sectorEta.str() + inv2R.str() + phiT.str() + zT.str() + cot.str())); + }; + auto frameStub = [this](Track* track, int layer) { + auto equal = [layer](Stub* stub) { return stub->valid_ && stub->layerKF_ == layer; }; + const auto it = find_if(track->stubs_.begin(), track->stubs_.end(), equal); + if (it == track->stubs_.end() || !(*it)->valid_) + return FrameStub(); + Stub* stub = *it; + const TTBV layerId(stub->layerDet_, channelAssignment_->widthLayerId()); + const TTBV stubId(stub->stubId_, channelAssignment_->widthSeedStubId(), true); + const TTBV r(dataFormats_->format(Variable::r, Process::kfin).ttBV(stub->r_)); + const TTBV phi(dataFormats_->format(Variable::phi, Process::kfin).ttBV(stub->phi_)); + const TTBV z(dataFormats_->format(Variable::z, Process::kfin).ttBV(stub->z_)); + return FrameStub( + stub->ttStubRef_, + Frame("1" + to_string(stub->psTilt_) + layerId.str() + stubId.str() + r.str() + phi.str() + z.str())); + }; + // route tracks into pt bins and store result + const int offsetTrack = region_ * channelAssignment_->numNodesDR(); + for (int nodeDR = 0; nodeDR < channelAssignment_->numNodesDR(); nodeDR++) { + deque accepted; + deque lost; + vector> stacks(channelAssignment_->numChannelsTrack()); + vector> inputs(channelAssignment_->numChannelsTrack()); + for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { + for (Track* track : input_[channel]) { + const bool match = track && channelAssignment_->nodeDR(track->ttTrackRef_) == nodeDR; + if (match && !track->valid_) + 
lost.push_back(track); + inputs[channel].push_back(match && track->valid_ ? track : nullptr); + } + } + // remove all gaps between end and last track + for (deque& input : inputs) + for (auto it = input.end(); it != input.begin();) + it = (*--it) ? input.begin() : input.erase(it); + // clock accurate firmware emulation, each while trip describes one clock tick, one stub in and one stub out per tick + while (!all_of(inputs.begin(), inputs.end(), [](const deque& tracks) { return tracks.empty(); }) or + !all_of(stacks.begin(), stacks.end(), [](const deque& tracks) { return tracks.empty(); })) { + // fill input fifos + for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { + deque& stack = stacks[channel]; + Track* track = pop_front(inputs[channel]); + if (track) { + if (enableTruncation_ && (int)stack.size() == channelAssignment_->depthMemory() - 1) + lost.push_back(pop_front(stack)); + stack.push_back(track); + } + } + // merge input fifos to one stream, prioritizing higher input channel over lower channel + bool nothingToRoute(true); + for (int channel = channelAssignment_->numChannelsTrack() - 1; channel >= 0; channel--) { + Track* track = pop_front(stacks[channel]); + if (track) { + nothingToRoute = false; + accepted.push_back(track); + break; + } + } + if (nothingToRoute) + accepted.push_back(nullptr); + } + // truncate if desired + if (enableTruncation_ && (int)accepted.size() > setup_->numFrames()) { + const auto limit = next(accepted.begin(), setup_->numFrames()); + copy_if(limit, accepted.end(), back_inserter(lost), [](const Track* track) { return track; }); + accepted.erase(limit, accepted.end()); + } + // remove all gaps between end and last track + for (auto it = accepted.end(); it != accepted.begin();) + it = (*--it) ? 
accepted.begin() : accepted.erase(it); + // fill products StreamsStub& accpetedStubs, StreamsTrack& acceptedTracks, StreamsStub& lostStubs, StreamsTrack& lostTracks + const int channelTrack = offsetTrack + nodeDR; + const int offsetStub = channelTrack * setup_->numLayers(); + // fill lost tracks and stubs without gaps + lostTracks[channelTrack].reserve(lost.size()); + for (int layer = 0; layer < setup_->numLayers(); layer++) + lostStubs[offsetStub + layer].reserve(lost.size()); + for (Track* track : lost) { + lostTracks[channelTrack].emplace_back(frameTrack(track)); + for (int layer = 0; layer < setup_->numLayers(); layer++) + lostStubs[offsetStub + layer].emplace_back(frameStub(track, layer)); + } + // fill accepted tracks and stubs with gaps + acceptedTracks[channelTrack].reserve(accepted.size()); + for (int layer = 0; layer < setup_->numLayers(); layer++) + accpetedStubs[offsetStub + layer].reserve(accepted.size()); + for (Track* track : accepted) { + if (!track) { // fill gap + acceptedTracks[channelTrack].emplace_back(FrameTrack()); + for (int layer = 0; layer < setup_->numLayers(); layer++) + accpetedStubs[offsetStub + layer].emplace_back(FrameStub()); + continue; + } + acceptedTracks[channelTrack].emplace_back(frameTrack(track)); + for (int layer = 0; layer < setup_->numLayers(); layer++) + accpetedStubs[offsetStub + layer].emplace_back(frameStub(track, layer)); + } + } + } + + // remove and return first element of deque, returns nullptr if empty + template + T* DRin::pop_front(deque& ts) const { + T* t = nullptr; + if (!ts.empty()) { + t = ts.front(); + ts.pop_front(); + } + return t; + } + + // basetransformation of val from baseLow into baseHigh using widthMultiplier bit multiplication + double DRin::redigi(double val, double baseLow, double baseHigh, int widthMultiplier) const { + const double base = pow(2, 1 - widthMultiplier); + const double transform = digi(baseLow / baseHigh, base); + return (floor(val * transform / baseLow) + .5) * baseHigh; + } + 
+} // namespace trklet diff --git a/L1Trigger/TrackFindingTracklet/src/FitTrack.cc b/L1Trigger/TrackFindingTracklet/src/FitTrack.cc index 30bfa3a97cf5c..5050aa8585ee8 100644 --- a/L1Trigger/TrackFindingTracklet/src/FitTrack.cc +++ b/L1Trigger/TrackFindingTracklet/src/FitTrack.cc @@ -1054,12 +1054,12 @@ void FitTrack::execute(deque& streamTrackRaw, const string valid("1"); streamTrackRaw.emplace_back(valid + seed + rinv + phi0 + z0 + t); + // convert projected stubs unsigned int ihit(0); for (unsigned int ilayer = 0; ilayer < N_LAYER + N_DISK; ilayer++) { if (bestTracklet->match(ilayer)) { const Residual& resid = bestTracklet->resid(ilayer); // create bit accurate 64 bit word - const string valid("1"); string r = resid.stubptr()->r().str(); const string& phi = resid.fpgaphiresid().str(); const string& rz = resid.fpgarzresid().str(); @@ -1068,10 +1068,18 @@ void FitTrack::execute(deque& streamTrackRaw, bool disk2S = (stub->disk() != 0) && (stub->isPSmodule() == 0); if (disk2S) r = string(widthDisk2Sidentifier, '0') + r; + const string& stubId = resid.fpgastubid().str(); // store seed, L1TStub, and bit accurate 64 bit word in clock accurate output - streamsStubRaw[ihit++].emplace_back(seedType, *stub, valid + r + phi + rz); + streamsStubRaw[ihit++].emplace_back(seedType, *stub, valid + stubId + r + phi + rz); } } + // convert seed stubs + const string& stubId0 = bestTracklet->innerFPGAStub()->stubindex().str(); + const L1TStub* stub0 = bestTracklet->innerFPGAStub()->l1tstub(); + streamsStubRaw[ihit++].emplace_back(seedType, *stub0, valid + stubId0); + const string& stubId1 = bestTracklet->outerFPGAStub()->stubindex().str(); + const L1TStub* stub1 = bestTracklet->outerFPGAStub()->l1tstub(); + streamsStubRaw[ihit++].emplace_back(seedType, *stub1, valid + stubId1); // fill all layers that have no stubs with gaps while (ihit < streamsStubRaw.size()) { streamsStubRaw[ihit++].emplace_back(); diff --git a/L1Trigger/TrackFindingTracklet/src/HybridFit.cc 
b/L1Trigger/TrackFindingTracklet/src/HybridFit.cc index 9d292d22adfd0..8902cc7070225 100644 --- a/L1Trigger/TrackFindingTracklet/src/HybridFit.cc +++ b/L1Trigger/TrackFindingTracklet/src/HybridFit.cc @@ -187,70 +187,76 @@ void HybridFit::Fit(Tracklet* tracklet, std::vector& trackstublist) tmtt::L1fittedTrack fittedTrk = fitterKF.fit(l1track3d); if (fittedTrk.accepted()) { - tmtt::KFTrackletTrack trk = fittedTrk.returnKFTrackletTrack(); + if (fittedTrk.consistentSector()) { + tmtt::KFTrackletTrack trk = fittedTrk.returnKFTrackletTrack(); - if (settings_.printDebugKF()) - edm::LogVerbatim("L1track") << "Done with Kalman fit. Pars: pt = " << trk.pt() - << ", 1/2R = " << settings_.bfield() * 3 * trk.qOverPt() / 2000 - << ", phi0 = " << trk.phi0() << ", eta = " << trk.eta() << ", z0 = " << trk.z0() - << ", chi2 = " << trk.chi2() << ", accepted = " << trk.accepted(); - - double d0, chi2rphi, phi0, qoverpt = -999; - if (trk.done_bcon()) { - d0 = trk.d0_bcon(); - chi2rphi = trk.chi2rphi_bcon(); - phi0 = trk.phi0_bcon(); - qoverpt = trk.qOverPt_bcon(); - } else { - d0 = trk.d0(); - chi2rphi = trk.chi2rphi(); - phi0 = trk.phi0(); - qoverpt = trk.qOverPt(); - } + if (settings_.printDebugKF()) + edm::LogVerbatim("L1track") << "Done with Kalman fit. Pars: pt = " << trk.pt() + << ", 1/2R = " << settings_.bfield() * 3 * trk.qOverPt() / 2000 + << ", phi0 = " << trk.phi0() << ", eta = " << trk.eta() << ", z0 = " << trk.z0() + << ", chi2 = " << trk.chi2() << ", accepted = " << trk.accepted(); + + double d0, chi2rphi, phi0, qoverpt = -999; + if (trk.done_bcon()) { + d0 = trk.d0_bcon(); + chi2rphi = trk.chi2rphi_bcon(); + phi0 = trk.phi0_bcon(); + qoverpt = trk.qOverPt_bcon(); + } else { + d0 = trk.d0(); + chi2rphi = trk.chi2rphi(); + phi0 = trk.phi0(); + qoverpt = trk.qOverPt(); + } - // Tracklet wants phi0 with respect to lower edge of sector, not global phi0. 
- double phi0fit = reco::reduceRange(phi0 - iSector_ * 2 * M_PI / N_SECTOR + 0.5 * settings_.dphisectorHG()); - double rinvfit = 0.01 * settings_.c() * settings_.bfield() * qoverpt; - - int irinvfit = floor(rinvfit / settings_.krinvpars()); - int iphi0fit = floor(phi0fit / settings_.kphi0pars()); - int itanlfit = floor(trk.tanLambda() / settings_.ktpars()); - int iz0fit = floor(trk.z0() / settings_.kz0pars()); - int id0fit = floor(d0 / settings_.kd0pars()); - int ichi2rphifit = chi2rphi / 16; - int ichi2rzfit = trk.chi2rz() / 16; - - const vector& stubsFromFit = trk.stubs(); - vector l1stubsFromFit; - for (const tmtt::Stub* s : stubsFromFit) { - unsigned int IDf = s->index(); - const L1TStub* l1s = L1StubIndices.at(IDf); - l1stubsFromFit.push_back(l1s); - } + // Tracklet wants phi0 with respect to lower edge of sector, not global phi0. + double phi0fit = reco::reduceRange(phi0 - iSector_ * 2 * M_PI / N_SECTOR + 0.5 * settings_.dphisectorHG()); + double rinvfit = 0.01 * settings_.c() * settings_.bfield() * qoverpt; + + int irinvfit = floor(rinvfit / settings_.krinvpars()); + int iphi0fit = floor(phi0fit / settings_.kphi0pars()); + int itanlfit = floor(trk.tanLambda() / settings_.ktpars()); + int iz0fit = floor(trk.z0() / settings_.kz0pars()); + int id0fit = floor(d0 / settings_.kd0pars()); + int ichi2rphifit = chi2rphi / 16; + int ichi2rzfit = trk.chi2rz() / 16; + + const vector& stubsFromFit = trk.stubs(); + vector l1stubsFromFit; + for (const tmtt::Stub* s : stubsFromFit) { + unsigned int IDf = s->index(); + const L1TStub* l1s = L1StubIndices.at(IDf); + l1stubsFromFit.push_back(l1s); + } - tracklet->setFitPars(rinvfit, - phi0fit, - d0, - trk.tanLambda(), - trk.z0(), - chi2rphi, - trk.chi2rz(), - rinvfit, - phi0fit, - d0, - trk.tanLambda(), - trk.z0(), - chi2rphi, - trk.chi2rz(), - irinvfit, - iphi0fit, - id0fit, - itanlfit, - iz0fit, - ichi2rphifit, - ichi2rzfit, - trk.hitPattern(), - l1stubsFromFit); + tracklet->setFitPars(rinvfit, + phi0fit, + d0, + 
trk.tanLambda(), + trk.z0(), + chi2rphi, + trk.chi2rz(), + rinvfit, + phi0fit, + d0, + trk.tanLambda(), + trk.z0(), + chi2rphi, + trk.chi2rz(), + irinvfit, + iphi0fit, + id0fit, + itanlfit, + iz0fit, + ichi2rphifit, + ichi2rzfit, + trk.hitPattern(), + l1stubsFromFit); + } else { + if (settings_.printDebugKF()) { + edm::LogVerbatim("L1track") << "FitTrack:KF rejected track"; + } + } } else { if (settings_.printDebugKF()) { edm::LogVerbatim("L1track") << "FitTrack:KF rejected track"; diff --git a/L1Trigger/TrackFindingTracklet/src/KFin.cc b/L1Trigger/TrackFindingTracklet/src/KFin.cc index 66b7c5adb4a9f..c5cbc3d469648 100644 --- a/L1Trigger/TrackFindingTracklet/src/KFin.cc +++ b/L1Trigger/TrackFindingTracklet/src/KFin.cc @@ -16,185 +16,98 @@ namespace trklet { const DataFormats* dataFormats, const LayerEncoding* layerEncoding, const ChannelAssignment* channelAssignment, - const Settings* settings, int region) : enableTruncation_(iConfig.getParameter("EnableTruncation")), - useTTStubResiduals_(iConfig.getParameter("UseTTStubResiduals")), setup_(setup), dataFormats_(dataFormats), layerEncoding_(layerEncoding), channelAssignment_(channelAssignment), - settings_(settings), region_(region), - input_(channelAssignment_->numChannelsTrack()) { - // unified tracklet digitisation granularity - baseUinv2R_ = .5 * settings_->kphi1() / settings_->kr() * pow(2, settings_->rinv_shift()); - baseUphiT_ = settings_->kphi1() * pow(2, settings_->phi0_shift()); - baseUcot_ = settings_->kz() / settings_->kr() * pow(2, settings_->t_shift()); - baseUzT_ = settings_->kz() * pow(2, settings_->z0_shift()); - baseUr_ = settings_->kr(); - baseUphi_ = settings_->kphi1(); - baseUz_ = settings_->kz(); - // KF input format digitisation granularity (identical to TMTT) - baseLinv2R_ = dataFormats->base(Variable::inv2R, Process::kfin); - baseLphiT_ = dataFormats->base(Variable::phiT, Process::kfin); - baseLcot_ = dataFormats->base(Variable::cot, Process::kfin); - baseLzT_ = 
dataFormats->base(Variable::zT, Process::kfin); - baseLr_ = dataFormats->base(Variable::r, Process::kfin); - baseLphi_ = dataFormats->base(Variable::phi, Process::kfin); - baseLz_ = dataFormats->base(Variable::z, Process::kfin); - // Finer granularity (by powers of 2) than the TMTT one. Used to transform from Tracklet to TMTT base. - baseHinv2R_ = baseLinv2R_ * pow(2, floor(log2(baseUinv2R_ / baseLinv2R_))); - baseHphiT_ = baseLphiT_ * pow(2, floor(log2(baseUphiT_ / baseLphiT_))); - baseHcot_ = baseLcot_ * pow(2, floor(log2(baseUcot_ / baseLcot_))); - baseHzT_ = baseLzT_ * pow(2, floor(log2(baseUzT_ / baseLzT_))); - baseHr_ = baseLr_ * pow(2, floor(log2(baseUr_ / baseLr_))); - baseHphi_ = baseLphi_ * pow(2, floor(log2(baseUphi_ / baseLphi_))); - baseHz_ = baseLz_ * pow(2, floor(log2(baseUz_ / baseLz_))); - // calculate digitisation granularity used for inverted cot(theta) - const int baseShiftInvCot = ceil(log2(setup_->outerRadius() / setup_->hybridRangeR())) - setup_->widthDSPbu(); - baseInvCot_ = pow(2, baseShiftInvCot); - } + input_(channelAssignment_->numNodesDR()) {} // read in and organize input tracks and stubs void KFin::consume(const StreamsTrack& streamsTrack, const StreamsStub& streamsStub) { - static const double maxCot = sinh(setup_->maxEta()) + setup_->beamWindowZ() / setup_->chosenRofZ(); - static const int unusedMSBcot = floor(log2(baseUcot_ * pow(2, settings_->nbitst()) / (2. 
* maxCot))); - static const double baseCot = - baseUcot_ * pow(2, settings_->nbitst() - unusedMSBcot - 1 - setup_->widthAddrBRAM18()); - const int offsetTrack = region_ * channelAssignment_->numChannelsTrack(); - // count tracks and stubs to reserve container - int nTracks(0); - int nStubs(0); - for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { - const int channelTrack = offsetTrack + channel; - const int offsetStub = channelAssignment_->offsetStub(channelTrack); - const StreamTrack& streamTrack = streamsTrack[channelTrack]; + const int offsetTrack = region_ * channelAssignment_->numNodesDR(); + auto nonNullTrack = [](int sum, const FrameTrack& frame) { return sum + (frame.first.isNonnull() ? 1 : 0); }; + auto nonNullStub = [](int sum, const FrameStub& frame) { return sum + (frame.first.isNonnull() ? 1 : 0); }; + // count tracks and stubs and reserve corresponding vectors + int sizeTracks(0); + int sizeStubs(0); + for (int channel = 0; channel < channelAssignment_->numNodesDR(); channel++) { + const int streamTrackId = offsetTrack + channel; + const int offsetStub = streamTrackId * setup_->numLayers(); + const StreamTrack& streamTrack = streamsTrack[streamTrackId]; input_[channel].reserve(streamTrack.size()); - for (int frame = 0; frame < (int)streamTrack.size(); frame++) { - if (streamTrack[frame].first.isNull()) - continue; - nTracks++; - for (int layer = 0; layer < channelAssignment_->numProjectionLayers(channel); layer++) - if (streamsStub[offsetStub + layer][frame].first.isNonnull()) - nStubs++; + sizeTracks += accumulate(streamTrack.begin(), streamTrack.end(), 0, nonNullTrack); + for (int layer = 0; layer < setup_->numLayers(); layer++) { + const StreamStub& streamStub = streamsStub[offsetStub + layer]; + sizeStubs += accumulate(streamStub.begin(), streamStub.end(), 0, nonNullStub); } } - stubs_.reserve(nStubs + nTracks * channelAssignment_->numSeedingLayers()); - tracks_.reserve(nTracks); - // store tracks and stubs - for 
(int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { - const int channelTrack = offsetTrack + channel; - const int offsetStub = channelAssignment_->offsetStub(channelTrack); - const StreamTrack& streamTrack = streamsTrack[channelTrack]; + tracks_.reserve(sizeTracks); + stubs_.reserve(sizeStubs); + // transform input data into handy structs + for (int channel = 0; channel < channelAssignment_->numNodesDR(); channel++) { vector& input = input_[channel]; + const int streamTrackId = offsetTrack + channel; + const int offsetStub = streamTrackId * setup_->numLayers(); + const StreamTrack& streamTrack = streamsTrack[streamTrackId]; for (int frame = 0; frame < (int)streamTrack.size(); frame++) { - const TTTrackRef& ttTrackRef = streamTrack[frame].first; - if (ttTrackRef.isNull()) { - input.push_back(nullptr); - continue; - } - //convert track parameter - const double r2Inv = digi(-ttTrackRef->rInv() / 2., baseUinv2R_); - const double phi0U = - digi(tt::deltaPhi(ttTrackRef->phi() - region_ * setup_->baseRegion() + setup_->hybridRangePhi() / 2.), - baseUphiT_); - const double phi0S = digi(phi0U - setup_->hybridRangePhi() / 2., baseUphiT_); - const double cot = digi(ttTrackRef->tanL(), baseUcot_); - const double z0 = digi(ttTrackRef->z0(), baseUzT_); - const double phiT = digi(phi0S + r2Inv * digi(dataFormats_->chosenRofPhi(), baseUr_), baseUphiT_); - const double zT = digi(z0 + cot * digi(setup_->chosenRofZ(), baseUr_), baseUzT_); - // kill tracks outside of fiducial range - if (abs(phiT) > setup_->baseRegion() / 2. 
|| abs(zT) > setup_->hybridMaxCot() * setup_->chosenRofZ() || - abs(z0) > setup_->beamWindowZ()) { + const FrameTrack& frameTrack = streamTrack[frame]; + if (frameTrack.first.isNull()) { input.push_back(nullptr); continue; } - // convert stubs vector stubs; - stubs.reserve(channelAssignment_->numProjectionLayers(channel) + channelAssignment_->numSeedingLayers()); - for (int layer = 0; layer < channelAssignment_->numProjectionLayers(channel); layer++) { + stubs.reserve(setup_->numLayers()); + for (int layer = 0; layer < setup_->numLayers(); layer++) { const FrameStub& frameStub = streamsStub[offsetStub + layer][frame]; - const TTStubRef& ttStubRef = frameStub.first; - const int layerId = channelAssignment_->layerId(channel, layer); - if (ttStubRef.isNull()) + if (frameStub.first.isNull()) continue; - // parse residuals from tt::Frame and take r and layerId from tt::TTStubRef - const bool barrel = setup_->barrel(ttStubRef); - const int layerIdTracklet = setup_->trackletLayerId(ttStubRef); - const double basePhi = barrel ? settings_->kphi1() : settings_->kphi(layerIdTracklet); - const double baseRZ = barrel ? settings_->kz(layerIdTracklet) : settings_->kz(); - const int widthRZ = barrel ? settings_->zresidbits() : settings_->rresidbits(); - TTBV hw(frameStub.second); - const TTBV hwRZ(hw, widthRZ, 0, true); - hw >>= widthRZ; - const TTBV hwPhi(hw, settings_->phiresidbits(), 0, true); - hw >>= settings_->phiresidbits(); - const double r = digi(setup_->stubR(hw, ttStubRef) - dataFormats_->chosenRofPhi(), baseUr_); - double phi = hwPhi.val(basePhi); - if (basePhi > baseUphi_) - phi += baseUphi_ / 2.; - const double z = digi(hwRZ.val(baseRZ) * (barrel ? 1. 
: -cot), baseUz_); - // determine module type - bool psTilt; - if (barrel) { - const double posZ = (r + digi(dataFormats_->chosenRofPhi(), baseUr_)) * cot + z0 + z; - const int indexLayerId = setup_->indexLayerId(ttStubRef); - const double limit = setup_->tiltedLayerLimitZ(indexLayerId); - psTilt = abs(posZ) < limit; - } else - psTilt = setup_->psModule(ttStubRef); - if (useTTStubResiduals_) { - const GlobalPoint gp = setup_->stubPos(ttStubRef); - const double ttR = r; - const double ttZ = gp.z() - (z0 + (ttR + dataFormats_->chosenRofPhi()) * cot); - stubs_.emplace_back(ttStubRef, layerId, ttR, phi, ttZ, psTilt); - } else - stubs_.emplace_back(ttStubRef, layerId, r, phi, z, psTilt); - stubs.push_back(&stubs_.back()); - } - // create fake seed stubs, since TrackBuilder doesn't output these stubs, required by the KF. - for (int layerId : channelAssignment_->seedingLayers(channel)) { - const vector& ttStubRefs = ttTrackRef->getStubRefs(); - auto sameLayer = [this, layerId](const TTStubRef& ttStubRef) { - return setup_->layerId(ttStubRef) == layerId; - }; - const TTStubRef& ttStubRef = *find_if(ttStubRefs.begin(), ttStubRefs.end(), sameLayer); - const bool barrel = setup_->barrel(ttStubRef); - double r; - if (barrel) - r = digi(setup_->hybridLayerR(layerId - setup_->offsetLayerId()) - dataFormats_->chosenRofPhi(), baseUr_); - else { - r = (z0 + - digi(setup_->hybridDiskZ(layerId - setup_->offsetLayerId() - setup_->offsetLayerDisks()), baseUzT_)) * - digi(1. 
/ digi(abs(cot), baseCot), baseInvCot_); - r = digi(r - digi(dataFormats_->chosenRofPhi(), baseUr_), baseUr_); - } - static constexpr double phi = 0.; - static constexpr double z = 0.; - // determine module type - bool psTilt; - if (barrel) { - const double posZ = - digi(digi(setup_->hybridLayerR(layerId - setup_->offsetLayerId()), baseUr_) * cot + z0, baseUz_); - const int indexLayerId = setup_->indexLayerId(ttStubRef); - const double limit = digi(setup_->tiltedLayerLimitZ(indexLayerId), baseUz_); - psTilt = abs(posZ) < limit; - } else - psTilt = true; - const GlobalPoint gp = setup_->stubPos(ttStubRef); - const double ttR = gp.perp() - dataFormats_->chosenRofPhi(); - const double ttZ = gp.z() - (z0 + (ttR + dataFormats_->chosenRofPhi()) * cot); - if (useTTStubResiduals_) - stubs_.emplace_back(ttStubRef, layerId, ttR, phi, ttZ, psTilt); - else - stubs_.emplace_back(ttStubRef, layerId, r, phi, z, psTilt); + TTBV ttBV = frameStub.second; + const TTBV zBV(ttBV, dataFormats_->format(Variable::z, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::z, Process::kfin).width(); + const TTBV phiBV(ttBV, dataFormats_->format(Variable::phi, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::phi, Process::kfin).width(); + const TTBV rBV(ttBV, dataFormats_->format(Variable::r, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::r, Process::kfin).width(); + const TTBV layerIdBV(ttBV, channelAssignment_->widthLayerId(), 0); + ttBV >>= channelAssignment_->widthPSTilt(); + const TTBV tiltBV(ttBV, channelAssignment_->widthPSTilt(), 0); + const double r = rBV.val(dataFormats_->base(Variable::r, Process::kfin)); + const double phi = phiBV.val(dataFormats_->base(Variable::phi, Process::kfin)); + const double z = zBV.val(dataFormats_->base(Variable::z, Process::kfin)); + stubs_.emplace_back(frameStub.first, r, phi, z, layerIdBV.val(), tiltBV.val(), layer); stubs.push_back(&stubs_.back()); } - const bool valid = 
frame < setup_->numFrames() ? true : enableTruncation_; - tracks_.emplace_back(ttTrackRef, valid, r2Inv, phiT, cot, zT, stubs); + TTBV ttBV = frameTrack.second; + const TTBV cotBV(ttBV, dataFormats_->format(Variable::cot, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::cot, Process::kfin).width(); + const TTBV zTBV(ttBV, dataFormats_->format(Variable::zT, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::zT, Process::kfin).width(); + const TTBV phiTBV(ttBV, dataFormats_->format(Variable::phiT, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::phiT, Process::kfin).width(); + const TTBV inv2RBV(ttBV, dataFormats_->format(Variable::inv2R, Process::kfin).width(), 0, true); + ttBV >>= dataFormats_->format(Variable::inv2R, Process::kfin).width(); + const TTBV sectorEtaBV(ttBV, dataFormats_->format(Variable::sectorEta, Process::kfin).width(), 0); + ttBV >>= dataFormats_->format(Variable::sectorEta, Process::kfin).width(); + const TTBV sectorPhiBV(ttBV, dataFormats_->format(Variable::sectorPhi, Process::kfin).width(), 0); + const double cot = cotBV.val(dataFormats_->base(Variable::cot, Process::kfin)); + const double zT = zTBV.val(dataFormats_->base(Variable::zT, Process::kfin)); + const double inv2R = inv2RBV.val(dataFormats_->base(Variable::inv2R, Process::kfin)); + const int sectorEta = sectorEtaBV.val(); + const int zTu = dataFormats_->format(Variable::zT, Process::kfin).toUnsigned(zT); + const int cotu = dataFormats_->format(Variable::cot, Process::kfin).toUnsigned(cot); + const TTBV maybe = layerEncoding_->maybePattern(sectorEta, zTu, cotu); + const FrameTrack frameT(frameTrack.first, + Frame("1" + maybe.str() + sectorPhiBV.str() + sectorEtaBV.str() + phiTBV.str() + + inv2RBV.str() + zTBV.str() + cotBV.str())); + tracks_.emplace_back(frameT, stubs, cot, zT, inv2R, sectorEtaBV.val()); input.push_back(&tracks_.back()); } + // remove all gaps between end and last track + for (auto it = 
input.end(); it != input.begin();) + it = (*--it) ? input.begin() : input.erase(it); } } @@ -203,146 +116,40 @@ namespace trklet { StreamsTrack& acceptedTracks, StreamsStub& lostStubs, StreamsTrack& lostTracks) { + // calculate stub uncertainties static constexpr int usedMSBpitchOverRaddr = 1; - static const double baseR = - baseLr_ * + static const double baseRlut = + dataFormats_->base(Variable::r, Process::kfin) * pow(2, dataFormats_->width(Variable::r, Process::zht) - setup_->widthAddrBRAM18() + usedMSBpitchOverRaddr); - static const double baseRinvR = - baseLr_ * pow(2, dataFormats_->width(Variable::r, Process::zht) - setup_->widthAddrBRAM18()); - static const double basePhi = baseLinv2R_ * baseLr_; + static const double baseRinvR = dataFormats_->base(Variable::r, Process::kfin) * + pow(2, dataFormats_->width(Variable::r, Process::zht) - setup_->widthAddrBRAM18()); + static const double basePhi = + dataFormats_->base(Variable::inv2R, Process::kfin) * dataFormats_->base(Variable::r, Process::kfin); static const double baseInvR = - pow(2., ceil(log2(baseLr_ / setup_->tbInnerRadius())) - setup_->widthDSPbu()) / baseLr_; + pow(2., + ceil(log2(dataFormats_->base(Variable::r, Process::kfin) / setup_->tbInnerRadius())) - + setup_->widthDSPbu()) / + dataFormats_->base(Variable::r, Process::kfin); static const double maxCot = sinh(setup_->maxEta()) + setup_->beamWindowZ() / setup_->chosenRofZ(); static constexpr int usedMSBCotLutaddr = 3; static const double baseCotLut = pow(2., ceil(log2(maxCot)) - setup_->widthAddrBRAM18() + usedMSBCotLutaddr); - // base transform into high precision TMTT format - for (Track& track : tracks_) { - track.inv2R_ = redigi(track.inv2R_, baseUinv2R_, baseHinv2R_, setup_->widthDSPbu()); - track.phiT_ = redigi(track.phiT_, baseUphiT_, baseHphiT_, setup_->widthDSPbu()); - track.cot_ = redigi(track.cot_, baseUcot_, baseHcot_, setup_->widthDSPbu()); - track.zT_ = redigi(track.zT_, baseUzT_, baseHzT_, setup_->widthDSPbu()); - for (Stub* stub : 
track.stubs_) { - stub->r_ = redigi(stub->r_, baseUr_, baseHr_, setup_->widthDSPbu()); - stub->phi_ = redigi(stub->phi_, baseUphi_, baseHphi_, setup_->widthDSPbu()); - stub->z_ = redigi(stub->z_, baseUz_, baseHz_, setup_->widthDSPbu()); - } - } - // find sector - for (Track& track : tracks_) { - const int sectorPhi = track.phiT_ < 0. ? 0 : 1; - track.phiT_ -= (sectorPhi - .5) * setup_->baseSector(); - int sectorEta(-1); - for (; sectorEta < setup_->numSectorsEta(); sectorEta++) - if (track.zT_ < digi(setup_->chosenRofZ() * sinh(setup_->boundarieEta(sectorEta + 1)), baseHzT_)) - break; - if (sectorEta >= setup_->numSectorsEta() || sectorEta <= -1) { - track.valid_ = false; - continue; - } - track.cot_ = track.cot_ - digi(setup_->sectorCot(sectorEta), baseHcot_); - track.zT_ = track.zT_ - digi(setup_->chosenRofZ() * setup_->sectorCot(sectorEta), baseHzT_); - track.sector_ = sectorPhi * setup_->numSectorsEta() + sectorEta; - } - // base transform into TMTT format - for (Track& track : tracks_) { - if (!track.valid_) - continue; - // store track parameter shifts - const double dinv2R = digi(track.inv2R_ - digi(track.inv2R_, baseLinv2R_), baseHinv2R_); - const double dphiT = digi(track.phiT_ - digi(track.phiT_, baseLphiT_), baseHphiT_); - const double dcot = digi(track.cot_ - digi(track.cot_, baseLcot_), baseHcot_); - const double dzT = digi(track.zT_ - digi(track.zT_, baseLzT_), baseHzT_); - // shift track parameter; - track.inv2R_ = digi(track.inv2R_, baseLinv2R_); - track.phiT_ = digi(track.phiT_, baseLphiT_); - track.cot_ = digi(track.cot_, baseLcot_); - track.zT_ = digi(track.zT_, baseLzT_); - // range checks - if (!dataFormats_->format(Variable::inv2R, Process::kfin).inRange(track.inv2R_, true)) - track.valid_ = false; - if (!dataFormats_->format(Variable::phiT, Process::kfin).inRange(track.phiT_, true)) - track.valid_ = false; - if (!dataFormats_->format(Variable::cot, Process::kfin).inRange(track.cot_, true)) - track.valid_ = false; - if 
(!dataFormats_->format(Variable::zT, Process::kfin).inRange(track.zT_, true)) - track.valid_ = false; - if (!track.valid_) - continue; - // adjust stub residuals by track parameter shifts - for (Stub* stub : track.stubs_) { - const double dphi = digi(dphiT + stub->r_ * dinv2R, baseHphi_); - const double r = stub->r_ + digi(dataFormats_->chosenRofPhi() - setup_->chosenRofZ(), baseHr_); - const double dz = digi(dzT + r * dcot, baseHz_); - stub->phi_ = digi(stub->phi_ + dphi, baseLphi_); - stub->z_ = digi(stub->z_ + dz, baseLz_); - // range checks - if (!dataFormats_->format(Variable::phi, Process::kfin).inRange(stub->phi_)) - stub->valid_ = false; - if (!dataFormats_->format(Variable::z, Process::kfin).inRange(stub->z_)) - stub->valid_ = false; - } - } - // encode layer id - for (Track& track : tracks_) { - if (!track.valid_) - continue; - const int sectorEta = track.sector_ % setup_->numSectorsEta(); - const int zT = dataFormats_->format(Variable::zT, Process::kfin).toUnsigned(track.zT_); - const int cot = dataFormats_->format(Variable::cot, Process::kfin).toUnsigned(track.cot_); - track.maybe_ = TTBV(0, setup_->numLayers()); - for (Stub* stub : track.stubs_) { - if (!stub->valid_) - continue; - // replace layerId by encoded layerId - stub->layer_ = layerEncoding_->layerIdKF(sectorEta, zT, cot, stub->layer_); - // kill stubs from layers which can't be crossed by track - if (stub->layer_ == -1) - stub->valid_ = false; - if (stub->valid_) { - if (track.maybe_[stub->layer_]) { - for (Stub* s : track.stubs_) { - if (s == stub) - break; - if (s->layer_ == stub->layer_) - s->valid_ = false; - } - } else - track.maybe_.set(stub->layer_); - } - } - // lookup maybe layers - track.maybe_ &= layerEncoding_->maybePattern(sectorEta, zT, cot); - } - // kill tracks with not enough layer - for (Track& track : tracks_) { - if (!track.valid_) - continue; - TTBV hits(0, setup_->numLayers()); - for (const Stub* stub : track.stubs_) - if (stub->valid_) - hits.set(stub->layer_); - if 
(hits.count() < setup_->kfMinLayers()) - track.valid_ = false; - } - // calculate stub uncertainties - for (Track& track : tracks_) { - if (!track.valid_) - continue; - const int sectorEta = track.sector_ % setup_->numSectorsEta(); + static const double baseCot = dataFormats_->base(Variable::cot, Process::kfin); + static const double baseZ = dataFormats_->base(Variable::z, Process::kfin); + static const double baseR = dataFormats_->base(Variable::r, Process::kfin); + for (const Track& track : tracks_) { + const int sectorEta = track.sectorEta_; const double inv2R = abs(track.inv2R_); for (Stub* stub : track.stubs_) { - if (!stub->valid_) - continue; const bool barrel = setup_->barrel(stub->ttStubRef_); const bool ps = barrel ? setup_->psModule(stub->ttStubRef_) : stub->psTilt_; const bool tilt = barrel ? (ps && !stub->psTilt_) : false; const double length = ps ? setup_->lengthPS() : setup_->length2S(); const double pitch = ps ? setup_->pitchPS() : setup_->pitch2S(); - const double pitchOverR = digi(pitch / (digi(stub->r_, baseR) + dataFormats_->chosenRofPhi()), basePhi); + const double pitchOverR = digi(pitch / (digi(stub->r_, baseRlut) + dataFormats_->chosenRofPhi()), basePhi); const double r = digi(stub->r_, baseRinvR) + dataFormats_->chosenRofPhi(); const double sumdz = track.zT_ + stub->z_; - const double dZ = digi(sumdz - digi(setup_->chosenRofZ(), baseLr_) * track.cot_, baseLcot_ * baseLr_); - const double sumcot = track.cot_ + digi(setup_->sectorCot(sectorEta), baseHcot_); + const double dZ = digi(sumdz - digi(setup_->chosenRofZ(), baseR) * track.cot_, baseCot * baseR); + const double sumcot = track.cot_ + digi(setup_->sectorCot(sectorEta), baseCot); const double cot = digi(abs(dZ * digi(1. 
/ r, baseInvR) + sumcot), baseCotLut); double lengthZ = length; double lengthR = 0.; @@ -353,31 +160,18 @@ namespace trklet { lengthZ = length * abs(setup_->tiltApproxSlope() * cot + setup_->tiltApproxIntercept()); lengthR = setup_->tiltUncertaintyR(); } - const double scat = digi(setup_->scattering(), baseLr_); - stub->dZ_ = lengthZ + baseLz_; - stub->dPhi_ = (scat + digi(lengthR, baseLr_)) * inv2R + pitchOverR; - stub->dPhi_ = digi(stub->dPhi_, baseLphi_) + baseLphi_; + const double scat = digi(setup_->scattering(), baseR); + stub->dZ_ = lengthZ + baseZ; + stub->dPhi_ = (scat + digi(lengthR, baseR)) * inv2R + pitchOverR; + stub->dPhi_ = digi(stub->dPhi_, basePhi) + basePhi; } } - // fill products StreamsStub& accpetedStubs, StreamsTrack& acceptedTracks, StreamsStub& lostStubs, StreamsTrack& lostTracks - auto frameTrack = [this](Track* track) { - const TTBV maybe(track->maybe_); - const TTBV sectorPhi( - dataFormats_->format(Variable::sectorPhi, Process::kfin).ttBV(track->sector_ / setup_->numSectorsEta())); - const TTBV sectorEta( - dataFormats_->format(Variable::sectorEta, Process::kfin).ttBV(track->sector_ % setup_->numSectorsEta())); - const TTBV inv2R(dataFormats_->format(Variable::inv2R, Process::kfin).ttBV(track->inv2R_)); - const TTBV phiT(dataFormats_->format(Variable::phiT, Process::kfin).ttBV(track->phiT_)); - const TTBV cot(dataFormats_->format(Variable::cot, Process::kfin).ttBV(track->cot_)); - const TTBV zT(dataFormats_->format(Variable::zT, Process::kfin).ttBV(track->zT_)); - return FrameTrack(track->ttTrackRef_, - Frame("1" + maybe.str() + sectorPhi.str() + sectorEta.str() + phiT.str() + inv2R.str() + - zT.str() + cot.str())); - }; + // store helper + auto frameTrack = [](Track* track) { return track->frame_; }; auto frameStub = [this](Track* track, int layer) { - auto equal = [layer](Stub* stub) { return stub->valid_ && stub->layer_ == layer; }; + auto equal = [layer](Stub* stub) { return stub->channel_ == layer; }; const auto it = 
find_if(track->stubs_.begin(), track->stubs_.end(), equal); - if (it == track->stubs_.end() || !(*it)->valid_) + if (it == track->stubs_.end()) return FrameStub(); Stub* stub = *it; const TTBV r(dataFormats_->format(Variable::r, Process::kfin).ttBV(stub->r_)); @@ -387,31 +181,69 @@ namespace trklet { const TTBV dZ(dataFormats_->format(Variable::dZ, Process::kfin).ttBV(stub->dZ_)); return FrameStub(stub->ttStubRef_, Frame("1" + r.str() + phi.str() + z.str() + dPhi.str() + dZ.str())); }; - auto invalid = [](Track* track) { return track && !track->valid_; }; - auto acc = [invalid](int sum, Track* track) { return sum + (invalid(track) ? 1 : 0); }; - const int offsetTrack = region_ * channelAssignment_->numChannelsTrack(); - for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { - const int channelTrack = offsetTrack + channel; + // merge number of nodes DR to number of Nodes KF and store result + static const int nMux = channelAssignment_->numNodesDR() / setup_->kfNumWorker(); + const int offsetTrack = region_ * setup_->kfNumWorker(); + for (int nodeKF = 0; nodeKF < setup_->kfNumWorker(); nodeKF++) { + const int offset = nodeKF * nMux; + deque accepted; + deque lost; + vector> stacks(nMux); + vector> inputs(nMux); + for (int channel = 0; channel < nMux; channel++) { + const vector& input = input_[offset + channel]; + inputs[channel] = deque(input.begin(), input.end()); + } + // clock accurate firmware emulation, each while trip describes one clock tick, one stub in and one stub out per tick + while (!all_of(inputs.begin(), inputs.end(), [](const deque& tracks) { return tracks.empty(); }) or + !all_of(stacks.begin(), stacks.end(), [](const deque& tracks) { return tracks.empty(); })) { + // fill input fifos + for (int channel = 0; channel < nMux; channel++) { + deque& stack = stacks[channel]; + Track* track = pop_front(inputs[channel]); + if (track) + stack.push_back(track); + } + // merge input fifos to one stream, prioritizing higher input 
channel over lower channel + bool nothingToRoute(true); + for (int channel = nMux - 1; channel >= 0; channel--) { + Track* track = pop_front(stacks[channel]); + if (track) { + nothingToRoute = false; + accepted.push_back(track); + break; + } + } + if (nothingToRoute) + accepted.push_back(nullptr); + } + // truncate if desired + if (enableTruncation_ && (int)accepted.size() > setup_->numFrames()) { + const auto limit = next(accepted.begin(), setup_->numFrames()); + copy_if(limit, accepted.end(), back_inserter(lost), [](const Track* track) { return track; }); + accepted.erase(limit, accepted.end()); + } + // remove all gaps between end and last track + for (auto it = accepted.end(); it != accepted.begin();) + it = (*--it) ? accepted.begin() : accepted.erase(it); + // fill products StreamsStub& accpetedStubs, StreamsTrack& acceptedTracks, StreamsStub& lostStubs, StreamsTrack& lostTracks + const int channelTrack = offsetTrack + nodeKF; const int offsetStub = channelTrack * setup_->numLayers(); - vector& input = input_[channel]; // fill lost tracks and stubs without gaps - const int lost = accumulate(input.begin(), input.end(), 0, acc); - lostTracks[channelTrack].reserve(lost); + lostTracks[channelTrack].reserve(lost.size()); for (int layer = 0; layer < setup_->numLayers(); layer++) - lostStubs[offsetStub + layer].reserve(lost); - for (Track* track : input) { - if (!track || track->valid_) - continue; + lostStubs[offsetStub + layer].reserve(lost.size()); + for (Track* track : lost) { lostTracks[channelTrack].emplace_back(frameTrack(track)); for (int layer = 0; layer < setup_->numLayers(); layer++) lostStubs[offsetStub + layer].emplace_back(frameStub(track, layer)); } // fill accepted tracks and stubs with gaps - acceptedTracks[channelTrack].reserve(input.size()); + acceptedTracks[channelTrack].reserve(accepted.size()); for (int layer = 0; layer < setup_->numLayers(); layer++) - accpetedStubs[offsetStub + layer].reserve(input.size()); - for (Track* track : input) { - if 
(!track || !track->valid_) { // fill gap + accpetedStubs[offsetStub + layer].reserve(accepted.size()); + for (Track* track : accepted) { + if (!track) { // fill gap acceptedTracks[channelTrack].emplace_back(FrameTrack()); for (int layer = 0; layer < setup_->numLayers(); layer++) accpetedStubs[offsetStub + layer].emplace_back(FrameStub()); @@ -424,11 +256,15 @@ namespace trklet { } } - // basetransformation of val from baseLow into baseHigh using widthMultiplier bit multiplication - double KFin::redigi(double val, double baseLow, double baseHigh, int widthMultiplier) const { - const double base = pow(2, 1 - widthMultiplier); - const double transform = digi(baseLow / baseHigh, base); - return (floor(val * transform / baseLow) + .5) * baseHigh; + // remove and return first element of deque, returns nullptr if empty + template + T* KFin::pop_front(deque& ts) const { + T* t = nullptr; + if (!ts.empty()) { + t = ts.front(); + ts.pop_front(); + } + return t; } } // namespace trklet diff --git a/L1Trigger/TrackFindingTracklet/src/MatchEngineUnit.cc b/L1Trigger/TrackFindingTracklet/src/MatchEngineUnit.cc index 456163c4e97d9..227b52622feb6 100644 --- a/L1Trigger/TrackFindingTracklet/src/MatchEngineUnit.cc +++ b/L1Trigger/TrackFindingTracklet/src/MatchEngineUnit.cc @@ -16,8 +16,14 @@ MatchEngineUnit::MatchEngineUnit(const Settings& settings, barrel_ = barrel; layerdisk_ = layerdisk; good__ = false; - good__t = false; good___ = false; + good____ = false; + ir2smin_ = 0; + if (layerdisk_ >= N_LAYER) { + double rmin2s = (layerdisk_ < N_LAYER + 2) ? 
settings_.rDSSinner(0) : settings_.rDSSouter(0); + ir2smin_ = (1 << (N_RZBITS + NFINERZBITS)) * (rmin2s - settings_.rmindiskvm()) / + (settings_.rmaxdisk() - settings_.rmindiskvm()); + } } void MatchEngineUnit::setAlmostFull() { almostfullsave_ = candmatches_.nearfull(); } @@ -89,6 +95,7 @@ void MatchEngineUnit::step() { } vmstub__ = vmstubsmemory_->getVMStubMEBin(slot, istub_); + rzbin__ = rzbin_ + use_[iuse_].first; isPSseed__ = isPSseed_; projrinv__ = projrinv_; @@ -106,12 +113,24 @@ void MatchEngineUnit::step() { } void MatchEngineUnit::processPipeline() { - if (good___) { - bool isPSmodule = vmstub___.isPSmodule(); - int stubfinerz = vmstub___.finerz().value(); - int stubfinephi = vmstub___.finephi().value(); + if (good____) { + int stubfinerz = vmstub____.finerz().value(); + int stubfinephi = vmstub____.finephi().value(); + bool isPSmodule = false; + + if (barrel_) { + isPSmodule = layerdisk_ < N_PSLAYER; + } else { + const int absz = (1 << settings_.MEBinsBits()) - 1; + unsigned int irstub = ((rzbin____ & absz) << NFINERZBITS) + stubfinerz; - int deltaphi = stubfinephi - projfinephi___; + //Verify that ir2smin_ is initialized and check if irstub is less than radius of innermost 2s module + assert(ir2smin_ > 0); + isPSmodule = irstub < ir2smin_; + } + assert(isPSmodule == vmstub____.isPSmodule()); + + int deltaphi = stubfinephi - projfinephi____; constexpr int idphicut = 3; @@ -122,14 +141,14 @@ void MatchEngineUnit::processPipeline() { int diskps = (!barrel_) && isPSmodule; //here we always use the larger number of bits for the bend - unsigned int index = (diskps << (N_BENDBITS_2S + NRINVBITS)) + (projrinv___ << nbits) + vmstub___.bend().value(); + unsigned int index = (diskps << (nbits + NRINVBITS)) + (projrinv____ << nbits) + vmstub____.bend().value(); //Check if stub z position consistent - int idrz = stubfinerz - projfinerz___; + int idrz = stubfinerz - projfinerz____; bool pass; if (barrel_) { - if (isPSseed___) { + if (isPSseed____) { constexpr int 
drzcut = 1; pass = std::abs(idrz) <= drzcut; } else { @@ -148,28 +167,30 @@ void MatchEngineUnit::processPipeline() { bool goodpair = (pass && dphicut) && luttable_.lookup(index); - std::pair tmppair(proj___, vmstub___.stub()); + std::pair tmppair(proj____, vmstub____.stub()); if (goodpair) { candmatches_.store(tmppair); } } - proj___ = proj__t; - projfinephi___ = projfinephi__t; - projfinerz___ = projfinerz__t; - projrinv___ = projrinv__t; - isPSseed___ = isPSseed__t; - good___ = good__t; - vmstub___ = vmstub__t; - - proj__t = proj__; - projfinephi__t = projfinephi__; - projfinerz__t = projfinerz__; - projrinv__t = projrinv__; - isPSseed__t = isPSseed__; - good__t = good__; - vmstub__t = vmstub__; + proj____ = proj___; + projfinephi____ = projfinephi___; + projfinerz____ = projfinerz___; + projrinv____ = projrinv___; + isPSseed____ = isPSseed___; + good____ = good___; + vmstub____ = vmstub___; + rzbin____ = rzbin___; + + proj___ = proj__; + projfinephi___ = projfinephi__; + projfinerz___ = projfinerz__; + projrinv___ = projrinv__; + isPSseed___ = isPSseed__; + good___ = good__; + vmstub___ = vmstub__; + rzbin___ = rzbin__; } void MatchEngineUnit::reset() { @@ -177,8 +198,8 @@ void MatchEngineUnit::reset() { idle_ = true; istub_ = 0; good__ = false; - good__t = false; good___ = false; + good____ = false; } int MatchEngineUnit::TCID() const { @@ -186,12 +207,12 @@ int MatchEngineUnit::TCID() const { return peek().first->TCID(); } - if (good___) { - return proj___->TCID(); + if (good____) { + return proj____->TCID(); } - if (good__t) { - return proj__t->TCID(); + if (good___) { + return proj___->TCID(); } if (good__) { diff --git a/L1Trigger/TrackFindingTracklet/src/MatchProcessor.cc b/L1Trigger/TrackFindingTracklet/src/MatchProcessor.cc index 83e07573af631..0c78d8ef38898 100644 --- a/L1Trigger/TrackFindingTracklet/src/MatchProcessor.cc +++ b/L1Trigger/TrackFindingTracklet/src/MatchProcessor.cc @@ -352,7 +352,7 @@ void MatchProcessor::execute(unsigned int iSector, 
double phimin) { unsigned int iphi = (fpgaphi.value() >> (fpgaphi.nbits() - nvmbits_)) & (nvmbins_ - 1); - int nextrabits = 2; + constexpr int nextrabits = 2; int overlapbits = nvmbits_ + nextrabits; unsigned int extrabits = fpgaphi.bits(fpgaphi.nbits() - overlapbits - nextrabits, nextrabits); @@ -435,7 +435,7 @@ void MatchProcessor::execute(unsigned int iSector, double phimin) { VMStubsMEMemory* stubmem = vmstubs_[0]; bool usefirstMinus = stubmem->nStubsBin(ivmMinus * nbins + slot) != 0; bool usesecondMinus = (second && (stubmem->nStubsBin(ivmMinus * nbins + slot + 1) != 0)); - bool usefirstPlus = ivmPlus != ivmMinus && stubmem->nStubsBin(ivmPlus * nbins + slot) != 0; + bool usefirstPlus = ivmPlus != ivmMinus && (stubmem->nStubsBin(ivmPlus * nbins + slot) != 0); bool usesecondPlus = ivmPlus != ivmMinus && (second && (stubmem->nStubsBin(ivmPlus * nbins + slot + 1) != 0)); good_ = usefirstPlus || usesecondPlus || usefirstMinus || usesecondMinus; @@ -583,7 +583,7 @@ bool MatchProcessor::matchCalculator(Tracklet* tracklet, const Stub* fpgastub, b // Update the "best" values if (imatch) { best_ideltaphi_barrel = std::abs(ideltaphi); - best_ideltaz_barrel = std::abs(ideltaz); + best_ideltaz_barrel = std::abs(ideltaz << dzshift_); } if (settings_.debugTracklet()) { @@ -671,7 +671,8 @@ bool MatchProcessor::matchCalculator(Tracklet* tracklet, const Stub* fpgastub, b } } - int ideltar = (irstub * settings_.kr()) / settings_.krprojshiftdisk() - ir; + constexpr int diff_bits = 1; + int ideltar = (irstub >> diff_bits) - ir; if (!stub->isPSmodule()) { int ialpha = fpgastub->alpha().value(); diff --git a/L1Trigger/TrackFindingTracklet/src/PurgeDuplicate.cc b/L1Trigger/TrackFindingTracklet/src/PurgeDuplicate.cc index be4858359f9fb..07408442a26cf 100644 --- a/L1Trigger/TrackFindingTracklet/src/PurgeDuplicate.cc +++ b/L1Trigger/TrackFindingTracklet/src/PurgeDuplicate.cc @@ -78,7 +78,7 @@ void PurgeDuplicate::addInput(MemoryBase* memory, std::string input) { throw 
cms::Exception("BadConfig") << __FILE__ << " " << __LINE__ << " could not find input: " << input; } -void PurgeDuplicate::execute(std::vector& outputtracks_, unsigned int iSector) { +void PurgeDuplicate::execute(std::vector& outputtracks, unsigned int iSector) { inputtracklets_.clear(); inputtracks_.clear(); @@ -110,7 +110,7 @@ void PurgeDuplicate::execute(std::vector& outputtracks_, unsigned int iSe if (settings_.removalType() == "merge") { // Track seed & duplicate flag std::vector> trackInfo; - // Flag for tracks in multiple bins that get merged but are not in the correct variable bin + // Flag for tracks in multiple bins that get merged but are not in the correct bin std::vector trackBinInfo; // Vector to store the relative rank of the track candidate for merging, based on seed type std::vector seedRank; @@ -125,269 +125,280 @@ void PurgeDuplicate::execute(std::vector& outputtracks_, unsigned int iSe std::vector prefTracks; // Stores all the tracks that are sent to the KF from each bin std::vector prefTrackFit; // Stores the track seed that corresponds to the associated track in prefTracks - for (unsigned int bin = 0; bin < settings_.varRInvBins().size() - 1; bin++) { - // Get vectors from TrackFit and save them - // inputtracklets: Tracklet objects from the FitTrack (not actually fit yet) - // inputstublists: L1Stubs for that track - // inputstubidslists: Stub stubIDs for that 3rack - // mergedstubidslists: the same as inputstubidslists, but will be used during duplicate removal - for (unsigned int i = 0; i < inputtrackfits_.size(); i++) { - if (inputtrackfits_[i]->nStublists() == 0) - continue; - if (inputtrackfits_[i]->nStublists() != inputtrackfits_[i]->nTracks()) - throw "Number of stublists and tracks don't match up!"; - for (unsigned int j = 0; j < inputtrackfits_[i]->nStublists(); j++) { - if (isTrackInBin(findOverlapRInvBins(inputtrackfits_[i]->getTrack(j)), bin)) { - if (inputtracklets_.size() >= settings_.maxStep("DR")) - continue; - Tracklet* aTrack 
= inputtrackfits_[i]->getTrack(j); - inputtracklets_.push_back(inputtrackfits_[i]->getTrack(j)); - std::vector stublist = inputtrackfits_[i]->getStublist(j); - inputstublists_.push_back(stublist); - std::vector> stubidslist = inputtrackfits_[i]->getStubidslist(j); - inputstubidslists_.push_back(stubidslist); - mergedstubidslists_.push_back(stubidslist); - - // Encoding: L1L2=0, L2L3=1, L3L4=2, L5L6=3, D1D2=4, D3D4=5, L1D1=6, L2D1=7 - // Best Guess: L1L2 > L1D1 > L2L3 > L2D1 > D1D2 > L3L4 > L5L6 > D3D4 - // Best Rank: L1L2 > L3L4 > D3D4 > D1D2 > L2L3 > L2D1 > L5L6 > L1D1 - // Rank-Informed Guess: L1L2 > L3L4 > L1D1 > L2L3 > L2D1 > D1D2 > L5L6 > D3D4 - unsigned int curSeed = aTrack->seedIndex(); - std::vector ranks{1, 5, 2, 7, 4, 3, 8, 6}; - if (settings_.extended()) - seedRank.push_back(9); - else - seedRank.push_back(ranks[curSeed]); - - if (stublist.size() != stubidslist.size()) - throw "Number of stubs and stubids don't match up!"; - - trackInfo.emplace_back(i, false); - trackBinInfo.emplace_back(false); - } else + for (unsigned int bin = 0; bin < settings_.rinvBins().size() - 1; bin++) { + for (unsigned int phiBin = 0; phiBin < settings_.phiBins().size() - 1; phiBin++) { + // Get vectors from TrackFit and save them + // inputtracklets: Tracklet objects from the FitTrack (not actually fit yet) + // inputstublists: L1Stubs for that track + // inputstubidslists: Stub stubIDs for that 3rack + // mergedstubidslists: the same as inputstubidslists, but will be used during duplicate removal + for (unsigned int i = 0; i < inputtrackfits_.size(); i++) { + if (inputtrackfits_[i]->nStublists() == 0) continue; + if (inputtrackfits_[i]->nStublists() != inputtrackfits_[i]->nTracks()) + throw cms::Exception("LogicError") + << __FILE__ << " " << __LINE__ << " Number of stublists and tracks don't match up! 
"; + for (unsigned int j = 0; j < inputtrackfits_[i]->nStublists(); j++) { + if (isTrackInBin(findOverlapRinvBins(inputtrackfits_[i]->getTrack(j)), bin)) { + if (!isTrackInBin(findOverlapPhiBins(inputtrackfits_[i]->getTrack(j)), phiBin)) + continue; + if (inputtracklets_.size() >= settings_.maxStep("DR")) + continue; + Tracklet* aTrack = inputtrackfits_[i]->getTrack(j); + inputtracklets_.push_back(inputtrackfits_[i]->getTrack(j)); + std::vector stublist = inputtrackfits_[i]->getStublist(j); + inputstublists_.push_back(stublist); + std::vector> stubidslist = inputtrackfits_[i]->getStubidslist(j); + inputstubidslists_.push_back(stubidslist); + mergedstubidslists_.push_back(stubidslist); + + // Encoding: L1L2=0, L2L3=1, L3L4=2, L5L6=3, D1D2=4, D3D4=5, L1D1=6, L2D1=7 + // Best Guess: L1L2 > L1D1 > L2L3 > L2D1 > D1D2 > L3L4 > L5L6 > D3D4 + // Best Rank: L1L2 > L3L4 > D3D4 > D1D2 > L2L3 > L2D1 > L5L6 > L1D1 + // Rank-Informed Guess: L1L2 > L3L4 > L1D1 > L2L3 > L2D1 > D1D2 > L5L6 > D3D4 + unsigned int curSeed = aTrack->seedIndex(); + std::vector ranks{1, 5, 2, 7, 4, 3, 8, 6}; + if (settings_.extended()) + seedRank.push_back(9); + else + seedRank.push_back(ranks[curSeed]); + + if (stublist.size() != stubidslist.size()) + throw cms::Exception("LogicError") + << __FILE__ << " " << __LINE__ << " Number of stubs and stubids don't match up! 
"; + + trackInfo.emplace_back(i, false); + trackBinInfo.emplace_back(false); + } else + continue; + } } - } - if (inputtracklets_.empty()) - continue; - const unsigned int numStublists = inputstublists_.size(); + if (inputtracklets_.empty()) + continue; + const unsigned int numStublists = inputstublists_.size(); - if (settings_.inventStubs()) { - for (unsigned int itrk = 0; itrk < numStublists; itrk++) { - inputstublists_[itrk] = getInventedSeedingStub(iSector, inputtracklets_[itrk], inputstublists_[itrk]); + if (settings_.inventStubs()) { + for (unsigned int itrk = 0; itrk < numStublists; itrk++) { + inputstublists_[itrk] = getInventedSeedingStub(iSector, inputtracklets_[itrk], inputstublists_[itrk]); + } } - } - // Initialize all-false 2D array of tracks being duplicates to other tracks - bool dupMap[numStublists][numStublists]; // Ends up symmetric - for (unsigned int itrk = 0; itrk < numStublists; itrk++) { - for (unsigned int jtrk = 0; jtrk < numStublists; jtrk++) { - dupMap[itrk][jtrk] = false; + // Initialize all-false 2D array of tracks being duplicates to other tracks + bool dupMap[numStublists][numStublists]; // Ends up symmetric + for (unsigned int itrk = 0; itrk < numStublists; itrk++) { + for (unsigned int jtrk = 0; jtrk < numStublists; jtrk++) { + dupMap[itrk][jtrk] = false; + } } - } - // Used to check if a track is in two bins, is not a duplicate in either bin, so is sent out twice - bool noMerge[numStublists]; - for (unsigned int itrk = 0; itrk < numStublists; itrk++) { - noMerge[itrk] = false; - } + // Used to check if a track is in two bins, is not a duplicate in either bin, so is sent out twice + bool noMerge[numStublists]; + for (unsigned int itrk = 0; itrk < numStublists; itrk++) { + noMerge[itrk] = false; + } - // Find duplicates; Fill dupMap by looping over all pairs of "tracks" - // numStublists-1 since last track has no other to compare to - for (unsigned int itrk = 0; itrk < numStublists - 1; itrk++) { - for (unsigned int jtrk = itrk + 1; 
jtrk < numStublists; jtrk++) { - if (itrk >= settings_.numTracksComparedPerBin()) - continue; - // Get primary track stubids = (layer, unique stub index within layer) - const std::vector>& stubsTrk1 = inputstubidslists_[itrk]; - - // Get and count secondary track stubids - const std::vector>& stubsTrk2 = inputstubidslists_[jtrk]; - - // Count number of layers that share stubs, and the number of UR that each track hits - unsigned int nShareLay = 0; - if (settings_.mergeComparison() == "CompareAll") { - bool layerArr[16]; - for (auto& i : layerArr) { - i = false; - }; - for (const auto& st1 : stubsTrk1) { - for (const auto& st2 : stubsTrk2) { - if (st1.first == st2.first && st1.second == st2.second) { // tracks share stub - // Converts layer/disk encoded in st1->first to an index in the layer array - int i = st1.first; // layer/disk - bool barrel = (i > 0 && i < 10); - bool endcapA = (i > 10); - bool endcapB = (i < 0); - int lay = barrel * (i - 1) + endcapA * (i - 5) - endcapB * i; // encode in range 0-15 - if (!layerArr[lay]) { - nShareLay++; - layerArr[lay] = true; + // Find duplicates; Fill dupMap by looping over all pairs of "tracks" + // numStublists-1 since last track has no other to compare to + for (unsigned int itrk = 0; itrk < numStublists - 1; itrk++) { + for (unsigned int jtrk = itrk + 1; jtrk < numStublists; jtrk++) { + if (itrk >= settings_.numTracksComparedPerBin()) + continue; + // Get primary track stubids = (layer, unique stub index within layer) + const std::vector>& stubsTrk1 = inputstubidslists_[itrk]; + + // Get and count secondary track stubids + const std::vector>& stubsTrk2 = inputstubidslists_[jtrk]; + + // Count number of layers that share stubs, and the number of UR that each track hits + unsigned int nShareLay = 0; + if (settings_.mergeComparison() == "CompareAll") { + bool layerArr[16]; + for (auto& i : layerArr) { + i = false; + }; + for (const auto& st1 : stubsTrk1) { + for (const auto& st2 : stubsTrk2) { + if (st1.first == st2.first 
&& st1.second == st2.second) { // tracks share stub + // Converts layer/disk encoded in st1->first to an index in the layer array + int i = st1.first; // layer/disk + bool barrel = (i > 0 && i < 10); + bool endcapA = (i > 10); + bool endcapB = (i < 0); + int lay = barrel * (i - 1) + endcapA * (i - 5) - endcapB * i; // encode in range 0-15 + if (!layerArr[lay]) { + nShareLay++; + layerArr[lay] = true; + } } } } - } - } else if (settings_.mergeComparison() == "CompareBest") { - std::vector fullStubslistsTrk1 = inputstublists_[itrk]; - std::vector fullStubslistsTrk2 = inputstublists_[jtrk]; - - // Arrays to store the index of the best stub in each layer - int layStubidsTrk1[16]; - int layStubidsTrk2[16]; - for (int i = 0; i < 16; i++) { - layStubidsTrk1[i] = -1; - layStubidsTrk2[i] = -1; - } - // For each stub on the first track, find the stub with the best residual and store its index in the layStubidsTrk1 array - for (unsigned int stcount = 0; stcount < stubsTrk1.size(); stcount++) { - int i = stubsTrk1[stcount].first; // layer/disk - bool barrel = (i > 0 && i < 10); - bool endcapA = (i > 10); - bool endcapB = (i < 0); - int lay = barrel * (i - 1) + endcapA * (i - 5) - endcapB * i; // encode in range 0-15 - double nres = getPhiRes(inputtracklets_[itrk], fullStubslistsTrk1[stcount]); - double ores = 0; - if (layStubidsTrk1[lay] != -1) - ores = getPhiRes(inputtracklets_[itrk], fullStubslistsTrk1[layStubidsTrk1[lay]]); - if (layStubidsTrk1[lay] == -1 || nres < ores) { - layStubidsTrk1[lay] = stcount; + } else if (settings_.mergeComparison() == "CompareBest") { + std::vector fullStubslistsTrk1 = inputstublists_[itrk]; + std::vector fullStubslistsTrk2 = inputstublists_[jtrk]; + + // Arrays to store the index of the best stub in each layer + int layStubidsTrk1[16]; + int layStubidsTrk2[16]; + for (int i = 0; i < 16; i++) { + layStubidsTrk1[i] = -1; + layStubidsTrk2[i] = -1; } - } - // For each stub on the second track, find the stub with the best residual and store its 
index in the layStubidsTrk1 array - for (unsigned int stcount = 0; stcount < stubsTrk2.size(); stcount++) { - int i = stubsTrk2[stcount].first; // layer/disk - bool barrel = (i > 0 && i < 10); - bool endcapA = (i > 10); - bool endcapB = (i < 0); - int lay = barrel * (i - 1) + endcapA * (i - 5) - endcapB * i; // encode in range 0-15 - double nres = getPhiRes(inputtracklets_[jtrk], fullStubslistsTrk2[stcount]); - double ores = 0; - if (layStubidsTrk2[lay] != -1) - ores = getPhiRes(inputtracklets_[jtrk], fullStubslistsTrk2[layStubidsTrk2[lay]]); - if (layStubidsTrk2[lay] == -1 || nres < ores) { - layStubidsTrk2[lay] = stcount; + // For each stub on the first track, find the stub with the best residual and store its index in the layStubidsTrk1 array + for (unsigned int stcount = 0; stcount < stubsTrk1.size(); stcount++) { + int i = stubsTrk1[stcount].first; // layer/disk + bool barrel = (i > 0 && i < 10); + bool endcapA = (i > 10); + bool endcapB = (i < 0); + int lay = barrel * (i - 1) + endcapA * (i - 5) - endcapB * i; // encode in range 0-15 + double nres = getPhiRes(inputtracklets_[itrk], fullStubslistsTrk1[stcount]); + double ores = 0; + if (layStubidsTrk1[lay] != -1) + ores = getPhiRes(inputtracklets_[itrk], fullStubslistsTrk1[layStubidsTrk1[lay]]); + if (layStubidsTrk1[lay] == -1 || nres < ores) { + layStubidsTrk1[lay] = stcount; + } + } + // For each stub on the second track, find the stub with the best residual and store its index in the layStubidsTrk1 array + for (unsigned int stcount = 0; stcount < stubsTrk2.size(); stcount++) { + int i = stubsTrk2[stcount].first; // layer/disk + bool barrel = (i > 0 && i < 10); + bool endcapA = (i > 10); + bool endcapB = (i < 0); + int lay = barrel * (i - 1) + endcapA * (i - 5) - endcapB * i; // encode in range 0-15 + double nres = getPhiRes(inputtracklets_[jtrk], fullStubslistsTrk2[stcount]); + double ores = 0; + if (layStubidsTrk2[lay] != -1) + ores = getPhiRes(inputtracklets_[jtrk], 
fullStubslistsTrk2[layStubidsTrk2[lay]]); + if (layStubidsTrk2[lay] == -1 || nres < ores) { + layStubidsTrk2[lay] = stcount; + } + } + // For all 16 layers (6 layers and 10 disks), count the number of layers who's best stub on both tracks are the same + for (int i = 0; i < 16; i++) { + int t1i = layStubidsTrk1[i]; + int t2i = layStubidsTrk2[i]; + if (t1i != -1 && t2i != -1 && stubsTrk1[t1i].first == stubsTrk2[t2i].first && + stubsTrk1[t1i].second == stubsTrk2[t2i].second) + nShareLay++; } } - // For all 16 layers (6 layers and 10 disks), count the number of layers who's best stub on both tracks are the same - for (int i = 0; i < 16; i++) { - int t1i = layStubidsTrk1[i]; - int t2i = layStubidsTrk2[i]; - if (t1i != -1 && t2i != -1 && stubsTrk1[t1i].first == stubsTrk2[t2i].first && - stubsTrk1[t1i].second == stubsTrk2[t2i].second) - nShareLay++; + + // Fill duplicate map + if (nShareLay >= settings_.minIndStubs()) { // For number of shared stub merge condition + dupMap[itrk][jtrk] = true; + dupMap[jtrk][itrk] = true; } } - // Fill duplicate map - if (nShareLay >= settings_.minIndStubs()) { // For number of shared stub merge condition - dupMap[itrk][jtrk] = true; - dupMap[jtrk][itrk] = true; - } } - } - // Check to see if the track is a duplicate - for (unsigned int itrk = 0; itrk < numStublists; itrk++) { - for (unsigned int jtrk = 0; jtrk < numStublists; jtrk++) { - if (dupMap[itrk][jtrk]) { - noMerge[itrk] = true; + // Check to see if the track is a duplicate + for (unsigned int itrk = 0; itrk < numStublists; itrk++) { + for (unsigned int jtrk = 0; jtrk < numStublists; jtrk++) { + if (dupMap[itrk][jtrk]) { + noMerge[itrk] = true; + } } } - } - // If the track isn't a duplicate, and if it's in more than one bin, and it is not in the proper varrinvbin, then mark it so it won't be sent to output - for (unsigned int itrk = 0; itrk < numStublists; itrk++) { - if (noMerge[itrk] == false) { - if ((findOverlapRInvBins(inputtracklets_[itrk]).size() > 1) && - 
(findVarRInvBin(inputtracklets_[itrk]) != bin)) { - trackInfo[itrk].second = true; + // If the track isn't a duplicate, and if it's in more than one bin, and it is not in the proper rinv or phi bin, then mark it so it won't be sent to output + for (unsigned int itrk = 0; itrk < numStublists; itrk++) { + if (noMerge[itrk] == false) { + if (((findOverlapRinvBins(inputtracklets_[itrk]).size() > 1) && + (findRinvBin(inputtracklets_[itrk]) != bin)) || + ((findOverlapPhiBins(inputtracklets_[itrk]).size() > 1) && + findPhiBin(inputtracklets_[itrk]) != phiBin)) { + trackInfo[itrk].second = true; + } } } - } - // Merge duplicate tracks - for (unsigned int itrk = 0; itrk < numStublists - 1; itrk++) { - for (unsigned int jtrk = itrk + 1; jtrk < numStublists; jtrk++) { - // Merge a track with its first duplicate found. - if (dupMap[itrk][jtrk]) { - // Set preferred track based on seed rank - int preftrk; - int rejetrk; - if (seedRank[itrk] < seedRank[jtrk]) { - preftrk = itrk; - rejetrk = jtrk; - } else { - preftrk = jtrk; - rejetrk = itrk; - } - - // If the preffered track is in more than one bin, but not in the proper varrinvbin, then mark as true - if ((findOverlapRInvBins(inputtracklets_[preftrk]).size() > 1) && - (findVarRInvBin(inputtracklets_[preftrk]) != bin)) { - trackBinInfo[preftrk] = true; - trackBinInfo[rejetrk] = true; - } else { - // Get a merged stub list - std::vector newStubList; - std::vector stubsTrk1 = inputstublists_[preftrk]; - std::vector stubsTrk2 = inputstublists_[rejetrk]; - std::vector stubsTrk1indices; - std::vector stubsTrk2indices; - for (unsigned int stub1it = 0; stub1it < stubsTrk1.size(); stub1it++) { - stubsTrk1indices.push_back(stubsTrk1[stub1it]->l1tstub()->uniqueIndex()); + // Merge duplicate tracks + for (unsigned int itrk = 0; itrk < numStublists - 1; itrk++) { + for (unsigned int jtrk = itrk + 1; jtrk < numStublists; jtrk++) { + // Merge a track with its first duplicate found. 
+ if (dupMap[itrk][jtrk]) { + // Set preferred track based on seed rank + int preftrk; + int rejetrk; + if (seedRank[itrk] < seedRank[jtrk]) { + preftrk = itrk; + rejetrk = jtrk; + } else { + preftrk = jtrk; + rejetrk = itrk; } - for (unsigned int stub2it = 0; stub2it < stubsTrk2.size(); stub2it++) { - stubsTrk2indices.push_back(stubsTrk2[stub2it]->l1tstub()->uniqueIndex()); - } - newStubList = stubsTrk1; - for (unsigned int stub2it = 0; stub2it < stubsTrk2.size(); stub2it++) { - if (find(stubsTrk1indices.begin(), stubsTrk1indices.end(), stubsTrk2indices[stub2it]) == - stubsTrk1indices.end()) { - newStubList.push_back(stubsTrk2[stub2it]); + + // If the preffered track is in more than one bin, but not in the proper rinv or phi bin, then mark as true + if (((findOverlapRinvBins(inputtracklets_[preftrk]).size() > 1) && + (findRinvBin(inputtracklets_[preftrk]) != bin)) || + ((findOverlapPhiBins(inputtracklets_[preftrk]).size() > 1) && + (findPhiBin(inputtracklets_[preftrk]) != phiBin))) { + trackBinInfo[preftrk] = true; + trackBinInfo[rejetrk] = true; + } else { + // Get a merged stub list + std::vector newStubList; + std::vector stubsTrk1 = inputstublists_[preftrk]; + std::vector stubsTrk2 = inputstublists_[rejetrk]; + std::vector stubsTrk1indices; + std::vector stubsTrk2indices; + for (unsigned int stub1it = 0; stub1it < stubsTrk1.size(); stub1it++) { + stubsTrk1indices.push_back(stubsTrk1[stub1it]->l1tstub()->uniqueIndex()); } - } - // Overwrite stublist of preferred track with merged list - inputstublists_[preftrk] = newStubList; - - std::vector> newStubidsList; - std::vector> stubidsTrk1 = mergedstubidslists_[preftrk]; - std::vector> stubidsTrk2 = mergedstubidslists_[rejetrk]; - newStubidsList = stubidsTrk1; - - for (unsigned int stub2it = 0; stub2it < stubsTrk2.size(); stub2it++) { - if (find(stubsTrk1indices.begin(), stubsTrk1indices.end(), stubsTrk2indices[stub2it]) == - stubsTrk1indices.end()) { - newStubidsList.push_back(stubidsTrk2[stub2it]); + for (unsigned 
int stub2it = 0; stub2it < stubsTrk2.size(); stub2it++) { + stubsTrk2indices.push_back(stubsTrk2[stub2it]->l1tstub()->uniqueIndex()); } - } - // Overwrite stubidslist of preferred track with merged list - mergedstubidslists_[preftrk] = newStubidsList; + newStubList = stubsTrk1; + for (unsigned int stub2it = 0; stub2it < stubsTrk2.size(); stub2it++) { + if (find(stubsTrk1indices.begin(), stubsTrk1indices.end(), stubsTrk2indices[stub2it]) == + stubsTrk1indices.end()) { + newStubList.push_back(stubsTrk2[stub2it]); + } + } + // Overwrite stublist of preferred track with merged list + inputstublists_[preftrk] = newStubList; + + std::vector> newStubidsList; + std::vector> stubidsTrk1 = mergedstubidslists_[preftrk]; + std::vector> stubidsTrk2 = mergedstubidslists_[rejetrk]; + newStubidsList = stubidsTrk1; + + for (unsigned int stub2it = 0; stub2it < stubsTrk2.size(); stub2it++) { + if (find(stubsTrk1indices.begin(), stubsTrk1indices.end(), stubsTrk2indices[stub2it]) == + stubsTrk1indices.end()) { + newStubidsList.push_back(stubidsTrk2[stub2it]); + } + } + // Overwrite stubidslist of preferred track with merged list + mergedstubidslists_[preftrk] = newStubidsList; - // Mark that rejected track has been merged into another track - trackInfo[rejetrk].second = true; + // Mark that rejected track has been merged into another track + trackInfo[rejetrk].second = true; + } } } } - } - for (unsigned int ktrk = 0; ktrk < numStublists; ktrk++) { - if ((trackInfo[ktrk].second != true) && (trackBinInfo[ktrk] != true)) { - prefTracks.push_back(ktrk); - prefTrackFit.push_back(trackInfo[ktrk].first); - inputtrackletsall.push_back(inputtracklets_[ktrk]); - inputstublistsall.push_back(inputstublists_[ktrk]); - inputstubidslistsall.push_back(inputstubidslists_[ktrk]); - mergedstubidslistsall.push_back(mergedstubidslists_[ktrk]); + for (unsigned int ktrk = 0; ktrk < numStublists; ktrk++) { + if ((trackInfo[ktrk].second != true) && (trackBinInfo[ktrk] != true)) { + prefTracks.push_back(ktrk); 
+ prefTrackFit.push_back(trackInfo[ktrk].first); + inputtrackletsall.push_back(inputtracklets_[ktrk]); + inputstublistsall.push_back(inputstublists_[ktrk]); + inputstubidslistsall.push_back(inputstubidslists_[ktrk]); + mergedstubidslistsall.push_back(mergedstubidslists_[ktrk]); + } } - } - // Need to clear all the vectors which will be used in the next bin - seedRank.clear(); - trackInfo.clear(); - trackBinInfo.clear(); - inputtracklets_.clear(); - inputstublists_.clear(); - inputstubidslists_.clear(); - mergedstubidslists_.clear(); + // Need to clear all the vectors which will be used in the next bin + seedRank.clear(); + trackInfo.clear(); + trackBinInfo.clear(); + inputtracklets_.clear(); + inputstublists_.clear(); + inputstubidslists_.clear(); + mergedstubidslists_.clear(); + } } // Make the final track objects, fit with KF, and send to output @@ -410,7 +421,7 @@ void PurgeDuplicate::execute(std::vector& outputtracks_, unsigned int iSe // Add all tracks to standalone root file output outtrack->setStubIDpremerge(inputstubidslistsall[itrk]); outtrack->setStubIDprefit(mergedstubidslistsall[itrk]); - outputtracks_.push_back(*outtrack); + outputtracks.push_back(*outtrack); } } } @@ -539,7 +550,7 @@ void PurgeDuplicate::execute(std::vector& outputtracks_, unsigned int iSe outputtracklets_[i]->addTrack(inputtrackfits_[i]->getTrack(j)); } //For root file: - outputtracks_.push_back(*inputtrackfits_[i]->getTrack(j)->getTrack()); + outputtracks.push_back(*inputtrackfits_[i]->getTrack(j)->getTrack()); } } } @@ -640,7 +651,7 @@ std::vector PurgeDuplicate::getInventedCoords(unsigned int iSector, stub_r = 2 / tracklet_rinv * std::sin((stub_z - tracklet->z0()) * tracklet_rinv / 2 / tracklet->t()); } - std::vector invented_coords{stub_r, stub_z, stub_phi}; + std::vector invented_coords{stub_r, stub_z, stub_phi}; return invented_coords; } @@ -691,7 +702,7 @@ std::vector PurgeDuplicate::getInventedCoordsExtended(unsigned int iSect stub_r = st->l1tstub()->r(); } - std::vector 
invented_coords{stub_r, stub_z, stub_phi}; + std::vector invented_coords{stub_r, stub_z, stub_phi}; return invented_coords; } @@ -733,33 +744,77 @@ std::vector PurgeDuplicate::getInventedSeedingStub( } // Tells us the variable bin to which a track would belong -unsigned int PurgeDuplicate::findVarRInvBin(const Tracklet* trk) const { - std::vector rInvBins = settings_.varRInvBins(); +unsigned int PurgeDuplicate::findRinvBin(const Tracklet* trk) const { + std::vector rinvBins = settings_.rinvBins(); //Get rinverse of track - double rInv = trk->rinv(); + double rinv = trk->rinv(); //Check between what 2 values in rinvbins rinv is between - auto bins = std::upper_bound(rInvBins.begin(), rInvBins.end(), rInv); + auto bins = std::upper_bound(rinvBins.begin(), rinvBins.end(), rinv); //return integer for bin index - unsigned int rIndx = std::distance(rInvBins.begin(), bins); - if (rIndx == std::distance(rInvBins.end(), bins)) - return rInvBins.size() - 2; - else if (bins == rInvBins.begin()) - return std::distance(rInvBins.begin(), bins); + unsigned int rIndx = std::distance(rinvBins.begin(), bins); + if (rIndx == std::distance(rinvBins.end(), bins)) + return rinvBins.size() - 2; + else if (bins == rinvBins.begin()) + return std::distance(rinvBins.begin(), bins); else return rIndx - 1; } -// Tells us the overlap bin(s) to which a track belongs -std::vector PurgeDuplicate::findOverlapRInvBins(const Tracklet* trk) const { - double rInv = trk->rinv(); - const double overlapSize = settings_.overlapSize(); - const std::vector& varRInvBins = settings_.varRInvBins(); +// Tells us the phi bin to which a track would belong +unsigned int PurgeDuplicate::findPhiBin(const Tracklet* trk) const { + std::vector phiBins = settings_.phiBins(); + + //Get phi of track at rcrit + double phi0 = trk->phi0(); + double rcrit = settings_.rcrit(); + double rinv = trk->rinv(); + double phi = phi0 - asin(0.5 * rinv * rcrit); + + //Check between what 2 values in phibins phi is between + auto bins = 
std::upper_bound(phiBins.begin(), phiBins.end(), phi); + + //return integer for bin index + unsigned int phiIndx = std::distance(phiBins.begin(), bins); + if (phiIndx == std::distance(phiBins.end(), bins)) + return phiBins.size() - 2; + else if (bins == phiBins.begin()) + return std::distance(phiBins.begin(), bins); + else + return phiIndx - 1; +} + +// Tells us the overlap rinv bin(s) to which a track belongs +std::vector PurgeDuplicate::findOverlapRinvBins(const Tracklet* trk) const { + double rinv = trk->rinv(); + + const double rinvOverlapSize = settings_.rinvOverlapSize(); + const std::vector& rinvBins = settings_.rinvBins(); + + std::vector chosenBins; + for (long unsigned int i = 0; i < rinvBins.size() - 1; i++) { + if ((rinv < rinvBins[i + 1] + rinvOverlapSize) && (rinv > rinvBins[i] - rinvOverlapSize)) { + chosenBins.push_back(i); + } + } + return chosenBins; +} + +// Tells us the overlap phi bin(s) to which a track belongs +std::vector PurgeDuplicate::findOverlapPhiBins(const Tracklet* trk) const { + double phi0 = trk->phi0(); + double rcrit = settings_.rcrit(); + double rinv = trk->rinv(); + double phi = phi0 - asin(0.5 * rinv * rcrit); + + const double phiOverlapSize = settings_.phiOverlapSize(); + const std::vector& phiBins = settings_.phiBins(); + std::vector chosenBins; - for (long unsigned int i = 0; i < varRInvBins.size() - 1; i++) { - if ((rInv < varRInvBins[i + 1] + overlapSize) && (rInv > varRInvBins[i] - overlapSize)) { + for (long unsigned int i = 0; i < phiBins.size() - 1; i++) { + if ((phi < phiBins[i + 1] + phiOverlapSize) && (phi > phiBins[i] - phiOverlapSize)) { chosenBins.push_back(i); } } diff --git a/L1Trigger/TrackFindingTracklet/src/Stub.cc b/L1Trigger/TrackFindingTracklet/src/Stub.cc index 8359d17894966..18003aa99c26f 100644 --- a/L1Trigger/TrackFindingTracklet/src/Stub.cc +++ b/L1Trigger/TrackFindingTracklet/src/Stub.cc @@ -77,6 +77,8 @@ Stub::Stub(L1TStub& stub, Settings const& settings, Globals& globals) : settings 
alpha_.set(newalpha, nalphabits, false, __LINE__, __FILE__); nrbits = 4; } + int negdisk = (disk < 0) ? 1 : 0; + negdisk_.set(negdisk, 1, true, __LINE__, __FILE__); } else { disk_.set(0, 4, false, __LINE__, __FILE__); layer_.set(layerdisk_, 3, true, __LINE__, __FILE__); @@ -120,6 +122,12 @@ std::string Stub::phiregionaddressstr() const { return phiregion.str() + stubindex_.str(); } +std::string Stub::phiregionstr() const { + int iphi = (phicorr_.value() >> (phicorr_.nbits() - settings_.nbitsallstubs(layerdisk()))); + FPGAWord phiregion(iphi, 3, true, __LINE__, __FILE__); + return phiregion.str(); +} + void Stub::setAllStubIndex(int nstub) { if (nstub >= (1 << N_BITSMEMADDRESS)) { if (settings_.debugTracklet()) diff --git a/L1Trigger/TrackFindingTracklet/src/StubKiller.cc b/L1Trigger/TrackFindingTracklet/src/StubKiller.cc new file mode 100644 index 0000000000000..64a30d8390007 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/src/StubKiller.cc @@ -0,0 +1,288 @@ +#include "L1Trigger/TrackFindingTracklet/interface/StubKiller.h" + +using namespace std; + +StubKiller::StubKiller() + : killScenario_(0), + trackerTopology_(nullptr), + trackerGeometry_(nullptr), + layersToKill_(vector()), + minPhiToKill_(0), + maxPhiToKill_(0), + minZToKill_(0), + maxZToKill_(0), + minRToKill_(0), + maxRToKill_(0), + fractionOfStubsToKillInLayers_(0), + fractionOfStubsToKillEverywhere_(0), + fractionOfModulesToKillEverywhere_(0) {} + +void StubKiller::initialise(unsigned int killScenario, + const TrackerTopology* trackerTopology, + const TrackerGeometry* trackerGeometry) { + killScenario_ = killScenario; + trackerTopology_ = trackerTopology; + trackerGeometry_ = trackerGeometry; + + switch (killScenario_) { + // kill layer 5 in one quadrant + 5% random module loss to connect to what was done before + case 1: + layersToKill_ = {5}; + minPhiToKill_ = 0.0; + maxPhiToKill_ = TMath::PiOver2(); + minZToKill_ = -1000.0; + maxZToKill_ = 0.0; + minRToKill_ = 0.0; + maxRToKill_ = 1000.0; + 
fractionOfStubsToKillInLayers_ = 1; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0.05; + break; + + // kill layer 1 in one quadrant + 5% random module loss + case 2: + layersToKill_ = {1}; + minPhiToKill_ = 0.0; + maxPhiToKill_ = TMath::PiOver2(); + minZToKill_ = -1000.0; + maxZToKill_ = 0.0; + minRToKill_ = 0.0; + maxRToKill_ = 1000.0; + fractionOfStubsToKillInLayers_ = 1; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0.05; + break; + + // kill layer 1 + layer 2, both in same quadrant + case 3: + layersToKill_ = {1, 2}; + minPhiToKill_ = 0.0; + maxPhiToKill_ = TMath::PiOver2(); + minZToKill_ = -1000.0; + maxZToKill_ = 0.0; + minRToKill_ = 0.0; + maxRToKill_ = 1000.0; + fractionOfStubsToKillInLayers_ = 1; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0; + break; + + // kill layer 1 and disk 1, both in same quadrant + case 4: + layersToKill_ = {1, 11}; + minPhiToKill_ = 0.0; + maxPhiToKill_ = TMath::PiOver2(); + minZToKill_ = -1000.0; + maxZToKill_ = 0.0; + minRToKill_ = 0.0; + maxRToKill_ = 66.5; + fractionOfStubsToKillInLayers_ = 1; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0; + break; + + // 5% random module loss throughout tracker + case 5: + layersToKill_ = {}; + fractionOfStubsToKillInLayers_ = 0; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0.05; + break; + + // 1% random module loss throughout tracker + case 6: + layersToKill_ = {}; + fractionOfStubsToKillInLayers_ = 0; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0.01; + break; + + // kill layer 5 in one quadrant + 1% random module loss + case 7: + layersToKill_ = {5}; + minPhiToKill_ = 0.0; + maxPhiToKill_ = TMath::PiOver2(); + minZToKill_ = -1000.0; + maxZToKill_ = 0.0; + minRToKill_ = 0.0; + maxRToKill_ = 1000.0; + fractionOfStubsToKillInLayers_ = 1; + fractionOfStubsToKillEverywhere_ = 0; + 
fractionOfModulesToKillEverywhere_ = 0.01; + break; + + // kill layer 1 in one quadrant +1 % random module loss + case 8: + layersToKill_ = {1}; + minPhiToKill_ = 0.0; + maxPhiToKill_ = TMath::PiOver2(); + minZToKill_ = -1000.0; + maxZToKill_ = 0.0; + minRToKill_ = 0.0; + maxRToKill_ = 1000.0; + fractionOfStubsToKillInLayers_ = 1; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0.01; + break; + + // 10% random module loss throughout tracker + case 9: + layersToKill_ = {}; + fractionOfStubsToKillInLayers_ = 0; + fractionOfStubsToKillEverywhere_ = 0; + fractionOfModulesToKillEverywhere_ = 0.10; + break; + } + + deadModules_.clear(); + if (fractionOfModulesToKillEverywhere_ > 0) { + this->chooseModulesToKill(); + } + this->addDeadLayerModulesToDeadModuleList(); +} + +void StubKiller::chooseModulesToKill() { + TRandom3* randomGenerator = new TRandom3(); + randomGenerator->SetSeed(0); + + for (const GeomDetUnit* gd : trackerGeometry_->detUnits()) { + if (!trackerTopology_->isLower(gd->geographicalId())) + continue; + if (randomGenerator->Uniform(0.0, 1.0) < fractionOfModulesToKillEverywhere_) { + deadModules_[gd->geographicalId()] = 1; + } + } +} + +void StubKiller::addDeadLayerModulesToDeadModuleList() { + for (const GeomDetUnit* gd : trackerGeometry_->detUnits()) { + float moduleR = gd->position().perp(); + float moduleZ = gd->position().z(); + float modulePhi = gd->position().phi(); + DetId geoDetId = gd->geographicalId(); + bool isInBarrel = geoDetId.subdetId() == StripSubdetector::TOB || geoDetId.subdetId() == StripSubdetector::TIB; + + int layerID = 0; + if (isInBarrel) { + layerID = trackerTopology_->layer(geoDetId); + } else { + layerID = 10 * trackerTopology_->side(geoDetId) + trackerTopology_->tidWheel(geoDetId); + } + + if (find(layersToKill_.begin(), layersToKill_.end(), layerID) != layersToKill_.end()) { + if (modulePhi < -1.0 * TMath::Pi()) + modulePhi += 2.0 * TMath::Pi(); + else if (modulePhi > TMath::Pi()) + modulePhi -= 
2.0 * TMath::Pi(); + + if (modulePhi > minPhiToKill_ && modulePhi < maxPhiToKill_ && moduleZ > minZToKill_ && moduleZ < maxZToKill_ && + moduleR > minRToKill_ && moduleR < maxRToKill_) { + if (deadModules_.find(gd->geographicalId()) == deadModules_.end()) { + deadModules_[gd->geographicalId()] = fractionOfStubsToKillInLayers_; + } + } + } + } +} + +bool StubKiller::killStub(const TTStub* stub) { + if (killScenario_ == 0) + return false; + else { + bool killStubRandomly = killStub(stub, + layersToKill_, + minPhiToKill_, + maxPhiToKill_, + minZToKill_, + maxZToKill_, + minRToKill_, + maxRToKill_, + fractionOfStubsToKillInLayers_, + fractionOfStubsToKillEverywhere_); + bool killStubInDeadModules = killStubInDeadModule(stub); + return killStubRandomly || killStubInDeadModules; + } +} + +// layersToKill - a vector stating the layers we are killing stubs in. Can be an empty vector. +// Barrel layers are encoded as 1-6. The endcap layers are encoded as 11-15 (-z) and 21-25 (+z) +// min/max Phi/Z/R - stubs within the region specified by these boundaries and layersToKill are flagged for killing +// fractionOfStubsToKillInLayers - The fraction of stubs to kill in the specified layers/region. 
+// fractionOfStubsToKillEverywhere - The fraction of stubs to kill throughout the tracker +bool StubKiller::killStub(const TTStub* stub, + const vector layersToKill, + const double minPhiToKill, + const double maxPhiToKill, + const double minZToKill, + const double maxZToKill, + const double minRToKill, + const double maxRToKill, + const double fractionOfStubsToKillInLayers, + const double fractionOfStubsToKillEverywhere) { + // Only kill stubs in specified layers + if (layersToKill.empty()) { + // Get the layer the stub is in, and check if it's in the layer you want to kill + DetId stackDetid = stub->getDetId(); + DetId geoDetId(stackDetid.rawId() + 1); + + bool isInBarrel = geoDetId.subdetId() == StripSubdetector::TOB || geoDetId.subdetId() == StripSubdetector::TIB; + + int layerID = 0; + if (isInBarrel) { + layerID = trackerTopology_->layer(geoDetId); + } else { + layerID = 10 * trackerTopology_->side(geoDetId) + trackerTopology_->tidWheel(geoDetId); + } + + if (find(layersToKill.begin(), layersToKill.end(), layerID) != layersToKill.end()) { + // Get the phi and z of stub, and check if it's in the region you want to kill + const GeomDetUnit* det0 = trackerGeometry_->idToDetUnit(geoDetId); + const PixelGeomDetUnit* theGeomDet = dynamic_cast(det0); + const PixelTopology* topol = dynamic_cast(&(theGeomDet->specificTopology())); + MeasurementPoint measurementPoint = stub->clusterRef(0)->findAverageLocalCoordinatesCentered(); + LocalPoint clustlp = topol->localPosition(measurementPoint); + GlobalPoint pos = theGeomDet->surface().toGlobal(clustlp); + + // Just in case phi is outside of -pi -> pi + double stubPhi = pos.phi(); + if (stubPhi < -1.0 * TMath::Pi()) + stubPhi += 2.0 * TMath::Pi(); + else if (stubPhi > TMath::Pi()) + stubPhi -= 2.0 * TMath::Pi(); + + if (stubPhi > minPhiToKill && stubPhi < maxPhiToKill && pos.z() > minZToKill && pos.z() < maxZToKill && + pos.perp() > minRToKill && pos.perp() < maxRToKill) { + // Kill fraction of stubs + if 
(fractionOfStubsToKillInLayers == 1) { + return true; + } else { + static TRandom randomGenerator; + if (randomGenerator.Rndm() < fractionOfStubsToKillInLayers) { + return true; + } + } + } + } + } + + // Kill fraction of stubs throughout tracker + if (fractionOfStubsToKillEverywhere > 0) { + static TRandom randomGenerator; + if (randomGenerator.Rndm() < fractionOfStubsToKillEverywhere) { + return true; + } + } + + return false; +} + +bool StubKiller::killStubInDeadModule(const TTStub* stub) { + if (deadModules_.empty()) { + DetId stackDetid = stub->getDetId(); + DetId geoDetId(stackDetid.rawId() + 1); + if (deadModules_.find(geoDetId) != deadModules_.end()) + return true; + } + + return false; +} diff --git a/L1Trigger/TrackFindingTracklet/src/Tracklet.cc b/L1Trigger/TrackFindingTracklet/src/Tracklet.cc index 0b271e05d1ad1..3945895a6913d 100644 --- a/L1Trigger/TrackFindingTracklet/src/Tracklet.cc +++ b/L1Trigger/TrackFindingTracklet/src/Tracklet.cc @@ -194,7 +194,15 @@ std::string Tracklet::trackletparstr() { std::to_string(fpgapars_.t().value() * settings_.ktpars()); return oss; } else { - std::string str = innerFPGAStub_->stubindex().str() + "|"; + std::string str = ""; + if (settings_.combined()) { + if (seedIndex() == Seed::L1D1 || seedIndex() == Seed::L2D1) { + str += outerFPGAStub_->phiregionstr() + "|"; + } else { + str += innerFPGAStub_->phiregionstr() + "|"; + } + } + str += innerFPGAStub_->stubindex().str() + "|"; if (middleFPGAStub_) { str += middleFPGAStub_->stubindex().str() + "|"; } @@ -720,6 +728,8 @@ std::string Tracklet::trackfitstr() const { oss += "1|"; // valid bit oss += tmp.str() + "|"; + oss += innerFPGAStub()->stubindex().str() + "|"; + oss += outerFPGAStub()->stubindex().str() + "|"; oss += fpgapars_.rinv().str() + "|"; oss += fpgapars_.phi0().str() + "|"; oss += fpgapars_.z0().str() + "|"; diff --git a/L1Trigger/TrackFindingTracklet/src/TrackletEventProcessor.cc b/L1Trigger/TrackFindingTracklet/src/TrackletEventProcessor.cc index 
3a0238126c82e..2104d3cea740b 100644 --- a/L1Trigger/TrackFindingTracklet/src/TrackletEventProcessor.cc +++ b/L1Trigger/TrackFindingTracklet/src/TrackletEventProcessor.cc @@ -179,7 +179,6 @@ void TrackletEventProcessor::event(SLHCEvent& ev, globals_->event() = &ev; tracks_.clear(); - eventnum_++; bool first = (eventnum_ == 1); diff --git a/L1Trigger/TrackFindingTracklet/src/TrackletLUT.cc b/L1Trigger/TrackFindingTracklet/src/TrackletLUT.cc index 8a769ff71aa5e..acb66ddf68382 100644 --- a/L1Trigger/TrackFindingTracklet/src/TrackletLUT.cc +++ b/L1Trigger/TrackFindingTracklet/src/TrackletLUT.cc @@ -9,7 +9,8 @@ using namespace std; using namespace trklet; -TrackletLUT::TrackletLUT(const Settings& settings) : settings_(settings), setup_(settings.setup()) {} +TrackletLUT::TrackletLUT(const Settings& settings) + : settings_(settings), setup_(settings.setup()), nbits_(0), positive_(true) {} std::vector TrackletLUT::getSensorModules( unsigned int layerdisk, bool isPS, std::array tan_range, unsigned int nzbins, unsigned int zbin) { @@ -237,33 +238,43 @@ void TrackletLUT::initmatchcut(unsigned int layerdisk, MatchType type, unsigned name_ = settings_.combined() ? 
"MP_" : "MC_"; if (type == barrelphi) { + nbits_ = 10; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_phicut.tab"; } if (type == barrelz) { + nbits_ = 9; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_zcut.tab"; } if (type == diskPSphi) { + nbits_ = 19; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_PSphicut.tab"; } if (type == disk2Sphi) { + nbits_ = 19; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_2Sphicut.tab"; } if (type == disk2Sr) { + nbits_ = 7; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_2Srcut.tab"; } if (type == diskPSr) { + nbits_ = 2; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_PSrcut.tab"; } if (type == alphainner) { + nbits_ = 8; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_alphainner.tab"; } if (type == alphaouter) { + nbits_ = 8; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_alphaouter.tab"; } if (type == rSSinner) { + nbits_ = 15; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_rDSSinner.tab"; } if (type == rSSouter) { + nbits_ = 15; name_ += TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_rDSSouter.tab"; } @@ -472,9 +483,8 @@ void TrackletLUT::initTPlut(bool fillInner, } } - nbits_ = 8; - positive_ = false; + nbits_ = 1; char cTP = 'A' + iTP; name_ = "TP_" + TrackletConfigBuilder::LayerName(layerdisk1) + TrackletConfigBuilder::LayerName(layerdisk2) + cTP; @@ -537,6 +547,7 @@ void TrackletLUT::initTPregionlut(unsigned int iSeed, } positive_ = false; + nbits_ = 8; char cTP = 'A' + iTP; name_ = "TP_" + TrackletConfigBuilder::LayerName(layerdisk1) + TrackletConfigBuilder::LayerName(layerdisk2) + cTP + @@ -768,6 +779,7 @@ void TrackletLUT::initteptlut(bool fillInner, } positive_ = false; + nbits_ = 1; if (fillTEMem) { if (fillInner) { @@ -832,6 +844,7 @@ void 
TrackletLUT::initProjectionBend(double k_phider, } positive_ = false; + nbits_ = 5; name_ = settings_.combined() ? "MP_" : "PR_"; name_ += "ProjectionBend_" + TrackletConfigBuilder::LayerName(N_LAYER + idisk) + ".tab"; @@ -975,6 +988,7 @@ void TrackletLUT::initBendMatch(unsigned int layerdisk) { } positive_ = false; + nbits_ = 1; name_ = "METable_" + TrackletConfigBuilder::LayerName(layerdisk) + ".tab"; @@ -1129,6 +1143,7 @@ void TrackletLUT::initVMRTable(unsigned int layerdisk, VMRTableType type, int re //This if a hack where the same memory is used in both ME and TE modules if (layerdisk == LayerDisk::L2 || layerdisk == LayerDisk::L3 || layerdisk == LayerDisk::L4 || layerdisk == LayerDisk::L6) { + nbits_ = 6; positive_ = false; name_ = "VMTableOuter" + TrackletConfigBuilder::LayerName(layerdisk) + ".tab"; writeTable(); @@ -1137,23 +1152,27 @@ void TrackletLUT::initVMRTable(unsigned int layerdisk, VMRTableType type, int re assert(region >= 0); char cregion = 'A' + region; name_ = "VMR_" + TrackletConfigBuilder::LayerName(layerdisk) + "PHI" + cregion + "_finebin.tab"; + nbits_ = 6; positive_ = false; } if (type == VMRTableType::inner) { positive_ = false; + nbits_ = 10; name_ = "VMTableInner" + TrackletConfigBuilder::LayerName(layerdisk) + TrackletConfigBuilder::LayerName(layerdisk + 1) + ".tab"; } if (type == VMRTableType::inneroverlap) { positive_ = false; + nbits_ = 10; name_ = "VMTableInner" + TrackletConfigBuilder::LayerName(layerdisk) + TrackletConfigBuilder::LayerName(N_LAYER) + ".tab"; } if (type == VMRTableType::disk) { positive_ = false; + nbits_ = 10; name_ = "VMTableOuter" + TrackletConfigBuilder::LayerName(layerdisk) + ".tab"; } } @@ -1378,11 +1397,15 @@ int TrackletLUT::getphiCorrValue( // Write LUT table. 
void TrackletLUT::writeTable() const { - if (!settings_.writeTable()) { + if (name_.empty()) { return; } - if (name_.empty()) { + if (nbits_ == 0) { + throw cms::Exception("LogicError") << "Error in " << __FILE__ << " nbits_ == 0 "; + } + + if (!settings_.writeTable()) { return; } @@ -1431,6 +1454,10 @@ void TrackletLUT::writeTable() const { } int TrackletLUT::lookup(unsigned int index) const { + if (index >= table_.size()) { + throw cms::Exception("LogicError") << "Error in " << __FILE__ << " index >= size " << index << " " << table_.size() + << " in " << name_; + } assert(index < table_.size()); return table_[index]; } diff --git a/L1Trigger/TrackFindingTracklet/src/VMRouterCM.cc b/L1Trigger/TrackFindingTracklet/src/VMRouterCM.cc index 5a10ad3599e4c..371e9725612ad 100644 --- a/L1Trigger/TrackFindingTracklet/src/VMRouterCM.cc +++ b/L1Trigger/TrackFindingTracklet/src/VMRouterCM.cc @@ -258,9 +258,9 @@ void VMRouterCM::execute(unsigned int) { FPGAWord(stub->bend().value(), nbendbits, true, __LINE__, __FILE__), allStubIndex); - assert(vmstubsMEPHI_[0] != nullptr); - - vmstubsMEPHI_[0]->addStub(vmstub, ivm * nvmmebins_ + vmbin); + if (vmstubsMEPHI_[0] != nullptr) { + vmstubsMEPHI_[0]->addStub(vmstub, ivm * nvmmebins_ + vmbin); + } //Fill the TE VM memories if (layerdisk_ >= N_LAYER && (!stub->isPSmodule())) diff --git a/L1Trigger/TrackFindingTracklet/test/AnalyzerDR.cc b/L1Trigger/TrackFindingTracklet/test/AnalyzerDR.cc new file mode 100644 index 0000000000000..fc5e36f397075 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/test/AnalyzerDR.cc @@ -0,0 +1,328 @@ +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/Framework/interface/Run.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include 
"FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/Exception.h" +#include "CommonTools/UtilAlgos/interface/TFileService.h" +#include "DataFormats/Common/interface/Handle.h" + +#include "SimTracker/TrackTriggerAssociation/interface/StubAssociation.h" +#include "L1Trigger/TrackTrigger/interface/Setup.h" +#include "L1Trigger/TrackerTFP/interface/DataFormats.h" +#include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace edm; +using namespace trackerTFP; +using namespace tt; + +namespace trklet { + + /*! \class trklet::AnalyzerDR + * \brief Class to analyze hardware like structured TTStub Collection generated by DR module + * \author Thomas Schuh + * \date 2023, Feb + */ + class AnalyzerDR : public one::EDAnalyzer { + public: + AnalyzerDR(const ParameterSet& iConfig); + void beginJob() override {} + void beginRun(const Run& iEvent, const EventSetup& iSetup) override; + void analyze(const Event& iEvent, const EventSetup& iSetup) override; + void endRun(const Run& iEvent, const EventSetup& iSetup) override {} + void endJob() override; + + private: + // + void formTracks(const StreamsTrack& streamsTrack, + const StreamsStub& streamsStubs, + vector>& tracks, + int channel) const; + // + void associate(const vector>& tracks, + const StubAssociation* ass, + set& tps, + int& sum, + bool perfect = false) const; + // ED input token of stubs + EDGetTokenT edGetTokenAcceptedStubs_; + // ED input token of tracks + EDGetTokenT edGetTokenAcceptedTracks_; + // ED input token of lost stubs + EDGetTokenT edGetTokenLostStubs_; + // ED input token of lost tracks + EDGetTokenT edGetTokenLostTracks_; + // ED input token of TTStubRef to TPPtr association for tracking efficiency + EDGetTokenT 
edGetTokenSelection_; + // ED input token of TTStubRef to reconstructable TPPtr association + EDGetTokenT edGetTokenReconstructable_; + // Setup token + ESGetToken esGetTokenSetup_; + // DataFormats token + ESGetToken esGetTokenDataFormats_; + // ChannelAssignment token + ESGetToken esGetTokenChannelAssignment_; + // stores, calculates and provides run-time constants + const Setup* setup_ = nullptr; + // helper class to extract structured data from tt::Frames + const DataFormats* dataFormats_ = nullptr; + // helper class to assign tracklet track to channel + const ChannelAssignment* channelAssignment_ = nullptr; + // enables analysis of TPs + bool useMCTruth_; + // + int nEvents_ = 0; + + // Histograms + + TProfile* prof_; + TProfile* profChannel_; + TH1F* hisChannel_; + + // printout + stringstream log_; + }; + + AnalyzerDR::AnalyzerDR(const ParameterSet& iConfig) : useMCTruth_(iConfig.getParameter("UseMCTruth")) { + usesResource("TFileService"); + // book in- and output ED products + const string& label = iConfig.getParameter("LabelDR"); + const string& branchAcceptedStubs = iConfig.getParameter("BranchAcceptedStubs"); + const string& branchAcceptedTracks = iConfig.getParameter("BranchAcceptedTracks"); + const string& branchLostStubs = iConfig.getParameter("BranchLostStubs"); + const string& branchLostTracks = iConfig.getParameter("BranchLostTracks"); + edGetTokenAcceptedStubs_ = consumes(InputTag(label, branchAcceptedStubs)); + edGetTokenAcceptedTracks_ = consumes(InputTag(label, branchAcceptedTracks)); + edGetTokenLostStubs_ = consumes(InputTag(label, branchLostStubs)); + edGetTokenLostTracks_ = consumes(InputTag(label, branchLostTracks)); + if (useMCTruth_) { + const auto& inputTagSelection = iConfig.getParameter("InputTagSelection"); + const auto& inputTagReconstructable = iConfig.getParameter("InputTagReconstructable"); + edGetTokenSelection_ = consumes(inputTagSelection); + edGetTokenReconstructable_ = consumes(inputTagReconstructable); + } + // book ES
products + esGetTokenSetup_ = esConsumes(); + esGetTokenDataFormats_ = esConsumes(); + esGetTokenChannelAssignment_ = esConsumes(); + // log config + log_.setf(ios::fixed, ios::floatfield); + log_.precision(4); + } + + void AnalyzerDR::beginRun(const Run& iEvent, const EventSetup& iSetup) { + // helper class to store configurations + setup_ = &iSetup.getData(esGetTokenSetup_); + // helper class to extract structured data from tt::Frames + dataFormats_ = &iSetup.getData(esGetTokenDataFormats_); + // helper class to assign tracklet track to channel + channelAssignment_ = &iSetup.getData(esGetTokenChannelAssignment_); + // book histograms + Service fs; + TFileDirectory dir; + dir = fs->mkdir("DR"); + prof_ = dir.make("Counts", ";", 10, 0.5, 10.5); + prof_->GetXaxis()->SetBinLabel(1, "Stubs"); + prof_->GetXaxis()->SetBinLabel(2, "Tracks"); + prof_->GetXaxis()->SetBinLabel(3, "Lost Tracks"); + prof_->GetXaxis()->SetBinLabel(4, "Matched Tracks"); + prof_->GetXaxis()->SetBinLabel(5, "All Tracks"); + prof_->GetXaxis()->SetBinLabel(6, "Found TPs"); + prof_->GetXaxis()->SetBinLabel(7, "Found selected TPs"); + prof_->GetXaxis()->SetBinLabel(8, "Lost TPs"); + prof_->GetXaxis()->SetBinLabel(9, "All TPs"); + prof_->GetXaxis()->SetBinLabel(10, "Perfect TPs"); + // channel occupancy + constexpr int maxOcc = 180; + const int numChannels = channelAssignment_->numNodesDR(); + hisChannel_ = dir.make("His Channel Occupancy", ";", maxOcc, -.5, maxOcc - .5); + profChannel_ = dir.make("Prof Channel Occupancy", ";", numChannels, -.5, numChannels - .5); + } + + void AnalyzerDR::analyze(const Event& iEvent, const EventSetup& iSetup) { + // read in ht products + Handle handleAcceptedStubs; + iEvent.getByToken(edGetTokenAcceptedStubs_, handleAcceptedStubs); + const StreamsStub& acceptedStubs = *handleAcceptedStubs; + Handle handleAcceptedTracks; + iEvent.getByToken(edGetTokenAcceptedTracks_, handleAcceptedTracks); + const StreamsTrack& acceptedTracks = *handleAcceptedTracks; + Handle 
handleLostStubs; + iEvent.getByToken(edGetTokenLostStubs_, handleLostStubs); + const StreamsStub& lostStubs = *handleLostStubs; + Handle handleLostTracks; + iEvent.getByToken(edGetTokenLostTracks_, handleLostTracks); + const StreamsTrack& lostTracks = *handleLostTracks; + // read in MCTruth + const StubAssociation* selection = nullptr; + const StubAssociation* reconstructable = nullptr; + if (useMCTruth_) { + Handle handleSelection; + iEvent.getByToken(edGetTokenSelection_, handleSelection); + selection = handleSelection.product(); + prof_->Fill(9, selection->numTPs()); + Handle handleReconstructable; + iEvent.getByToken(edGetTokenReconstructable_, handleReconstructable); + reconstructable = handleReconstructable.product(); + } + // analyze ht products and associate found tracks with reconstructable TrackingParticles + set tpPtrs; + set tpPtrsSelection; + set tpPtrsPerfect; + set tpPtrsLost; + int allMatched(0); + int allTracks(0); + for (int region = 0; region < setup_->numRegions(); region++) { + const int offset = region * channelAssignment_->numNodesDR(); + int nStubs(0); + int nTracks(0); + int nLost(0); + for (int channel = 0; channel < channelAssignment_->numNodesDR(); channel++) { + vector> tracks; + formTracks(acceptedTracks, acceptedStubs, tracks, offset + channel); + vector> lost; + formTracks(lostTracks, lostStubs, lost, offset + channel); + nTracks += tracks.size(); + nStubs += accumulate(tracks.begin(), tracks.end(), 0, [](int sum, const vector& track) { + return sum + static_cast(track.size()); + }); + nLost += lost.size(); + allTracks += tracks.size(); + if (!useMCTruth_) + continue; + int tmp(0); + associate(tracks, selection, tpPtrsSelection, tmp); + associate(tracks, selection, tpPtrsPerfect, tmp, true); + associate(lost, selection, tpPtrsLost, tmp); + associate(tracks, reconstructable, tpPtrs, allMatched); + const StreamTrack& stream = acceptedTracks[offset + channel]; + const auto end = + find_if(stream.rbegin(), stream.rend(), [](const
FrameTrack& frame) { return frame.first.isNonnull(); }); + const int size = distance(stream.begin(), end.base()) - 1; + hisChannel_->Fill(size); + profChannel_->Fill(channel, size); + } + prof_->Fill(1, nStubs); + prof_->Fill(2, nTracks); + prof_->Fill(3, nLost); + } + vector recovered; + recovered.reserve(tpPtrsLost.size()); + set_intersection(tpPtrsLost.begin(), tpPtrsLost.end(), tpPtrs.begin(), tpPtrs.end(), back_inserter(recovered)); + for (const TPPtr& tpPtr : recovered) + tpPtrsLost.erase(tpPtr); + prof_->Fill(4, allMatched); + prof_->Fill(5, allTracks); + prof_->Fill(6, tpPtrs.size()); + prof_->Fill(7, tpPtrsSelection.size()); + prof_->Fill(8, tpPtrsLost.size()); + prof_->Fill(10, tpPtrsPerfect.size()); + nEvents_++; + } + + void AnalyzerDR::endJob() { + if (nEvents_ == 0) + return; + // printout SF summary + const double totalTPs = prof_->GetBinContent(9); + const double numStubs = prof_->GetBinContent(1); + const double numTracks = prof_->GetBinContent(2); + const double numTracksLost = prof_->GetBinContent(3); + const double totalTracks = prof_->GetBinContent(5); + const double numTracksMatched = prof_->GetBinContent(4); + const double numTPsAll = prof_->GetBinContent(6); + const double numTPsEff = prof_->GetBinContent(7); + const double numTPsLost = prof_->GetBinContent(8); + const double numTPsEffPerfect = prof_->GetBinContent(10); + const double errStubs = prof_->GetBinError(1); + const double errTracks = prof_->GetBinError(2); + const double errTracksLost = prof_->GetBinError(3); + const double fracFake = (totalTracks - numTracksMatched) / totalTracks; + const double fracDup = (numTracksMatched - numTPsAll) / totalTracks; + const double eff = numTPsEff / totalTPs; + const double errEff = sqrt(eff * (1. - eff) / totalTPs / nEvents_); + const double effLoss = numTPsLost / totalTPs; + const double errEffLoss = sqrt(effLoss * (1. 
- effLoss) / totalTPs / nEvents_); + const double effPerfect = numTPsEffPerfect / totalTPs; + const double errEffPerfect = sqrt(effPerfect * (1. - effPerfect) / totalTPs / nEvents_); + const vector nums = {numStubs, numTracks, numTracksLost}; + const vector errs = {errStubs, errTracks, errTracksLost}; + const int wNums = ceil(log10(*max_element(nums.begin(), nums.end()))) + 5; + const int wErrs = ceil(log10(*max_element(errs.begin(), errs.end()))) + 5; + log_ << " DR SUMMARY " << endl; + log_ << "number of stubs per TFP = " << setw(wNums) << numStubs << " +- " << setw(wErrs) << errStubs << endl; + log_ << "number of tracks per TFP = " << setw(wNums) << numTracks << " +- " << setw(wErrs) << errTracks + << endl; + log_ << "number of lost tracks per TFP = " << setw(wNums) << numTracksLost << " +- " << setw(wErrs) << errTracksLost + << endl; + log_ << " current tracking efficiency = " << setw(wNums) << effPerfect << " +- " << setw(wErrs) << errEffPerfect + << endl; + log_ << " max tracking efficiency = " << setw(wNums) << eff << " +- " << setw(wErrs) << errEff << endl; + log_ << " lost tracking efficiency = " << setw(wNums) << effLoss << " +- " << setw(wErrs) << errEffLoss << endl; + log_ << " fake rate = " << setw(wNums) << fracFake << endl; + log_ << " duplicate rate = " << setw(wNums) << fracDup << endl; + log_ << "============================================================="; + LogPrint("L1Trigger/TrackerTFP") << log_.str(); + } + + // + void AnalyzerDR::formTracks(const StreamsTrack& streamsTrack, + const StreamsStub& streamsStubs, + vector>& tracks, + int channel) const { + const int offset = channel * setup_->numLayers(); + const StreamTrack& streamTrack = streamsTrack[channel]; + const int numTracks = accumulate(streamTrack.begin(), streamTrack.end(), 0, [](int sum, const FrameTrack& frame) { + return sum + (frame.first.isNonnull() ? 
1 : 0); + }); + tracks.reserve(numTracks); + for (int frame = 0; frame < (int)streamTrack.size(); frame++) { + const FrameTrack& frameTrack = streamTrack[frame]; + if (frameTrack.first.isNull()) + continue; + vector ttStubRefs; + ttStubRefs.reserve(setup_->numLayers()); + for (int layer = 0; layer < setup_->numLayers(); layer++) { + const FrameStub& stub = streamsStubs[offset + layer][frame]; + if (stub.first.isNonnull()) + ttStubRefs.push_back(stub.first); + } + tracks.push_back(ttStubRefs); + } + } + + // + void AnalyzerDR::associate(const vector>& tracks, + const StubAssociation* ass, + set& tps, + int& sum, + bool perfect) const { + for (const vector& ttStubRefs : tracks) { + const vector& tpPtrs = perfect ? ass->associateFinal(ttStubRefs) : ass->associate(ttStubRefs); + if (tpPtrs.empty()) + continue; + sum++; + copy(tpPtrs.begin(), tpPtrs.end(), inserter(tps, tps.begin())); + } + } + +} // namespace trklet + +DEFINE_FWK_MODULE(trklet::AnalyzerDR); diff --git a/L1Trigger/TrackFindingTracklet/test/AnalyzerDRin.cc b/L1Trigger/TrackFindingTracklet/test/AnalyzerDRin.cc new file mode 100644 index 0000000000000..e91d45c0b06e2 --- /dev/null +++ b/L1Trigger/TrackFindingTracklet/test/AnalyzerDRin.cc @@ -0,0 +1,329 @@ +#include "FWCore/Framework/interface/one/EDAnalyzer.h" +#include "FWCore/Framework/interface/Run.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ServiceRegistry/interface/Service.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/Exception.h" +#include "CommonTools/UtilAlgos/interface/TFileService.h" +#include "DataFormats/Common/interface/Handle.h" + +#include "SimTracker/TrackTriggerAssociation/interface/StubAssociation.h" 
+#include "L1Trigger/TrackTrigger/interface/Setup.h" +#include "L1Trigger/TrackerTFP/interface/DataFormats.h" +#include "L1Trigger/TrackFindingTracklet/interface/ChannelAssignment.h" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace edm; +using namespace trackerTFP; +using namespace tt; + +namespace trklet { + + /*! \class trklet::AnalyzerDRin + * \brief Class to analyze hardware like structured TTStub Collection generated by DRin module + * \author Thomas Schuh + * \date 2023, Jan + */ + class AnalyzerDRin : public one::EDAnalyzer { + public: + AnalyzerDRin(const ParameterSet& iConfig); + void beginJob() override {} + void beginRun(const Run& iEvent, const EventSetup& iSetup) override; + void analyze(const Event& iEvent, const EventSetup& iSetup) override; + void endRun(const Run& iEvent, const EventSetup& iSetup) override {} + void endJob() override; + + private: + // + void formTracks(const StreamsTrack& streamsTrack, + const StreamsStub& streamsStubs, + vector>& tracks, + int channel) const; + // + void associate(const vector>& tracks, + const StubAssociation* ass, + set& tps, + int& sum, + bool perfect = false) const; + + // ED input token of stubs + EDGetTokenT edGetTokenAcceptedStubs_; + // ED input token of tracks + EDGetTokenT edGetTokenAcceptedTracks_; + // ED input token of lost stubs + EDGetTokenT edGetTokenLostStubs_; + // ED input token of lost tracks + EDGetTokenT edGetTokenLostTracks_; + // ED input token of TTStubRef to TPPtr association for tracking efficiency + EDGetTokenT edGetTokenSelection_; + // ED input token of TTStubRef to reconstructable TPPtr association + EDGetTokenT edGetTokenReconstructable_; + // Setup token + ESGetToken esGetTokenSetup_; + // DataFormats token + ESGetToken esGetTokenDataFormats_; + // ChannelAssignment token + ESGetToken esGetTokenChannelAssignment_; + // stores, calculates and provides run-time constants + const Setup* setup_ =
nullptr; + // helper class to extract structured data from tt::Frames + const DataFormats* dataFormats_ = nullptr; + // helper class to assign tracklet track to channel + const ChannelAssignment* channelAssignment_ = nullptr; + // enables analysis of TPs + bool useMCTruth_; + // + int nEvents_ = 0; + + // Histograms + + TProfile* prof_; + TProfile* profChannel_; + TH1F* hisChannel_; + + // printout + stringstream log_; + }; + + AnalyzerDRin::AnalyzerDRin(const ParameterSet& iConfig) : useMCTruth_(iConfig.getParameter("UseMCTruth")) { + usesResource("TFileService"); + // book in- and output ED products + const string& label = iConfig.getParameter("LabelDRin"); + const string& branchAcceptedStubs = iConfig.getParameter("BranchAcceptedStubs"); + const string& branchAcceptedTracks = iConfig.getParameter("BranchAcceptedTracks"); + const string& branchLostStubs = iConfig.getParameter("BranchLostStubs"); + const string& branchLostTracks = iConfig.getParameter("BranchLostTracks"); + edGetTokenAcceptedStubs_ = consumes(InputTag(label, branchAcceptedStubs)); + edGetTokenAcceptedTracks_ = consumes(InputTag(label, branchAcceptedTracks)); + edGetTokenLostStubs_ = consumes(InputTag(label, branchLostStubs)); + edGetTokenLostTracks_ = consumes(InputTag(label, branchLostTracks)); + if (useMCTruth_) { + const auto& inputTagSelection = iConfig.getParameter("InputTagSelection"); + const auto& inputTagReconstructable = iConfig.getParameter("InputTagReconstructable"); + edGetTokenSelection_ = consumes(inputTagSelection); + edGetTokenReconstructable_ = consumes(inputTagReconstructable); + } + // book ES products + esGetTokenSetup_ = esConsumes(); + esGetTokenDataFormats_ = esConsumes(); + esGetTokenChannelAssignment_ = esConsumes(); + // log config + log_.setf(ios::fixed, ios::floatfield); + log_.precision(4); + } + + void AnalyzerDRin::beginRun(const Run& iEvent, const EventSetup& iSetup) { + // helper class to store configurations + setup_ = &iSetup.getData(esGetTokenSetup_); + //
helper class to extract structured data from tt::Frames + dataFormats_ = &iSetup.getData(esGetTokenDataFormats_); + // helper class to assign tracklet track to channel + channelAssignment_ = &iSetup.getData(esGetTokenChannelAssignment_); + // book histograms + Service fs; + TFileDirectory dir; + dir = fs->mkdir("DRin"); + prof_ = dir.make("Counts", ";", 10, 0.5, 10.5); + prof_->GetXaxis()->SetBinLabel(1, "Stubs"); + prof_->GetXaxis()->SetBinLabel(2, "Tracks"); + prof_->GetXaxis()->SetBinLabel(3, "Lost Tracks"); + prof_->GetXaxis()->SetBinLabel(4, "Matched Tracks"); + prof_->GetXaxis()->SetBinLabel(5, "All Tracks"); + prof_->GetXaxis()->SetBinLabel(6, "Found TPs"); + prof_->GetXaxis()->SetBinLabel(7, "Found selected TPs"); + prof_->GetXaxis()->SetBinLabel(8, "Lost TPs"); + prof_->GetXaxis()->SetBinLabel(9, "All TPs"); + prof_->GetXaxis()->SetBinLabel(10, "Perfect TPs"); + // channel occupancy + constexpr int maxOcc = 180; + const int numChannels = channelAssignment_->numNodesDR(); + hisChannel_ = dir.make("His Channel Occupancy", ";", maxOcc, -.5, maxOcc - .5); + profChannel_ = dir.make("Prof Channel Occupancy", ";", numChannels, -.5, numChannels - .5); + } + + void AnalyzerDRin::analyze(const Event& iEvent, const EventSetup& iSetup) { + // read in ht products + Handle handleAcceptedStubs; + iEvent.getByToken(edGetTokenAcceptedStubs_, handleAcceptedStubs); + const StreamsStub& acceptedStubs = *handleAcceptedStubs; + Handle handleAcceptedTracks; + iEvent.getByToken(edGetTokenAcceptedTracks_, handleAcceptedTracks); + const StreamsTrack& acceptedTracks = *handleAcceptedTracks; + Handle handleLostStubs; + iEvent.getByToken(edGetTokenLostStubs_, handleLostStubs); + const StreamsStub& lostStubs = *handleLostStubs; + Handle handleLostTracks; + iEvent.getByToken(edGetTokenLostTracks_, handleLostTracks); + const StreamsTrack& lostTracks = *handleLostTracks; + // read in MCTruth + const StubAssociation* selection = nullptr; + const StubAssociation* reconstructable = nullptr; 
+ if (useMCTruth_) { + Handle handleSelection; + iEvent.getByToken(edGetTokenSelection_, handleSelection); + selection = handleSelection.product(); + prof_->Fill(9, selection->numTPs()); + Handle handleReconstructable; + iEvent.getByToken(edGetTokenReconstructable_, handleReconstructable); + reconstructable = handleReconstructable.product(); + } + // analyze ht products and associate found tracks with reconstructable TrackingParticles + set tpPtrs; + set tpPtrsSelection; + set tpPtrsPerfect; + set tpPtrsLost; + int allMatched(0); + int allTracks(0); + for (int region = 0; region < setup_->numRegions(); region++) { + const int offset = region * channelAssignment_->numNodesDR(); + int nStubs(0); + int nTracks(0); + int nLost(0); + for (int channel = 0; channel < channelAssignment_->numNodesDR(); channel++) { + vector> tracks; + formTracks(acceptedTracks, acceptedStubs, tracks, offset + channel); + vector> lost; + formTracks(lostTracks, lostStubs, lost, offset + channel); + nTracks += tracks.size(); + nStubs += accumulate(tracks.begin(), tracks.end(), 0, [](int sum, const vector& track) { + return sum + (int)track.size(); + }); + nLost += lost.size(); + allTracks += tracks.size(); + if (!useMCTruth_) + continue; + int tmp(0); + associate(tracks, selection, tpPtrsSelection, tmp); + associate(tracks, selection, tpPtrsPerfect, tmp, true); + associate(lost, selection, tpPtrsLost, tmp); + associate(tracks, reconstructable, tpPtrs, allMatched); + const StreamTrack& stream = acceptedTracks[offset + channel]; + const auto end = + find_if(stream.rbegin(), stream.rend(), [](const FrameTrack& frame) { return frame.first.isNonnull(); }); + const int size = distance(stream.begin(), end.base()) - 1; + hisChannel_->Fill(size); + profChannel_->Fill(channel, size); + } + prof_->Fill(1, nStubs); + prof_->Fill(2, nTracks); + prof_->Fill(3, nLost); + } + vector recovered; + recovered.reserve(tpPtrsLost.size()); + set_intersection(tpPtrsLost.begin(), tpPtrsLost.end(), tpPtrs.begin(),
tpPtrs.end(), back_inserter(recovered)); + for (const TPPtr& tpPtr : recovered) + tpPtrsLost.erase(tpPtr); + prof_->Fill(4, allMatched); + prof_->Fill(5, allTracks); + prof_->Fill(6, tpPtrs.size()); + prof_->Fill(7, tpPtrsSelection.size()); + prof_->Fill(8, tpPtrsLost.size()); + prof_->Fill(10, tpPtrsPerfect.size()); + nEvents_++; + } + + void AnalyzerDRin::endJob() { + if (nEvents_ == 0) + return; + // printout SF summary + const double totalTPs = prof_->GetBinContent(9); + const double numStubs = prof_->GetBinContent(1); + const double numTracks = prof_->GetBinContent(2); + const double numTracksLost = prof_->GetBinContent(3); + const double totalTracks = prof_->GetBinContent(5); + const double numTracksMatched = prof_->GetBinContent(4); + const double numTPsAll = prof_->GetBinContent(6); + const double numTPsEff = prof_->GetBinContent(7); + const double numTPsLost = prof_->GetBinContent(8); + const double numTPsEffPerfect = prof_->GetBinContent(10); + const double errStubs = prof_->GetBinError(1); + const double errTracks = prof_->GetBinError(2); + const double errTracksLost = prof_->GetBinError(3); + const double fracFake = (totalTracks - numTracksMatched) / totalTracks; + const double fracDup = (numTracksMatched - numTPsAll) / totalTracks; + const double eff = numTPsEff / totalTPs; + const double errEff = sqrt(eff * (1. - eff) / totalTPs / nEvents_); + const double effLoss = numTPsLost / totalTPs; + const double errEffLoss = sqrt(effLoss * (1. - effLoss) / totalTPs / nEvents_); + const double effPerfect = numTPsEffPerfect / totalTPs; + const double errEffPerfect = sqrt(effPerfect * (1. 
- effPerfect) / totalTPs / nEvents_); + const vector nums = {numStubs, numTracks, numTracksLost}; + const vector errs = {errStubs, errTracks, errTracksLost}; + const int wNums = ceil(log10(*max_element(nums.begin(), nums.end()))) + 5; + const int wErrs = ceil(log10(*max_element(errs.begin(), errs.end()))) + 5; + log_ << " DRin SUMMARY " << endl; + log_ << "number of stubs per TFP = " << setw(wNums) << numStubs << " +- " << setw(wErrs) << errStubs << endl; + log_ << "number of tracks per TFP = " << setw(wNums) << numTracks << " +- " << setw(wErrs) << errTracks + << endl; + log_ << "number of lost tracks per TFP = " << setw(wNums) << numTracksLost << " +- " << setw(wErrs) << errTracksLost + << endl; + log_ << " current tracking efficiency = " << setw(wNums) << effPerfect << " +- " << setw(wErrs) << errEffPerfect + << endl; + log_ << " max tracking efficiency = " << setw(wNums) << eff << " +- " << setw(wErrs) << errEff << endl; + log_ << " lost tracking efficiency = " << setw(wNums) << effLoss << " +- " << setw(wErrs) << errEffLoss << endl; + log_ << " fake rate = " << setw(wNums) << fracFake << endl; + log_ << " duplicate rate = " << setw(wNums) << fracDup << endl; + log_ << "============================================================="; + LogPrint("L1Trigger/TrackerTFP") << log_.str(); + } + + // + void AnalyzerDRin::formTracks(const StreamsTrack& streamsTrack, + const StreamsStub& streamsStubs, + vector>& tracks, + int channel) const { + const int offset = channel * setup_->numLayers(); + const StreamTrack& streamTrack = streamsTrack[channel]; + const int numTracks = accumulate(streamTrack.begin(), streamTrack.end(), 0, [](int sum, const FrameTrack& frame) { + return sum + (frame.first.isNonnull() ? 
1 : 0); + }); + tracks.reserve(numTracks); + for (int frame = 0; frame < (int)streamTrack.size(); frame++) { + const FrameTrack& frameTrack = streamTrack[frame]; + if (frameTrack.first.isNull()) + continue; + vector ttStubRefs; + ttStubRefs.reserve(setup_->numLayers()); + for (int layer = 0; layer < setup_->numLayers(); layer++) { + const FrameStub& stub = streamsStubs[offset + layer][frame]; + if (stub.first.isNonnull()) + ttStubRefs.push_back(stub.first); + } + tracks.push_back(ttStubRefs); + } + } + + // + void AnalyzerDRin::associate(const vector>& tracks, + const StubAssociation* ass, + set& tps, + int& sum, + bool perfect) const { + for (const vector& ttStubRefs : tracks) { + const vector& tpPtrs = perfect ? ass->associateFinal(ttStubRefs) : ass->associate(ttStubRefs); + if (tpPtrs.empty()) + continue; + sum++; + copy(tpPtrs.begin(), tpPtrs.end(), inserter(tps, tps.begin())); + } + } + +} // namespace trklet + +DEFINE_FWK_MODULE(trklet::AnalyzerDRin); diff --git a/L1Trigger/TrackFindingTracklet/test/AnalyzerKFin.cc b/L1Trigger/TrackFindingTracklet/test/AnalyzerKFin.cc index 19e7e83ec651b..bc9503631e16e 100644 --- a/L1Trigger/TrackFindingTracklet/test/AnalyzerKFin.cc +++ b/L1Trigger/TrackFindingTracklet/test/AnalyzerKFin.cc @@ -60,7 +60,6 @@ namespace trklet { set& tps, int& sum, bool perfect = false) const; - // ED input token of stubs EDGetTokenT edGetTokenAcceptedStubs_; // ED input token of tracks @@ -77,14 +76,10 @@ namespace trklet { ESGetToken esGetTokenSetup_; // DataFormats token ESGetToken esGetTokenDataFormats_; - // ChannelAssignment token - ESGetToken esGetTokenChannelAssignment_; // stores, calculates and provides run-time constants const Setup* setup_ = nullptr; // helper class to extract structured data from tt::Frames const DataFormats* dataFormats_ = nullptr; - // helper class to assign tracklet track to channel - const ChannelAssignment* channelAssignment_ = nullptr; // enables analyze of TPs bool useMCTruth_; // @@ -121,7 +116,6 @@ 
namespace trklet { // book ES products esGetTokenSetup_ = esConsumes(); esGetTokenDataFormats_ = esConsumes(); - esGetTokenChannelAssignment_ = esConsumes(); // log config log_.setf(ios::fixed, ios::floatfield); log_.precision(4); @@ -132,8 +126,6 @@ namespace trklet { setup_ = &iSetup.getData(esGetTokenSetup_); // helper class to extract structured data from tt::Frames dataFormats_ = &iSetup.getData(esGetTokenDataFormats_); - // helper class to assign tracklet track to channel - channelAssignment_ = &iSetup.getData(esGetTokenChannelAssignment_); // book histograms Service fs; TFileDirectory dir; @@ -151,7 +143,7 @@ namespace trklet { prof_->GetXaxis()->SetBinLabel(10, "Perfect TPs"); // channel occupancy constexpr int maxOcc = 180; - const int numChannels = channelAssignment_->numChannelsTrack() * setup_->numLayers() * setup_->numRegions(); + const int numChannels = setup_->kfNumWorker(); hisChannel_ = dir.make("His Channel Occupancy", ";", maxOcc, -.5, maxOcc - .5); profChannel_ = dir.make("Prof Channel Occupancy", ";", numChannels, -.5, numChannels - .5); } @@ -190,11 +182,11 @@ namespace trklet { int allMatched(0); int allTracks(0); for (int region = 0; region < setup_->numRegions(); region++) { - const int offset = region * channelAssignment_->numChannelsTrack(); + const int offset = region * setup_->kfNumWorker(); int nStubs(0); int nTracks(0); int nLost(0); - for (int channel = 0; channel < channelAssignment_->numChannelsTrack(); channel++) { + for (int channel = 0; channel < setup_->kfNumWorker(); channel++) { vector> tracks; formTracks(acceptedTracks, acceptedStubs, tracks, offset + channel); vector> lost; @@ -212,6 +204,12 @@ namespace trklet { associate(tracks, selection, tpPtrsPerfect, tmp, true); associate(lost, selection, tpPtrsLost, tmp); associate(tracks, reconstructable, tpPtrs, allMatched); + const StreamTrack& stream = acceptedTracks[offset + channel]; + const auto end = + find_if(stream.rbegin(), stream.rend(), [](const FrameTrack& frame) { 
return frame.first.isNonnull(); }); + const int size = distance(stream.begin(), end.base()) - 1; + hisChannel_->Fill(size); + profChannel_->Fill(channel, size); } prof_->Fill(1, nStubs); prof_->Fill(2, nTracks); diff --git a/L1Trigger/TrackFindingTracklet/test/AnalyzerTBout.cc b/L1Trigger/TrackFindingTracklet/test/AnalyzerTBout.cc index 2950610fc8753..d4a89e1a96100 100644 --- a/L1Trigger/TrackFindingTracklet/test/AnalyzerTBout.cc +++ b/L1Trigger/TrackFindingTracklet/test/AnalyzerTBout.cc @@ -104,8 +104,6 @@ namespace trklet { vector> regionStubs_; // int region_; - // - vector nOverflows_; // Histograms @@ -146,8 +144,6 @@ namespace trklet { esGetTokenSetup_ = esConsumes(); esGetTokenDataFormats_ = esConsumes(); esGetTokenChannelAssignment_ = esConsumes(); - // - nOverflows_ = vector(2, 0); // log config log_.setf(ios::fixed, ios::floatfield); log_.precision(4); @@ -321,7 +317,6 @@ namespace trklet { log_ << " lost tracking efficiency = " << setw(wNums) << effLoss << " +- " << setw(wErrs) << errEffLoss << endl; log_ << " fake rate = " << setw(wNums) << fracFake << endl; log_ << " duplicate rate = " << setw(wNums) << fracDup << endl; - log_ << "number of overflowed phi residuals: " << nOverflows_[0] << " and z: " << nOverflows_[1] << endl; log_ << "============================================================="; LogPrint("L1Trigger/TrackerTFP") << log_.str(); } @@ -343,19 +338,16 @@ namespace trklet { if (frameTrack.first.isNull()) continue; vector ttStubRefs; - ttStubRefs.reserve(channelAssignment_->numProjectionLayers(seedType) + - channelAssignment_->seedingLayers(seedType).size()); - for (int layer = 0; layer < channelAssignment_->numProjectionLayers(seedType); layer++) { - const FrameStub& stub = streamsStubs[offset + layer][frame]; - if (stub.first.isNonnull()) { + const int numProjectionLayers = channelAssignment_->numProjectionLayers(seedType); + const int numSeedingLayers = channelAssignment_->seedingLayers(seedType).size(); + 
ttStubRefs.reserve(numProjectionLayers + numSeedingLayers); + for (int channel = 0; channel < numProjectionLayers + numSeedingLayers; channel++) { + const FrameStub& stub = streamsStubs[offset + channel][frame]; + if (stub.first.isNull()) + continue; + if (channel < numProjectionLayers) this->fill(frameTrack, stub); - ttStubRefs.push_back(stub.first); - } - } - for (const TTStubRef& ttStubRef : frameTrack.first->getStubRefs()) { - int layer(0); - if (!channelAssignment_->layerId(seedType, ttStubRef, layer)) - ttStubRefs.push_back(ttStubRef); + ttStubRefs.push_back(stub.first); } tracks.push_back(ttStubRefs); } @@ -417,19 +409,10 @@ namespace trklet { const TTBV hwRZ(hw, widthRZ, 0, true); hw >>= widthRZ; const TTBV hwPhi(hw, widthPhi, 0, true); - bool overflowPhi = abs(phii - hwPhi.val()) > pow(2, widthPhi) * 7. / 8.; - bool overflowZ = abs(rzi - hwRZ.val()) > pow(2, widthRZ) * 7. / 8.; const double hwPhid = hwPhi.val(basePhi); const double hwRZd = hwRZ.val(baseRZ); const vector resolutions = {phid - hwPhid, rzd - hwRZd}; - const vector overflows = {overflowPhi, overflowZ}; for (Resolution r : AllResolution) { - if (overflows[r]) { - cout << rzi << " " << hwRZ.val() << " " << barrel << " " << setup_->psModule(frameStub.first) << " " - << frameStub.second << endl; - nOverflows_[r]++; - continue; - } hisResolution_[r]->Fill(resolutions[r]); profResolution_[r]->Fill(ttPos.z(), ttPos.perp(), abs(resolutions[r])); } diff --git a/L1Trigger/TrackFindingTracklet/test/BuildFile.xml b/L1Trigger/TrackFindingTracklet/test/BuildFile.xml index e0e2cfdb7e047..dca83b86b61b0 100644 --- a/L1Trigger/TrackFindingTracklet/test/BuildFile.xml +++ b/L1Trigger/TrackFindingTracklet/test/BuildFile.xml @@ -1,26 +1,19 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + diff --git a/L1Trigger/TrackFindingTracklet/test/HybridTracksNewKF_cfg.py b/L1Trigger/TrackFindingTracklet/test/HybridTracksNewKF_cfg.py index b1c007be27a36..5ef7ea33154b3 100644 --- 
a/L1Trigger/TrackFindingTracklet/test/HybridTracksNewKF_cfg.py +++ b/L1Trigger/TrackFindingTracklet/test/HybridTracksNewKF_cfg.py @@ -48,11 +48,13 @@ process.dtc = cms.Sequence( process.TrackerDTCProducer + process.TrackerDTCAnalyzer ) process.tracklet = cms.Sequence( process.L1THybridTracks + process.TrackFindingTrackletAnalyzerTracklet ) process.TBout = cms.Sequence( process.TrackFindingTrackletProducerTBout + process.TrackFindingTrackletAnalyzerTBout ) -process.interIn = cms.Sequence( process.TrackFindingTrackletProducerKFin + process.TrackFindingTrackletAnalyzerKFin ) +process.drin = cms.Sequence( process.TrackFindingTrackletProducerDRin + process.TrackFindingTrackletAnalyzerDRin ) +process.dr = cms.Sequence( process.TrackFindingTrackletProducerDR + process.TrackFindingTrackletAnalyzerDR ) +process.kfin = cms.Sequence( process.TrackFindingTrackletProducerKFin + process.TrackFindingTrackletAnalyzerKFin ) process.kf = cms.Sequence( process.TrackFindingTrackletProducerKF + process.TrackFindingTrackletAnalyzerKF ) -process.TTTracks = cms.Sequence( process.TrackFindingTrackletProducerTT + process.TrackFindingTrackletProducerAS + process.TrackTriggerAssociatorTracks ) -process.interOut = cms.Sequence( process.TrackFindingTrackletProducerKFout + process.TrackFindingTrackletAnalyzerKFout ) -process.tt = cms.Path( process.mc + process.dtc + process.tracklet + process.TBout + process.interIn + process.kf + process.TTTracks + process.interOut ) +#process.TTTracks = cms.Sequence( process.TrackFindingTrackletProducerTT + process.TrackFindingTrackletProducerAS + process.TrackTriggerAssociatorTracks ) +#process.interOut = cms.Sequence( process.TrackFindingTrackletProducerKFout + process.TrackFindingTrackletAnalyzerKFout ) +process.tt = cms.Path( process.mc + process.dtc + process.tracklet + process.TBout + process.drin + process.dr + process.kfin + process.kf )#+ process.TTTracks + process.interOut ) process.schedule = cms.Schedule( process.tt ) # create options @@ -61,11 
+63,9 @@ # specify input MC #from MCsamples.Scripts.getCMSdata_cfi import * #from MCsamples.Scripts.getCMSlocaldata_cfi import * -#from MCsamples.RelVal_1130_D76.PU200_TTbar_14TeV_cfi import * +#from MCsamples.RelVal_1260_D88.PU200_TTbar_14TeV_cfi import * #inputMC = getCMSdataFromCards() -inputMC = [ -'/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root' -] +inputMC = ["/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root"] options.register( 'inputMC', inputMC, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Files to be processed" ) # specify number of events to process. options.register( 'Events',100,VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.int, "Number of Events to analyze" ) diff --git a/L1Trigger/TrackFindingTracklet/test/HybridTracks_cfg.py b/L1Trigger/TrackFindingTracklet/test/HybridTracks_cfg.py index 6cfb82bc8b37a..0c49a60bf6167 100644 --- a/L1Trigger/TrackFindingTracklet/test/HybridTracks_cfg.py +++ b/L1Trigger/TrackFindingTracklet/test/HybridTracks_cfg.py @@ -31,10 +31,8 @@ # ---------------------------------------------------------------------------------- process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10)) -Source_Files = cms.untracked.vstring( -"/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root" -) -process.source = cms.Source("PoolSource", fileNames = Source_Files) +inputMC = ["/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root"] +process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring(*inputMC)) # 
---------------------------------------------------------------------------------- # DTC emulation diff --git a/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker.cc b/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker.cc index 683da96a6abfb..70955d51fc469 100644 --- a/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker.cc +++ b/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker.cc @@ -160,8 +160,11 @@ class L1TrackNtupleMaker : public one::EDAnalyzer* m_trk_d0; // (filled if L1Tk_nPar==5, else 999) std::vector* m_trk_z0; std::vector* m_trk_chi2; + std::vector* m_trk_chi2_dof; std::vector* m_trk_chi2rphi; + std::vector* m_trk_chi2rphi_dof; std::vector* m_trk_chi2rz; + std::vector* m_trk_chi2rz_dof; std::vector* m_trk_bendchi2; std::vector* m_trk_nstub; std::vector* m_trk_lhits; @@ -220,8 +223,11 @@ class L1TrackNtupleMaker : public one::EDAnalyzer* m_matchtrk_d0; //this variable is only filled if L1Tk_nPar==5 std::vector* m_matchtrk_z0; std::vector* m_matchtrk_chi2; + std::vector* m_matchtrk_chi2_dof; std::vector* m_matchtrk_chi2rphi; + std::vector* m_matchtrk_chi2rphi_dof; std::vector* m_matchtrk_chi2rz; + std::vector* m_matchtrk_chi2rz_dof; std::vector* m_matchtrk_bendchi2; std::vector* m_matchtrk_MVA1; std::vector* m_matchtrk_nstub; @@ -322,6 +328,113 @@ L1TrackNtupleMaker::~L1TrackNtupleMaker() {} void L1TrackNtupleMaker::endJob() { // things to be done at the exit of the event Loop edm::LogVerbatim("Tracklet") << "L1TrackNtupleMaker::endJob"; + + // clean up + delete m_trk_pt; + delete m_trk_eta; + delete m_trk_phi; + delete m_trk_z0; + delete m_trk_d0; + delete m_trk_chi2; + delete m_trk_chi2_dof; + delete m_trk_chi2rphi; + delete m_trk_chi2rphi_dof; + delete m_trk_chi2rz; + delete m_trk_chi2rz_dof; + delete m_trk_bendchi2; + delete m_trk_nstub; + delete m_trk_lhits; + delete m_trk_dhits; + delete m_trk_seed; + delete m_trk_hitpattern; + delete m_trk_lhits_hitpattern; + delete m_trk_dhits_hitpattern; + delete m_trk_nPSstub_hitpattern; + delete 
m_trk_n2Sstub_hitpattern; + delete m_trk_nLostPSstub_hitpattern; + delete m_trk_nLost2Sstub_hitpattern; + delete m_trk_nLoststub_V1_hitpattern; + delete m_trk_nLoststub_V2_hitpattern; + delete m_trk_phiSector; + delete m_trk_etaSector; + delete m_trk_genuine; + delete m_trk_loose; + delete m_trk_unknown; + delete m_trk_combinatoric; + delete m_trk_fake; + delete m_trk_MVA1; + delete m_trk_matchtp_pdgid; + delete m_trk_matchtp_pt; + delete m_trk_matchtp_eta; + delete m_trk_matchtp_phi; + delete m_trk_matchtp_z0; + delete m_trk_matchtp_dxy; + delete m_trk_matchtp_d0; + delete m_trk_injet; + delete m_trk_injet_highpt; + delete m_trk_injet_vhighpt; + + delete m_tp_pt; + delete m_tp_eta; + delete m_tp_phi; + delete m_tp_dxy; + delete m_tp_d0; + delete m_tp_z0; + delete m_tp_d0_prod; + delete m_tp_z0_prod; + delete m_tp_pdgid; + delete m_tp_nmatch; + delete m_tp_nstub; + delete m_tp_eventid; + delete m_tp_charge; + delete m_tp_injet; + delete m_tp_injet_highpt; + delete m_tp_injet_vhighpt; + + delete m_matchtrk_pt; + delete m_matchtrk_eta; + delete m_matchtrk_phi; + delete m_matchtrk_z0; + delete m_matchtrk_d0; + delete m_matchtrk_chi2; + delete m_matchtrk_chi2_dof; + delete m_matchtrk_chi2rphi; + delete m_matchtrk_chi2rphi_dof; + delete m_matchtrk_chi2rz; + delete m_matchtrk_chi2rz_dof; + delete m_matchtrk_bendchi2; + delete m_matchtrk_MVA1; + delete m_matchtrk_nstub; + delete m_matchtrk_dhits; + delete m_matchtrk_lhits; + delete m_matchtrk_seed; + delete m_matchtrk_hitpattern; + delete m_matchtrk_injet; + delete m_matchtrk_injet_highpt; + delete m_matchtrk_injet_vhighpt; + + delete m_allstub_x; + delete m_allstub_y; + delete m_allstub_z; + delete m_allstub_isBarrel; + delete m_allstub_layer; + delete m_allstub_isPSmodule; + delete m_allstub_trigDisplace; + delete m_allstub_trigOffset; + delete m_allstub_trigPos; + delete m_allstub_trigBend; + delete m_allstub_matchTP_pdgid; + delete m_allstub_matchTP_pt; + delete m_allstub_matchTP_eta; + delete m_allstub_matchTP_phi; + 
delete m_allstub_genuine; + + delete m_jet_eta; + delete m_jet_phi; + delete m_jet_pt; + delete m_jet_tp_sumpt; + delete m_jet_trk_sumpt; + delete m_jet_matchtrk_sumpt; } //////////// @@ -344,8 +457,11 @@ void L1TrackNtupleMaker::beginJob() { m_trk_z0 = new std::vector; m_trk_d0 = new std::vector; m_trk_chi2 = new std::vector; + m_trk_chi2_dof = new std::vector; m_trk_chi2rphi = new std::vector; + m_trk_chi2rphi_dof = new std::vector; m_trk_chi2rz = new std::vector; + m_trk_chi2rz_dof = new std::vector; m_trk_bendchi2 = new std::vector; m_trk_nstub = new std::vector; m_trk_lhits = new std::vector; @@ -402,8 +518,11 @@ void L1TrackNtupleMaker::beginJob() { m_matchtrk_z0 = new std::vector; m_matchtrk_d0 = new std::vector; m_matchtrk_chi2 = new std::vector; + m_matchtrk_chi2_dof = new std::vector; m_matchtrk_chi2rphi = new std::vector; + m_matchtrk_chi2rphi_dof = new std::vector; m_matchtrk_chi2rz = new std::vector; + m_matchtrk_chi2rz_dof = new std::vector; m_matchtrk_bendchi2 = new std::vector; m_matchtrk_MVA1 = new std::vector; m_matchtrk_nstub = new std::vector; @@ -451,8 +570,11 @@ void L1TrackNtupleMaker::beginJob() { eventTree->Branch("trk_d0", &m_trk_d0); eventTree->Branch("trk_z0", &m_trk_z0); eventTree->Branch("trk_chi2", &m_trk_chi2); + eventTree->Branch("trk_chi2_dof", &m_trk_chi2_dof); eventTree->Branch("trk_chi2rphi", &m_trk_chi2rphi); + eventTree->Branch("trk_chi2rphi_dof", &m_trk_chi2rphi_dof); eventTree->Branch("trk_chi2rz", &m_trk_chi2rz); + eventTree->Branch("trk_chi2rz_dof", &m_trk_chi2rz_dof); eventTree->Branch("trk_bendchi2", &m_trk_bendchi2); eventTree->Branch("trk_nstub", &m_trk_nstub); eventTree->Branch("trk_lhits", &m_trk_lhits); @@ -514,8 +636,11 @@ void L1TrackNtupleMaker::beginJob() { eventTree->Branch("matchtrk_z0", &m_matchtrk_z0); eventTree->Branch("matchtrk_d0", &m_matchtrk_d0); eventTree->Branch("matchtrk_chi2", &m_matchtrk_chi2); + eventTree->Branch("matchtrk_chi2_dof", &m_matchtrk_chi2_dof); eventTree->Branch("matchtrk_chi2rphi", 
&m_matchtrk_chi2rphi); + eventTree->Branch("matchtrk_chi2rphi_dof", &m_matchtrk_chi2rphi_dof); eventTree->Branch("matchtrk_chi2rz", &m_matchtrk_chi2rz); + eventTree->Branch("matchtrk_chi2rz_dof", &m_matchtrk_chi2rz_dof); eventTree->Branch("matchtrk_bendchi2", &m_matchtrk_bendchi2); eventTree->Branch("matchtrk_MVA1", &m_matchtrk_MVA1); eventTree->Branch("matchtrk_nstub", &m_matchtrk_nstub); @@ -587,8 +712,11 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup m_trk_d0->clear(); m_trk_z0->clear(); m_trk_chi2->clear(); + m_trk_chi2_dof->clear(); m_trk_chi2rphi->clear(); + m_trk_chi2rphi_dof->clear(); m_trk_chi2rz->clear(); + m_trk_chi2rz_dof->clear(); m_trk_bendchi2->clear(); m_trk_nstub->clear(); m_trk_lhits->clear(); @@ -646,8 +774,11 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup m_matchtrk_z0->clear(); m_matchtrk_d0->clear(); m_matchtrk_chi2->clear(); + m_matchtrk_chi2_dof->clear(); m_matchtrk_chi2rphi->clear(); + m_matchtrk_chi2rphi_dof->clear(); m_matchtrk_chi2rz->clear(); + m_matchtrk_chi2rz_dof->clear(); m_matchtrk_bendchi2->clear(); m_matchtrk_MVA1->clear(); m_matchtrk_nstub->clear(); @@ -912,6 +1043,13 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup float tmp_trk_phi = iterL1Track->momentum().phi(); float tmp_trk_z0 = iterL1Track->z0(); //cm float tmp_trk_tanL = iterL1Track->tanL(); + bool usingNewKF = hphSetup->useNewKF(); + if (usingNewKF) { + // Skip crazy tracks to avoid crash (as NewKF applies no cuts to kill them). 
+ constexpr float crazy_z0_cut = 30.; // Cut to kill any crazy tracks found by New KF (which applies no cuts) + if (fabs(tmp_trk_z0) > crazy_z0_cut) + continue; + } int tmp_trk_hitpattern = 0; tmp_trk_hitpattern = (int)iterL1Track->hitPattern(); @@ -951,6 +1089,12 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup std::vector >, TTStub > > stubRefs = iterL1Track->getStubRefs(); int tmp_trk_nstub = (int)stubRefs.size(); + int ndof = 2 * tmp_trk_nstub - L1Tk_nPar; + int ndofrphi = tmp_trk_nstub - L1Tk_nPar + 2; + int ndofrz = tmp_trk_nstub - 2; + float tmp_trk_chi2_dof = (float)tmp_trk_chi2 / ndof; + float tmp_trk_chi2rphi_dof = (float)tmp_trk_chi2rphi / ndofrphi; + float tmp_trk_chi2rz_dof = (float)tmp_trk_chi2rz / ndofrz; int tmp_trk_seed = 0; tmp_trk_seed = (int)iterL1Track->trackSeedType(); @@ -1034,8 +1178,11 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup else m_trk_d0->push_back(999.); m_trk_chi2->push_back(tmp_trk_chi2); + m_trk_chi2_dof->push_back(tmp_trk_chi2_dof); m_trk_chi2rphi->push_back(tmp_trk_chi2rphi); + m_trk_chi2rphi_dof->push_back(tmp_trk_chi2rphi_dof); m_trk_chi2rz->push_back(tmp_trk_chi2rz); + m_trk_chi2rz_dof->push_back(tmp_trk_chi2rz_dof); m_trk_bendchi2->push_back(tmp_trk_bendchi2); m_trk_MVA1->push_back(tmp_trk_MVA1); m_trk_nstub->push_back(tmp_trk_nstub); @@ -1439,8 +1586,11 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup float tmp_matchtrk_z0 = -999; float tmp_matchtrk_d0 = -999; float tmp_matchtrk_chi2 = -999; + float tmp_matchtrk_chi2_dof = -999; float tmp_matchtrk_chi2rphi = -999; + float tmp_matchtrk_chi2rphi_dof = -999; float tmp_matchtrk_chi2rz = -999; + float tmp_matchtrk_chi2rz_dof = -999; float tmp_matchtrk_bendchi2 = -999; float tmp_matchtrk_MVA1 = -999; int tmp_matchtrk_nstub = -999; @@ -1473,6 +1623,13 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup tmp_matchtrk_seed = 
(int)matchedTracks.at(i_track)->trackSeedType(); tmp_matchtrk_hitpattern = (int)matchedTracks.at(i_track)->hitPattern(); + int ndof = 2 * tmp_matchtrk_nstub - L1Tk_nPar; + int ndofrphi = tmp_matchtrk_nstub - L1Tk_nPar + 2; + int ndofrz = tmp_matchtrk_nstub - 2; + tmp_matchtrk_chi2_dof = (float)tmp_matchtrk_chi2 / ndof; + tmp_matchtrk_chi2rphi_dof = (float)tmp_matchtrk_chi2rphi / ndofrphi; + tmp_matchtrk_chi2rz_dof = (float)tmp_matchtrk_chi2rz / ndofrz; + // ------------------------------------------------------------------------------------------ //float tmp_matchtrk_bend_chi2 = 0; @@ -1534,6 +1691,9 @@ void L1TrackNtupleMaker::analyze(const edm::Event& iEvent, const edm::EventSetup m_matchtrk_lhits->push_back(tmp_matchtrk_lhits); m_matchtrk_seed->push_back(tmp_matchtrk_seed); m_matchtrk_hitpattern->push_back(tmp_matchtrk_hitpattern); + m_matchtrk_chi2_dof->push_back(tmp_matchtrk_chi2_dof); + m_matchtrk_chi2rphi_dof->push_back(tmp_matchtrk_chi2rphi_dof); + m_matchtrk_chi2rz_dof->push_back(tmp_matchtrk_chi2rz_dof); // ---------------------------------------------------------------------------------------------- // for tracking in jets diff --git a/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker_cfg.py b/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker_cfg.py index 4e2851197c5f4..761c5ad402f55 100644 --- a/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker_cfg.py +++ b/L1Trigger/TrackFindingTracklet/test/L1TrackNtupleMaker_cfg.py @@ -66,23 +66,24 @@ #from MCsamples.Scripts.getCMSlocaldata_cfi import * if GEOMETRY == "D76": + + # Read specified .root file: + inputMC = ["/store/relval/CMSSW_11_3_0_pre6/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_113X_mcRun4_realistic_v6_2026D76PU200-v1/00000/00026541-6200-4eed-b6f8-d3a1fd720e9c.root"] + +elif GEOMETRY == "D88": + # Read data from card files (defines getCMSdataFromCards()): - #from MCsamples.RelVal_1130_D76.PU200_TTbar_14TeV_cfi import * + #from MCsamples.RelVal_1260_D88.PU200_TTbar_14TeV_cfi import * 
#inputMC = getCMSdataFromCards() # Or read .root files from directory on local computer: - #dirName = "$scratchmc/MCsamples1130_D76/RelVal/TTbar/PU200/" + #dirName = "$scratchmc/MCsamples1260_D88/RelVal/TTbar/PU200/" #inputMC=getCMSlocaldata(dirName) # Or read specified dataset (accesses CMS DB, so use this method only occasionally): - #dataName="/RelValTTbar_14TeV/CMSSW_11_3_0_pre6-PU_113X_mcRun4_realistic_v6_2026D76PU200-v1/GEN-SIM-DIGI-RAW" + #dataName="/RelValTTbar_14TeV/CMSSW_12_6_0-PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/GEN-SIM-DIGI-RAW" #inputMC=getCMSdata(dataName) - # Or read specified .root file: - inputMC = ["/store/relval/CMSSW_11_3_0_pre6/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_113X_mcRun4_realistic_v6_2026D76PU200-v1/00000/00026541-6200-4eed-b6f8-d3a1fd720e9c.root"] - -elif GEOMETRY == "D88": - # Read specified .root file: inputMC = ["/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root"] @@ -173,18 +174,20 @@ L1TRK_LABEL = process.TrackFindingTrackletProducer_params.BranchAcceptedTracks.value() L1TRUTH_NAME = "TTTrackAssociatorFromPixelDigis" process.TTTrackAssociatorFromPixelDigis.TTTracks = cms.VInputTag( cms.InputTag(L1TRK_NAME, L1TRK_LABEL) ) - process.HybridNewKF = cms.Sequence(process.L1THybridTracks + process.TrackFindingTrackletProducerTBout + process.TrackFindingTrackletProducerKFin + process.TrackFindingTrackletProducerKF + process.TrackFindingTrackletProducerTT + process.TrackFindingTrackletProducerAS + process.TrackFindingTrackletProducerKFout) + process.HybridNewKF = cms.Sequence(process.L1THybridTracks + process.TrackFindingTrackletProducerTBout + process.TrackFindingTrackletProducerDRin + process.TrackFindingTrackletProducerDR + process.TrackFindingTrackletProducerKFin + process.TrackFindingTrackletProducerKF + process.TrackFindingTrackletProducerTT + process.TrackFindingTrackletProducerAS + 
process.TrackFindingTrackletProducerKFout) process.TTTracksEmulation = cms.Path(process.HybridNewKF) #process.TTTracksEmulationWithTruth = cms.Path(process.HybridNewKF + process.TrackTriggerAssociatorTracks) # Optionally include code producing performance plots & end-of-job summary. process.load( 'SimTracker.TrackTriggerAssociation.StubAssociator_cff' ) process.load( 'L1Trigger.TrackFindingTracklet.Analyzer_cff' ) - process.TTTracksEmulationWithTruth = cms.Path(process.HybridNewKF + process.TrackTriggerAssociatorTracks + process.StubAssociator + process.TrackFindingTrackletAnalyzerTracklet + process.TrackFindingTrackletAnalyzerTBout + process.TrackFindingTrackletAnalyzerKFin + process.TrackFindingTrackletAnalyzerKF + process.TrackFindingTrackletAnalyzerKFout) + process.TTTracksEmulationWithTruth = cms.Path(process.HybridNewKF + process.TrackTriggerAssociatorTracks + process.StubAssociator + process.TrackFindingTrackletAnalyzerTracklet + process.TrackFindingTrackletAnalyzerTBout + process.TrackFindingTrackletAnalyzerDRin + process.TrackFindingTrackletAnalyzerDR + process.TrackFindingTrackletAnalyzerKFin + process.TrackFindingTrackletAnalyzerKF + process.TrackFindingTrackletAnalyzerKFout) from L1Trigger.TrackFindingTracklet.Customize_cff import * if (L1TRKALGO == 'HYBRID_NEWKF'): fwConfig( process ) if (L1TRKALGO == 'HYBRID_REDUCED'): reducedConfig( process ) + # Needed by L1TrackNtupleMaker + process.HitPatternHelperSetup.useNewKF = True # LEGACY ALGORITHM (EXPERTS ONLY): TRACKLET elif (L1TRKALGO == 'TRACKLET'): diff --git a/L1Trigger/TrackFindingTracklet/test/L1TrackNtuplePlot.C b/L1Trigger/TrackFindingTracklet/test/L1TrackNtuplePlot.C index f026c2ce6cd31..58a048c1066ac 100644 --- a/L1Trigger/TrackFindingTracklet/test/L1TrackNtuplePlot.C +++ b/L1Trigger/TrackFindingTracklet/test/L1TrackNtuplePlot.C @@ -162,8 +162,11 @@ void L1TrackNtuplePlot(TString type, vector* matchtrk_d0; vector* matchtrk_z0; vector* matchtrk_chi2; + vector* matchtrk_chi2_dof; vector* 
matchtrk_chi2rphi; + vector* matchtrk_chi2rphi_dof; vector* matchtrk_chi2rz; + vector* matchtrk_chi2rz_dof; vector* matchtrk_nstub; vector* matchtrk_lhits; vector* matchtrk_dhits; @@ -178,8 +181,11 @@ void L1TrackNtuplePlot(TString type, vector* trk_eta; vector* trk_phi; vector* trk_chi2; + vector* trk_chi2_dof; vector* trk_chi2rphi; + vector* trk_chi2rphi_dof; vector* trk_chi2rz; + vector* trk_chi2rz_dof; vector* trk_nstub; vector* trk_lhits; vector* trk_dhits; @@ -213,8 +219,11 @@ void L1TrackNtuplePlot(TString type, TBranch* b_matchtrk_d0; TBranch* b_matchtrk_z0; TBranch* b_matchtrk_chi2; + TBranch* b_matchtrk_chi2_dof; TBranch* b_matchtrk_chi2rphi; + TBranch* b_matchtrk_chi2rphi_dof; TBranch* b_matchtrk_chi2rz; + TBranch* b_matchtrk_chi2rz_dof; TBranch* b_matchtrk_nstub; TBranch* b_matchtrk_lhits; TBranch* b_matchtrk_dhits; @@ -228,8 +237,11 @@ void L1TrackNtuplePlot(TString type, TBranch* b_trk_eta; TBranch* b_trk_phi; TBranch* b_trk_chi2; + TBranch* b_trk_chi2_dof; TBranch* b_trk_chi2rphi; + TBranch* b_trk_chi2rphi_dof; TBranch* b_trk_chi2rz; + TBranch* b_trk_chi2rz_dof; TBranch* b_trk_nstub; TBranch* b_trk_lhits; TBranch* b_trk_dhits; @@ -263,8 +275,11 @@ void L1TrackNtuplePlot(TString type, matchtrk_d0 = 0; matchtrk_z0 = 0; matchtrk_chi2 = 0; + matchtrk_chi2_dof = 0; matchtrk_chi2rphi = 0; + matchtrk_chi2rphi_dof = 0; matchtrk_chi2rz = 0; + matchtrk_chi2rz_dof = 0; matchtrk_nstub = 0; matchtrk_lhits = 0; matchtrk_dhits = 0; @@ -278,8 +293,11 @@ void L1TrackNtuplePlot(TString type, trk_eta = 0; trk_phi = 0; trk_chi2 = 0; + trk_chi2_dof = 0; trk_chi2rphi = 0; + trk_chi2rphi_dof = 0; trk_chi2rz = 0; + trk_chi2rz_dof = 0; trk_nstub = 0; trk_lhits = 0; trk_dhits = 0; @@ -319,8 +337,11 @@ void L1TrackNtuplePlot(TString type, tree->SetBranchAddress("loosematchtrk_d0", &matchtrk_d0, &b_matchtrk_d0); tree->SetBranchAddress("loosematchtrk_z0", &matchtrk_z0, &b_matchtrk_z0); tree->SetBranchAddress("loosematchtrk_chi2", &matchtrk_chi2, &b_matchtrk_chi2); + 
tree->SetBranchAddress("loosematchtrk_chi2_dof", &matchtrk_chi2_dof, &b_matchtrk_chi2_dof); tree->SetBranchAddress("loosematchtrk_chi2rphi", &matchtrk_chi2rphi, &b_matchtrk_chi2rphi); + tree->SetBranchAddress("loosematchtrk_chi2rphi_dof", &matchtrk_chi2rphi_dof, &b_matchtrk_chi2rphi_dof); tree->SetBranchAddress("loosematchtrk_chi2rz", &matchtrk_chi2rz, &b_matchtrk_chi2rz); + tree->SetBranchAddress("loosematchtrk_chi2rz_dof", &matchtrk_chi2rz_dof, &b_matchtrk_chi2rz_dof); tree->SetBranchAddress("loosematchtrk_nstub", &matchtrk_nstub, &b_matchtrk_nstub); tree->SetBranchAddress("loosematchtrk_seed", &matchtrk_seed, &b_matchtrk_seed); tree->SetBranchAddress("loosematchtrk_hitpattern", &matchtrk_hitpattern, &b_matchtrk_hitpattern); @@ -336,8 +357,11 @@ void L1TrackNtuplePlot(TString type, tree->SetBranchAddress("matchtrk_d0", &matchtrk_d0, &b_matchtrk_d0); tree->SetBranchAddress("matchtrk_z0", &matchtrk_z0, &b_matchtrk_z0); tree->SetBranchAddress("matchtrk_chi2", &matchtrk_chi2, &b_matchtrk_chi2); + tree->SetBranchAddress("matchtrk_chi2_dof", &matchtrk_chi2_dof, &b_matchtrk_chi2_dof); tree->SetBranchAddress("matchtrk_chi2rphi", &matchtrk_chi2rphi, &b_matchtrk_chi2rphi); + tree->SetBranchAddress("matchtrk_chi2rphi_dof", &matchtrk_chi2rphi_dof, &b_matchtrk_chi2rphi_dof); tree->SetBranchAddress("matchtrk_chi2rz", &matchtrk_chi2rz, &b_matchtrk_chi2rz); + tree->SetBranchAddress("matchtrk_chi2rz_dof", &matchtrk_chi2rz_dof, &b_matchtrk_chi2rz_dof); tree->SetBranchAddress("matchtrk_nstub", &matchtrk_nstub, &b_matchtrk_nstub); tree->SetBranchAddress("matchtrk_lhits", &matchtrk_lhits, &b_matchtrk_lhits); tree->SetBranchAddress("matchtrk_dhits", &matchtrk_dhits, &b_matchtrk_dhits); @@ -354,8 +378,11 @@ void L1TrackNtuplePlot(TString type, tree->SetBranchAddress("trk_eta", &trk_eta, &b_trk_eta); tree->SetBranchAddress("trk_phi", &trk_phi, &b_trk_phi); tree->SetBranchAddress("trk_chi2", &trk_chi2, &b_trk_chi2); + tree->SetBranchAddress("trk_chi2_dof", &trk_chi2_dof, 
&b_trk_chi2_dof); tree->SetBranchAddress("trk_chi2rphi", &trk_chi2rphi, &b_trk_chi2rphi); + tree->SetBranchAddress("trk_chi2rphi_dof", &trk_chi2rphi_dof, &b_trk_chi2rphi_dof); tree->SetBranchAddress("trk_chi2rz", &trk_chi2rz, &b_trk_chi2rz); + tree->SetBranchAddress("trk_chi2rz_dof", &trk_chi2rz_dof, &b_trk_chi2rz_dof); tree->SetBranchAddress("trk_nstub", &trk_nstub, &b_trk_nstub); tree->SetBranchAddress("trk_lhits", &trk_lhits, &b_trk_lhits); tree->SetBranchAddress("trk_dhits", &trk_dhits, &b_trk_dhits); @@ -1051,13 +1078,12 @@ void L1TrackNtuplePlot(TString type, h_trk_eta->Fill(trk_eta->at(it)); // fill all trk chi2 & chi2/dof histograms, including for chi2 r-phi and chi2 r-z - int ndof = 2 * trk_nstub->at(it) - 4; float chi2 = trk_chi2->at(it); - float chi2dof = (float)chi2 / ndof; + float chi2dof = trk_chi2_dof->at(it); float chi2rphi = trk_chi2rphi->at(it); - float chi2rphidof = (float)chi2rphi / ndof; + float chi2rphidof = trk_chi2rphi_dof->at(it); float chi2rz = trk_chi2rz->at(it); - float chi2rzdof = (float)chi2rz / ndof; + float chi2rzdof = trk_chi2rz_dof->at(it); // create overflow bins by restricting range of chi2 int chi2Overflow = 100; @@ -1111,9 +1137,8 @@ void L1TrackNtuplePlot(TString type, continue; // Uncomment these cuts to see effect on rate & fake rate. - //int ndof = 2*trk_nstub->at(it)-4; //if (trk_chi2->at(it) > L1Tk_maxChi2) continue; - //if (trk_chi2->at(it)/ndof > L1Tk_maxChi2dof) continue; + //if (trk_chi2_dof->at(it) > L1Tk_maxChi2dof) continue; //if (trk_nstub->at(it) < L1Tk_minNstub) continue; // Tracklet & Hybrid have 9 sectors, but TMTT has 18 (with sectors 0 & 1 in nonant 0 etc). 
@@ -1319,13 +1344,12 @@ void L1TrackNtuplePlot(TString type, // ---------------------------------------------------------------------------------------------------------------- // fill matchtrk chi2 & chi2/dof histograms before making chi2 cut - int ndof = 2 * matchtrk_nstub->at(it) - 4; float chi2 = matchtrk_chi2->at(it); - float chi2dof = (float)chi2 / ndof; + float chi2dof = matchtrk_chi2_dof->at(it); float chi2rphi = matchtrk_chi2rphi->at(it); - float chi2rphidof = (float)chi2rphi / ndof; + float chi2rphidof = matchtrk_chi2rphi_dof->at(it); float chi2rz = matchtrk_chi2rz->at(it); - float chi2rzdof = (float)chi2rz / ndof; + float chi2rzdof = matchtrk_chi2rz_dof->at(it); // create overflow bins by restricting range of chi2 int chi2Overflow = 100; @@ -1392,7 +1416,7 @@ void L1TrackNtuplePlot(TString type, // cut on chi2? if (matchtrk_chi2->at(it) > L1Tk_maxChi2) continue; - if (matchtrk_chi2->at(it) / ndof > L1Tk_maxChi2dof) + if (matchtrk_chi2_dof->at(it) > L1Tk_maxChi2dof) continue; // use tight quality cut selection? 
diff --git a/L1Trigger/TrackFindingTracklet/test/demonstrator_cfg.py b/L1Trigger/TrackFindingTracklet/test/demonstrator_cfg.py index 9c29d51308110..fe180df9069d5 100644 --- a/L1Trigger/TrackFindingTracklet/test/demonstrator_cfg.py +++ b/L1Trigger/TrackFindingTracklet/test/demonstrator_cfg.py @@ -22,16 +22,16 @@ #--- Load code that compares s/w with f/w process.load( 'L1Trigger.TrackFindingTracklet.Demonstrator_cff' ) from L1Trigger.TrackFindingTracklet.Customize_cff import * -reducedConfig( process ) -#fwConfig( process ) +#reducedConfig( process ) +fwConfig( process ) # build schedule process.tt = cms.Sequence ( process.TrackerDTCProducer + process.L1THybridTracks + process.TrackFindingTrackletProducerIRin + process.TrackFindingTrackletProducerTBout - + process.TrackFindingTrackletProducerKFin - + process.TrackFindingTrackletProducerKF + + process.TrackFindingTrackletProducerDRin + + process.TrackFindingTrackletProducerDR ) process.demo = cms.Path( process.tt + process.TrackerTFPDemonstrator ) process.schedule = cms.Schedule( process.demo ) @@ -40,7 +40,8 @@ import FWCore.ParameterSet.VarParsing as VarParsing options = VarParsing.VarParsing( 'analysis' ) # specify input MC - inputMC = ["/store/relval/CMSSW_12_6_0_pre4/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v2_2026D88PU200-v1/2590000/00b3d04b-4c7b-4506-8d82-9538fb21ee19.root"] +inputMC = ["/store/relval/CMSSW_12_6_0_pre4/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v2_2026D88PU200-v1/2590000/00b3d04b-4c7b-4506-8d82-9538fb21ee19.root"] + options.register( 'inputMC', inputMC, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Files to be processed" ) # specify number of events to process. 
options.register( 'Events',100,VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.int, "Number of Events to analyze" ) @@ -51,7 +52,7 @@ process.source = cms.Source( "PoolSource", fileNames = cms.untracked.vstring( options.inputMC ), - skipEvents = cms.untracked.uint32( 1 ), + #skipEvents = cms.untracked.uint32( 1 ), secondaryFileNames = cms.untracked.vstring(), duplicateCheckMode = cms.untracked.string( 'noDuplicateCheck' ) ) diff --git a/L1Trigger/TrackTrigger/BuildFile.xml b/L1Trigger/TrackTrigger/BuildFile.xml index d05d39ed5d315..16ddda0798908 100644 --- a/L1Trigger/TrackTrigger/BuildFile.xml +++ b/L1Trigger/TrackTrigger/BuildFile.xml @@ -21,6 +21,8 @@ + + diff --git a/L1Trigger/TrackTrigger/interface/L1TrackQuality.h b/L1Trigger/TrackTrigger/interface/L1TrackQuality.h index 5763a825e1ab4..0d697687a812f 100644 --- a/L1Trigger/TrackTrigger/interface/L1TrackQuality.h +++ b/L1Trigger/TrackTrigger/interface/L1TrackQuality.h @@ -1,6 +1,5 @@ /* Track Quality Header file - C.Brown 28/07/20 */ @@ -19,14 +18,18 @@ C.Brown 28/07/20 #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "DataFormats/L1TrackTrigger/interface/TTTrack.h" +#include "DataFormats/L1TrackTrigger/interface/TTTrack_TrackWord.h" #include "DataFormats/L1TrackTrigger/interface/TTTypes.h" #include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h" #include +#include "conifer.h" +#include "ap_fixed.h" + class L1TrackQuality { public: // Enum class used for determining prediction behaviour in setL1TrackQuality - enum class QualityAlgorithm { Cut, GBDT, NN, None }; + enum class QualityAlgorithm { Cut, GBDT, GBDT_cpp, NN, None }; //Default Constructor L1TrackQuality(); @@ -42,7 +45,10 @@ class L1TrackQuality { // Passed by reference a track without MVA filled, method fills the track's MVA field void setL1TrackQuality(TTTrack& aTrack); - + // Function to run the BDT in isolation allowing a feature vector in the ap_fixed 
datatype to be passed + // and a single output to be returned which is then used to fill the bits in the Track Word for situations + // where a TTTrack datatype is unavailable to be passed to the track quality + float runEmulatedTQ(std::vector> inputFeatures); // To set private member data void setCutParameters(std::string const& AlgorithmString, float maxZ0, diff --git a/L1Trigger/TrackTrigger/interface/Setup.h b/L1Trigger/TrackTrigger/interface/Setup.h index 584e8823e7a68..0f06238eba279 100644 --- a/L1Trigger/TrackTrigger/interface/Setup.h +++ b/L1Trigger/TrackTrigger/interface/Setup.h @@ -302,6 +302,10 @@ namespace tt { double hybridRangeR() const { return hybridRangesR_[SensorModule::DiskPS]; } // smallest stub radius after TrackBuilder in cm double tbInnerRadius() const { return tbInnerRadius_; } + // center radius of outer tracker endcap 2S diks strips + double disk2SR(int layerId, int r) const { return disk2SRs_.at(layerId).at(r); } + // number of bits used for stub r w.r.t layer/disk centre for module types (barrelPS, barrel2S, diskPS, disk2S) after TrackBuilder + int tbWidthR(SensorModule::Type type) const { return tbWidthsR_.at(type); } // Parameter specifying TTStub algorithm @@ -491,21 +495,26 @@ namespace tt { int kfMaxLayers() const { return kfMaxLayers_; } // search window of each track parameter in initial uncertainties double kfRangeFactor() const { return kfRangeFactor_; } + // + int kfShiftInitialC00() const { return kfShiftInitialC00_; } + // + int kfShiftInitialC11() const { return kfShiftInitialC11_; } + // + int kfShiftInitialC22() const { return kfShiftInitialC22_; } + // + int kfShiftInitialC33() const { return kfShiftInitialC33_; } // Parameter specifying KalmanFilter Output Formatter - - // Final Chi2rphi digitization TODO extract from TTTrack Word - std::vector kfoutchi2rphiBins() const { return kfoutchi2rphiBins_; } - // Final Chi2rz digitization TODO extract from TTTrack Word - std::vector kfoutchi2rzBins() const { return 
kfoutchi2rzBins_; } // Conversion factor between dphi^2/weight and chi2rphi int kfoutchi2rphiConv() const { return kfoutchi2rphiConv_; } // Conversion factor between dz^2/weight and chi2rz int kfoutchi2rzConv() const { return kfoutchi2rzConv_; } - // Number of bits for the tttrack word - int tttrackBits() const { return tttrackBits_; } // Fraction of total dphi and dz ranges to calculate v0 and v1 LUT for int weightBinFraction() const { return weightBinFraction_; } + // Constant used in FW to prevent 32-bit int overflow + int dzTruncation() const { return dzTruncation_; } + // Constant used in FW to prevent 32-bit int overflow + int dphiTruncation() const { return dphiTruncation_; } // Parameter specifying DuplicateRemoval @@ -656,6 +665,8 @@ namespace tt { double hybridRangePhi_; // smallest stub radius after TrackBuilder in cm double tbInnerRadius_; + // number of bits used for stub r w.r.t layer/disk centre for module types (barrelPS, barrel2S, diskPS, disk2S) after TrackBuilder + std::vector tbWidthsR_; // Parameter specifying TrackingParticle used for Efficiency measurements edm::ParameterSet pSetTP_; @@ -897,29 +908,27 @@ namespace tt { int kfMaxLayers_; // search window of each track parameter in initial uncertainties double kfRangeFactor_; + // + int kfShiftInitialC00_; + // + int kfShiftInitialC11_; + // + int kfShiftInitialC22_; + // + int kfShiftInitialC33_; // Parameter specifying KalmanFilter Output Formatter edm::ParameterSet pSetKFOut_; - // Bins used to digitize dPhi for chi2 calculation - std::vector kfoutdPhiBins_; - // Bins used to digitize dZ for chi2 calculation - std::vector kfoutdZBins_; - // v0 weight Bins corresponding to dPhi Bins for chi2 calculation - std::vector kfoutv0Bins_; - // v1 weight Bins corresponding to dZ Bins for chi2 calculation - std::vector kfoutv1Bins_; - // Final Chi2rphi digitization TODO extract from TTTrack Word - std::vector kfoutchi2rphiBins_; - // Final Chi2rz digitization TODO extract from TTTrack Word - 
std::vector kfoutchi2rzBins_; // Conversion factor between dphi^2/weight and chi2rphi int kfoutchi2rphiConv_; // Conversion factor between dz^2/weight and chi2rz int kfoutchi2rzConv_; - // Number of bits for the tttrack word - int tttrackBits_; // Fraction of total dphi and dz ranges to calculate v0 and v1 LUT for int weightBinFraction_; + // Constant used in FW to prevent 32-bit int overflow + int dzTruncation_; + // Constant used in FW to prevent 32-bit int overflow + int dphiTruncation_; // Parameter specifying DuplicateRemoval edm::ParameterSet pSetDR_; diff --git a/L1Trigger/TrackTrigger/python/ProducerSetup_cfi.py b/L1Trigger/TrackTrigger/python/ProducerSetup_cfi.py index 4569dd6d95ff0..333c47a2770d6 100644 --- a/L1Trigger/TrackTrigger/python/ProducerSetup_cfi.py +++ b/L1Trigger/TrackTrigger/python/ProducerSetup_cfi.py @@ -66,6 +66,7 @@ cms.PSet( Disk2SRs = cms.vdouble( 63.9903, 68.9903, 74.2750, 79.2750, 81.9562, 86.9562, 92.4920, 97.4920, 99.8160, 104.8160 ) ) # disk 5 ), InnerRadius = cms.double( 19.6 ), # smallest stub radius after TrackBuilder in cm + WidthsRTB = cms.vint32 ( 7, 7, 12, 12 ), # number of bits used for stub r w.r.t layer/disk centre for module types (barrelPS, barrel2S, diskPS, disk2S) at TB output ), # Parameter specifying TrackingParticle used for Efficiency measurements @@ -202,23 +203,23 @@ # Parmeter specifying KalmanFilter KalmanFilter = cms.PSet ( - NumWorker = cms.int32 ( 2 ), # number of kf worker - RangeFactor = cms.double( 2.0 ), # search window of each track parameter in initial uncertainties - MinLayers = cms.int32 ( 4 ), # required number of stub layers to form a track - MaxLayers = cms.int32 ( 7 ) # maximum number of layers added to a track + NumWorker = cms.int32 ( 2 ), # number of kf worker + RangeFactor = cms.double( 2.0 ), # search window of each track parameter in initial uncertainties + MinLayers = cms.int32 ( 4 ), # required number of stub layers to form a track + MaxLayers = cms.int32 ( 7 ), # maximum number of 
layers added to a track + ShiftInitialC00 = cms.int32 ( 0 ), # + ShiftInitialC11 = cms.int32 ( -2 ), # + ShiftInitialC22 = cms.int32 ( 0 ), # + ShiftInitialC33 = cms.int32 ( 0 ) # ), # Parmeter specifying KalmanFilter Output Formatter KalmanFilterOut = cms.PSet ( - chi2rphiBins = cms.vdouble( 0, 0.25, 0.5, 1, 2, 3, 5, 7, 10, 20, 40, 100, 200, 500, 1000, 3000,6000 ), # Final Chi2rphi digitization TODO extract from TTTrack Word - chi2rzBins = cms.vdouble( 0, 0.25, 0.5, 1, 2, 3, 5, 7, 10, 20, 40, 100, 200, 500, 1000, 3000,6000 ), # Final Chi2rz digitization TODO extract from TTTrack Word - - chi2rphiConv = cms.int32 ( 3 ), # Conversion factor between dphi^2/weight and chi2rphi - chi2rzConv = cms.int32 ( 13 ), # Conversion factor between dz^2/weight and chi2rz - - WeightBinFraction = cms.int32( 0 ), # Number of bits dropped from dphi and dz for v0 and v1 LUTs - - TTTrackBits = cms.int32( 96 ) # Number of bits for the tttrack word TODO extract from TTTrack_word dataformat + Chi2rphiConv = cms.int32( 3 ), # Conversion factor between dphi^2/weight and chi2rphi + Chi2rzConv = cms.int32( 13 ), # Conversion factor between dz^2/weight and chi2rz + WeightBinFraction = cms.int32( 0 ), # Number of bits dropped from dphi and dz for v0 and v1 LUTs + DzTruncation = cms.int32( 262144 ), # Constant used in FW to prevent 32-bit int overflow + DphiTruncation = cms.int32( 16 ) # Constant used in FW to prevent 32-bit int overflow ), # Parmeter specifying DuplicateRemoval diff --git a/L1Trigger/TrackTrigger/python/TTStubAlgorithmRegister_cfi.py b/L1Trigger/TrackTrigger/python/TTStubAlgorithmRegister_cfi.py index 16276daa49e51..b7dea492ce517 100644 --- a/L1Trigger/TrackTrigger/python/TTStubAlgorithmRegister_cfi.py +++ b/L1Trigger/TrackTrigger/python/TTStubAlgorithmRegister_cfi.py @@ -2,55 +2,33 @@ # First register all the hit matching algorithms, then specify preferred ones at end. 
-# The stub windows used has been optimized for for PU200 events -# We use by default the tight tuning -# -# Definition is presented here: -# -# https://indico.cern.ch/event/681577/#4-update-of-the-track-trigger -# -# This script is adapted to the very last Tilted Tracker geometry to date (tracker T5) -# This version was tested on CMSSW 10_0_0_pre1 -# +# The stub windows used has been optimized for PU200 events +# We use by default the new modified tight tuning +# more details can be found in the following detector note: CMS DN-2020/005 -TTStubAlgorithm_official_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_official_Phase2TrackerDigi_", - zMatchingPS = cms.bool(True), - zMatching2S = cms.bool(True), - #Number of tilted rings per side in barrel layers (for tilted geom only) - NTiltedRings = cms.vdouble( 0., 12., 12., 12., 0., 0., 0.), - # PU200 tight tuning, optimized for muons - BarrelCut = cms.vdouble( 0, 2, 2.5, 3.5, 4.5, 5.5, 7), - TiltedBarrelCutSet = cms.VPSet( - cms.PSet( TiltedCut = cms.vdouble( 0 ) ), - cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3, 2.5, 3, 3, 2.5, 2.5, 2, 1.5, 1.5, 1, 1) ), - cms.PSet( TiltedCut = cms.vdouble( 0, 3.5, 3, 3, 3, 3, 2.5, 2.5, 3, 3, 2.5, 2.5, 2.5) ), - cms.PSet( TiltedCut = cms.vdouble( 0, 4, 4, 4, 3.5, 3.5, 3.5, 3.5, 3, 3, 3, 3, 3) ), - ), - EndcapCutSet = cms.VPSet( - cms.PSet( EndcapCut = cms.vdouble( 0 ) ), - cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 2.5, 3, 2.5, 3, 3.5, 4, 4, 4.5, 3.5, 4, 4.5, 5, 5.5) ), - cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 2.5, 3, 3, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5, 5) ), - cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3, 3, 2.5, 3.5, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5) ), - cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 3, 2.5, 3.5, 3, 3, 3.5, 3.5, 3.5, 4, 4) ), - cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3, 2.5, 3.5, 3, 3, 3.5, 4, 3.5, 4, 3.5) ), - ) +# This script is adapted to the very last Tilted Tracker geometry (D76) +# This version was tested on CMSSW_13_3_0_pre2 - # PU200 
loose tuning, optimized for robustness (uncomment if you want to use it) - #BarrelCut = cms.vdouble( 0, 2.0, 3, 4.5, 6, 6.5, 7.0), - #TiltedBarrelCutSet = cms.VPSet( - # cms.PSet( TiltedCut = cms.vdouble( 0 ) ), - # cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3., 2.5, 3., 3., 2.5, 2.5, 2., 1.5, 1.5, 1, 1) ), - # cms.PSet( TiltedCut = cms.vdouble( 0, 4., 4, 4, 4, 4., 4., 4.5, 5, 4., 3.5, 3.5, 3) ), - # cms.PSet( TiltedCut = cms.vdouble( 0, 5, 5, 5, 5, 5, 5, 5.5, 5, 5, 5.5, 5.5, 5.5) ), - # ), - #EndcapCutSet = cms.VPSet( - # cms.PSet( EndcapCut = cms.vdouble( 0 ) ), - # cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 2.5, 3.5, 5.5, 5.5, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ), - # cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 5, 6, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ), - # cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3., 4.5, 6., 6.5, 6.5, 6.5, 7, 7, 7, 7, 7) ), - # cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 3.5, 6., 6.5, 6.5, 6.5, 6.5, 7, 7, 7, 7) ), - # cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3., 4.5, 6.5, 6.5, 7, 7, 7, 7, 7, 7) ), - # ) + # PU200 new modified tight tuning +TTStubAlgorithm_official_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_official_Phase2TrackerDigi_", + zMatchingPS = cms.bool(True), + zMatching2S = cms.bool(True), + NTiltedRings = cms.vdouble( 0., 12., 12., 12., 0., 0., 0.), + BarrelCut = cms.vdouble(0, 2.0, 2.5, 3.5, 4.0, 5.5, 6.5), + TiltedBarrelCutSet = cms.VPSet( + cms.PSet( TiltedCut = cms.vdouble( 0 ) ), + cms.PSet( TiltedCut = cms.vdouble( 0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.5, 1.5, 1.5, 1.0, 1.0) ), + cms.PSet( TiltedCut = cms.vdouble( 0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.5, 2.5, 3.0, 3.0, 2.5, 2.5, 2.5) ), + cms.PSet( TiltedCut = cms.vdouble(0, 4.0, 4.0, 4.0, 3.5, 3.5, 3.5, 3.0, 3.0, 2.5, 2.5, 2.5, 2.5) ), + ), + EndcapCutSet = cms.VPSet( + cms.PSet( EndcapCut = cms.vdouble( 0 ) ), + cms.PSet( EndcapCut = cms.vdouble(0, 1.0, 1.5, 1.5, 2.0, 2.0, 2.5, 2.5, 3.0, 4.0, 4.0, 2.5, 3.0, 3.5, 4.0, 5.0) ), + cms.PSet( 
EndcapCut = cms.vdouble(0, 0.5, 1.5, 1.5, 2.0, 2.0, 2.0, 2.5, 2.5, 3.0, 3.5, 2.0, 2.5, 3.0, 4.0, 4.0) ), + cms.PSet( EndcapCut = cms.vdouble(0, 1.5, 2.0, 2.0, 2.0, 2.0, 2.5, 3.0, 3.5, 2.5, 2.5, 3.0, 3.5) ), + cms.PSet( EndcapCut = cms.vdouble(0, 1.0, 1.5, 1.5, 2.0, 2.0, 2.0, 2.0, 3.0, 2.0, 2.0, 3.0, 3.0) ), + cms.PSet( EndcapCut = cms.vdouble(0, 1.0, 1.5, 1.5, 2.0, 2.0, 2.0, 2.0, 2.5, 3.0, 2.0, 2.0, 2.5) ), + ) ) # CBC3 hit matching algorithm diff --git a/L1Trigger/TrackTrigger/python/TrackQualityParams_cfi.py b/L1Trigger/TrackTrigger/python/TrackQualityParams_cfi.py index fbfd22f0944a9..f5928d461da40 100644 --- a/L1Trigger/TrackTrigger/python/TrackQualityParams_cfi.py +++ b/L1Trigger/TrackTrigger/python/TrackQualityParams_cfi.py @@ -1,12 +1,14 @@ import FWCore.ParameterSet.Config as cms -TrackQualityParams = cms.PSet(qualityAlgorithm = cms.string("GBDT"), #None, Cut, NN, GBDT - ONNXmodel = cms.FileInPath("L1Trigger/TrackTrigger/data/GBDT_default.onnx"), +TrackQualityParams = cms.PSet(qualityAlgorithm = cms.string("GBDT_cpp"), #None, Cut, NN, GBDT, GBDT_cpp + # This emulation GBDT is optimised for the HYBRID_NEWKF emulation and works with the emulation of the KF out module + # It is compatible with the HYBRID simulation and will give equivilant performance with this workflow + ONNXmodel = cms.FileInPath("L1Trigger/TrackTrigger/data/L1_TrackQuality_GBDT_emulation_digitized.json"), # The ONNX model should be found at this path, if you want a local version of the model: # git clone https://github.com/cms-data/L1Trigger-TrackTrigger.git L1Trigger/TrackTrigger/data ONNXInputName = cms.string("feature_input"), #Vector of strings of training features, in the order that the model was trained with - featureNames = cms.vstring(["phi", "eta", "z0", "bendchi2_bin", "nstub", + featureNames = cms.vstring(["tanl", "z0_scaled", "bendchi2_bin", "nstub", "nlaymiss_interior", "chi2rphi_bin", "chi2rz_bin"]), # Parameters for cut based classifier, optimized for L1 Track MET # 
(Table 3.7 The Phase-2 Upgrade of the CMS Level-1 Trigger http://cds.cern.ch/record/2714892) @@ -16,4 +18,7 @@ bendchi2Max = cms.double( 2.4 ), minPt = cms.double( 2. ), # in GeV nStubsmin = cms.int32( 4 ), + tqemu_bins = cms.vint32( [-480, -62, -35, -16, 0, 16, 35, 62, 480] ), + tqemu_TanlScale = cms.double( 128.0), + tqemu_Z0Scale = cms.double( 64.0 ), ) diff --git a/L1Trigger/TrackTrigger/src/L1TrackQuality.cc b/L1Trigger/TrackTrigger/src/L1TrackQuality.cc index 60e23bddb8c09..f3472beab34b6 100644 --- a/L1Trigger/TrackTrigger/src/L1TrackQuality.cc +++ b/L1Trigger/TrackTrigger/src/L1TrackQuality.cc @@ -1,6 +1,5 @@ /* Track Quality Body file - C.Brown & C.Savard 07/2020 */ @@ -28,7 +27,8 @@ L1TrackQuality::L1TrackQuality(const edm::ParameterSet& qualityParams) : useHPH_ qualityParams.getParameter("ONNXmodel"), qualityParams.getParameter("ONNXInputName"), qualityParams.getParameter>("featureNames")); - runTime_ = std::make_unique(this->ONNXmodel_.fullPath()); + if ((AlgorithmString == "GBDT") || (AlgorithmString == "NN")) + runTime_ = std::make_unique(this->ONNXmodel_.fullPath()); } } @@ -74,19 +74,23 @@ std::vector L1TrackQuality::featureTransform(TTTrack& aTrack) aTrack.settrkMVA1(classification); } - if ((this->qualityAlgorithm_ == QualityAlgorithm::NN) || (this->qualityAlgorithm_ == QualityAlgorithm::GBDT)) { + else if (this->qualityAlgorithm_ == QualityAlgorithm::GBDT_cpp) { + // load in bdt + conifer::BDT bdt(this->ONNXmodel_.fullPath()); + + // collect features and classify using bdt + std::vector inputs = featureTransform(aTrack, this->featureNames_); + std::vector output = bdt.decision_function(inputs); + aTrack.settrkMVA1(1. / (1. 
+ exp(-output.at(0)))); // need logistic sigmoid fcn applied to xgb output + } + + else if ((this->qualityAlgorithm_ == QualityAlgorithm::NN) || (this->qualityAlgorithm_ == QualityAlgorithm::GBDT)) { // Setup ONNX input and output names and arrays std::vector ortinput_names; std::vector ortoutput_names; @@ -156,6 +170,16 @@ void L1TrackQuality::setL1TrackQuality(TTTrack& aTrack) } } +float L1TrackQuality::runEmulatedTQ(std::vector> inputFeatures) { + // load in bdt + + conifer::BDT, ap_fixed<10, 5>> bdt(this->ONNXmodel_.fullPath()); + + // collect features and classify using bdt + std::vector> output = bdt.decision_function(inputFeatures); + return output.at(0).to_float(); // need logistic sigmoid fcn applied to xgb output +} + void L1TrackQuality::setCutParameters(std::string const& AlgorithmString, float maxZ0, float maxEta, @@ -181,6 +205,8 @@ void L1TrackQuality::setONNXModel(std::string const& AlgorithmString, qualityAlgorithm_ = QualityAlgorithm::NN; } else if (AlgorithmString == "GBDT") { qualityAlgorithm_ = QualityAlgorithm::GBDT; + } else if (AlgorithmString == "GBDT_cpp") { + qualityAlgorithm_ = QualityAlgorithm::GBDT_cpp; } else { qualityAlgorithm_ = QualityAlgorithm::None; } diff --git a/L1Trigger/TrackTrigger/src/Setup.cc b/L1Trigger/TrackTrigger/src/Setup.cc index 40d5c64af3a00..f26ed4ac2c238 100644 --- a/L1Trigger/TrackTrigger/src/Setup.cc +++ b/L1Trigger/TrackTrigger/src/Setup.cc @@ -85,6 +85,7 @@ namespace tt { hybridDiskZs_(pSetHybrid_.getParameter>("DiskZs")), hybridDisk2SRsSet_(pSetHybrid_.getParameter>("Disk2SRsSet")), tbInnerRadius_(pSetHybrid_.getParameter("InnerRadius")), + tbWidthsR_(pSetHybrid_.getParameter>("WidthsRTB")), // Parameter specifying TrackingParticle used for Efficiency measurements pSetTP_(iConfig.getParameter("TrackingParticle")), tpMinPt_(pSetTP_.getParameter("MinPt")), @@ -201,14 +202,17 @@ namespace tt { kfMinLayers_(pSetKF_.getParameter("MinLayers")), kfMaxLayers_(pSetKF_.getParameter("MaxLayers")), 
kfRangeFactor_(pSetKF_.getParameter("RangeFactor")), + kfShiftInitialC00_(pSetKF_.getParameter("ShiftInitialC00")), + kfShiftInitialC11_(pSetKF_.getParameter("ShiftInitialC11")), + kfShiftInitialC22_(pSetKF_.getParameter("ShiftInitialC22")), + kfShiftInitialC33_(pSetKF_.getParameter("ShiftInitialC33")), // Parmeter specifying KalmanFilter Output Formatter pSetKFOut_(iConfig.getParameter("KalmanFilterOut")), - kfoutchi2rphiBins_(pSetKFOut_.getParameter>("chi2rphiBins")), - kfoutchi2rzBins_(pSetKFOut_.getParameter>("chi2rzBins")), - kfoutchi2rphiConv_(pSetKFOut_.getParameter("chi2rphiConv")), - kfoutchi2rzConv_(pSetKFOut_.getParameter("chi2rzConv")), - tttrackBits_(pSetKFOut_.getParameter("TTTrackBits")), + kfoutchi2rphiConv_(pSetKFOut_.getParameter("Chi2rphiConv")), + kfoutchi2rzConv_(pSetKFOut_.getParameter("Chi2rzConv")), weightBinFraction_(pSetKFOut_.getParameter("WeightBinFraction")), + dzTruncation_(pSetKFOut_.getParameter("DzTruncation")), + dphiTruncation_(pSetKFOut_.getParameter("DphiTruncation")), // Parmeter specifying DuplicateRemoval pSetDR_(iConfig.getParameter("DuplicateRemoval")), drDepthMemory_(pSetDR_.getParameter("DepthMemory")) { diff --git a/L1Trigger/TrackTrigger/test/BuildFile.xml b/L1Trigger/TrackTrigger/test/BuildFile.xml index 724dea5fcc1ee..427283dbef53b 100644 --- a/L1Trigger/TrackTrigger/test/BuildFile.xml +++ b/L1Trigger/TrackTrigger/test/BuildFile.xml @@ -15,7 +15,6 @@ - diff --git a/L1Trigger/TrackerDTC/interface/DTC.h b/L1Trigger/TrackerDTC/interface/DTC.h index 0d1a6896a26d2..74df2f0da6084 100644 --- a/L1Trigger/TrackerDTC/interface/DTC.h +++ b/L1Trigger/TrackerDTC/interface/DTC.h @@ -41,7 +41,6 @@ namespace trackerDTC { void produce(const Stubss& stubss, TTDTC& product); // pop_front function which additionally returns copy of deleted front Stub* pop_front(Stubs& stubs); - // helper class to store configurations const tt::Setup* setup_; // enables emulation of truncation diff --git a/L1Trigger/TrackerDTC/interface/Stub.h 
b/L1Trigger/TrackerDTC/interface/Stub.h index 405c1011709de..67493efb27f7e 100644 --- a/L1Trigger/TrackerDTC/interface/Stub.h +++ b/L1Trigger/TrackerDTC/interface/Stub.h @@ -19,7 +19,6 @@ namespace trackerDTC { public: Stub(const edm::ParameterSet&, const tt::Setup*, const LayerEncoding*, tt::SensorModule*, const TTStubRef&); ~Stub() {} - // underlying TTStubRef TTStubRef ttStubRef() const { return ttStubRef_; } // did pass pt and eta cut @@ -38,7 +37,6 @@ namespace trackerDTC { tt::Frame formatHybrid(int region) const; // 64 bit stub in tmtt data format tt::Frame formatTMTT(int region) const; - // stores, calculates and provides run-time constants const tt::Setup* setup_; // class to encode layer ids used between DTC and TFP in Hybrid diff --git a/L1Trigger/TrackerTFP/interface/DistServer.h b/L1Trigger/TrackerTFP/interface/DistServer.h deleted file mode 100644 index 5c093ab796b06..0000000000000 --- a/L1Trigger/TrackerTFP/interface/DistServer.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef L1Trigger_TrackerTFP_DistServer_h__ -#define L1Trigger_TrackerTFP_DistServer_h__ - -#include "L1Trigger/TrackerTFP/interface/DataFormats.h" - -#include - -namespace trackerTFP { - - class DistServer { - public: - DistServer(unsigned int nInputs, unsigned int nOutputs, unsigned int nInterleaving); - ~DistServer() {} - - TrackKFOutSAPtrCollection clock(TrackKFOutSAPtrCollection& inputs); - - unsigned int nInputs() const { return nInputs_; } - unsigned int nOutputs() const { return nOutputs_; } - unsigned int nInterleaving() const { return nInterleaving_; } - std::vector >& addr() { return addr_; } - TrackKFOutSAPtrCollections& inputs() { return inputs_; } - - private: - unsigned int nInputs_; - unsigned int nOutputs_; - unsigned int nInterleaving_; - - TrackKFOutSAPtrCollections inputs_; - std::vector > addr_; - }; -} // namespace trackerTFP - -#endif \ No newline at end of file diff --git a/L1Trigger/TrackerTFP/src/DataFormats.cc b/L1Trigger/TrackerTFP/src/DataFormats.cc index 
628b32a4b0f50..d58ba3d15b50b 100644 --- a/L1Trigger/TrackerTFP/src/DataFormats.cc +++ b/L1Trigger/TrackerTFP/src/DataFormats.cc @@ -70,6 +70,7 @@ namespace trackerTFP { numStreamsTracks_[+Process::kfin] = numStreams_[+Process::kf]; numStreamsTracks_[+Process::kf] = numStreams_[+Process::kf]; // Print digi data format of all variables of any specified algo step + // (Look at DataFormat.h::tracks_ to see variable names). //for (const Variable v : tracks_[+Process::kf]) { // const DataFormat& f = format(v, Process::kf); // cout <<" KF "<< f.base() << " " << f.range() << " " << f.width() << endl; diff --git a/L1Trigger/TrackerTFP/src/Demonstrator.cc b/L1Trigger/TrackerTFP/src/Demonstrator.cc index 98a509f33cb13..73253c3100c11 100644 --- a/L1Trigger/TrackerTFP/src/Demonstrator.cc +++ b/L1Trigger/TrackerTFP/src/Demonstrator.cc @@ -114,7 +114,7 @@ namespace trackerTFP { << "Metadata: (strobe,) start of orbit, start of packet, end of packet, valid" << endl << endl; // link header - ss << " Link :"; + ss << " Link "; for (int link = 0; link < numLinks; link++) ss << " " << setfill('0') << setw(3) << link << " "; ss << endl; @@ -136,7 +136,7 @@ namespace trackerTFP { // creates frame number string Demonstrator::frame(int& nFrame) const { stringstream ss; - ss << "Frame " << setfill('0') << setw(4) << nFrame++ << " :"; + ss << "Frame " << setfill('0') << setw(4) << nFrame++ << " "; return ss.str(); } diff --git a/L1Trigger/TrackerTFP/src/DistServer.cc b/L1Trigger/TrackerTFP/src/DistServer.cc deleted file mode 100644 index f4115082a3e49..0000000000000 --- a/L1Trigger/TrackerTFP/src/DistServer.cc +++ /dev/null @@ -1,59 +0,0 @@ -#include "L1Trigger/TrackerTFP/interface/DistServer.h" -#include "L1Trigger/TrackerTFP/interface/DataFormats.h" - -#include -#include - -using namespace std; -using namespace trackerTFP; - -DistServer::DistServer(unsigned int nInputs, unsigned int nOutputs, unsigned int nInterleaving) - : nInputs_(nInputs), nOutputs_(nOutputs), 
nInterleaving_(nInterleaving), inputs_(nInputs_) { - for (unsigned int iInput = 0; iInput < this->nInputs(); ++iInput) { - addr_.emplace_back(this->nInterleaving(), 0); - for (unsigned int iInterleave = 0; iInterleave < this->nInterleaving(); ++iInterleave) { - addr_[iInput][iInterleave] = iInterleave; - } - } -} - -TrackKFOutSAPtrCollection DistServer::clock(TrackKFOutSAPtrCollection& data) { - for (unsigned int iInput = 0; iInput < nInputs(); ++iInput) { - if (data[iInput]->dataValid()) { - inputs()[iInput].push_back(data[iInput]); - } - } - - vector > lMap(nInputs(), vector(nOutputs())); - - TrackKFOutSAPtrCollection lInputs(nInputs(), std::make_shared()); - - std::vector >& addr = this->addr(); - - for (unsigned int iInput = 0; iInput < nInputs(); ++iInput) { - unsigned int lAddr = addr[iInput][0]; - if (lAddr < inputs()[iInput].size()) { - lInputs[iInput] = inputs()[iInput][lAddr]; - lMap[iInput][lInputs[iInput]->sortKey()] = true; - } - } - - for (unsigned int iInput = 0; iInput < nInputs(); ++iInput) { - vector& toRotate = addr[iInput]; - rotate(toRotate.begin(), toRotate.begin() + 1, toRotate.end()); - } - - TrackKFOutSAPtrCollection lOutputs(nOutputs(), std::make_shared()); - - for (unsigned int iOutput = 0; iOutput < lOutputs.size(); ++iOutput) { - for (unsigned int iInput = 0; iInput < nInputs(); ++iInput) { - if (lMap[iInput][iOutput]) { - lOutputs[iOutput] = lInputs[iInput]; - addr[iInput].back() += this->nInterleaving(); - break; - } - } - } - - return lOutputs; -} diff --git a/L1Trigger/TrackerTFP/src/State.cc b/L1Trigger/TrackerTFP/src/State.cc index 565cd43a162f7..739be28ddbe2a 100644 --- a/L1Trigger/TrackerTFP/src/State.cc +++ b/L1Trigger/TrackerTFP/src/State.cc @@ -46,10 +46,10 @@ namespace trackerTFP { x2_ = 0.; x3_ = 0.; // initial uncertainties - C00_ = pow(dataFormats_->base(Variable::inv2R, Process::kfin), 2); - C11_ = pow(dataFormats_->base(Variable::phiT, Process::kfin), 2); - C22_ = pow(dataFormats_->base(Variable::cot, Process::kfin), 
2); - C33_ = pow(dataFormats_->base(Variable::zT, Process::kfin), 2); + C00_ = pow(dataFormats_->base(Variable::inv2R, Process::kfin), 2) * pow(2, setup_->kfShiftInitialC00()); + C11_ = pow(dataFormats_->base(Variable::phiT, Process::kfin), 2) * pow(2, setup_->kfShiftInitialC11()); + C22_ = pow(dataFormats_->base(Variable::cot, Process::kfin), 2) * pow(2, setup_->kfShiftInitialC22()); + C33_ = pow(dataFormats_->base(Variable::zT, Process::kfin), 2) * pow(2, setup_->kfShiftInitialC33()); C01_ = 0.; C23_ = 0.; // first stub from first layer on input track with stubs diff --git a/L1Trigger/TrackerTFP/test/demonstrator_cfg.py b/L1Trigger/TrackerTFP/test/demonstrator_cfg.py index fef2ececea3d8..787c2acc85bc2 100644 --- a/L1Trigger/TrackerTFP/test/demonstrator_cfg.py +++ b/L1Trigger/TrackerTFP/test/demonstrator_cfg.py @@ -41,7 +41,7 @@ options = VarParsing.VarParsing( 'analysis' ) # specify input MC Samples = [ -'/store/relval/CMSSW_12_6_0_pre4/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v2_2026D88PU200-v1/2590000/00b3d04b-4c7b-4506-8d82-9538fb21ee19.root' +'/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root' ] options.register( 'inputMC', Samples, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Files to be processed" ) # specify number of events to process. 
diff --git a/L1Trigger/TrackerTFP/test/test_cfg.py b/L1Trigger/TrackerTFP/test/test_cfg.py index 9ccf4a01e99f0..efaa1e64f12b7 100644 --- a/L1Trigger/TrackerTFP/test/test_cfg.py +++ b/L1Trigger/TrackerTFP/test/test_cfg.py @@ -54,7 +54,7 @@ options = VarParsing.VarParsing( 'analysis' ) # specify input MC Samples = [ -'/store/relval/CMSSW_12_6_0_pre4/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v2_2026D88PU200-v1/2590000/00b3d04b-4c7b-4506-8d82-9538fb21ee19.root' +'/store/mc/CMSSW_12_6_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_125X_mcRun4_realistic_v5_2026D88PU200RV183v2-v1/30000/0959f326-3f52-48d8-9fcf-65fc41de4e27.root' ] options.register( 'inputMC', Samples, VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, "Files to be processed" ) # specify number of events to process. diff --git a/L1TriggerConfig/GMTConfigProducers/interface/RecordHelper.h b/L1TriggerConfig/GMTConfigProducers/interface/RecordHelper.h index 8732a7a4486fa..a2ac622ca74ef 100644 --- a/L1TriggerConfig/GMTConfigProducers/interface/RecordHelper.h +++ b/L1TriggerConfig/GMTConfigProducers/interface/RecordHelper.h @@ -13,7 +13,7 @@ */ -#include +#include #include "RelationalAccess/ICursor.h" #include "CoralBase/AttributeList.h" @@ -30,6 +30,7 @@ class FieldHandlerBase { typedef coral::AttributeList AttributeList; /** Construct a new field handler with the C++ field name as its argument */ FieldHandlerBase(const std::string& name) : name_(name) {} + FieldHandlerBase() = delete; /** Return the name of the field handled by this object. */ const std::string& getName() { return name_; } @@ -64,14 +65,16 @@ class FieldHandler : public FieldHandlerBase { FieldHandler(const std::string& fieldName, TSetMethod setter) : FieldHandlerBase(fieldName), setter_(setter) {} + FieldHandler() = delete; + /** Actual data extraction. 
*/ void extractValue(const AttributeList& src, TOutput& dest) override { #ifdef RECORDHELPER_DEBUG std::cout << "Parsing field " << this->getName() << " with type " << typeid(TCField).name(); #endif - typedef typename boost::remove_cv::type>::type TDBFieldT; + typedef typename std::remove_cv::type>::type TDBFieldT; const TDBFieldT& value = src[this->getColumnName()].template data(); - ((dest).*setter_)(TCField(value)); + call(dest, TCField(value)); #ifdef RECORDHELPER_DEBUG std::cout << "=" << TCField(value) << std::endl; @@ -79,9 +82,10 @@ class FieldHandler : public FieldHandlerBase { } protected: + void call(TOutput& dest, const TCField value) { ((dest).*setter_)(value); } /** Points to the setter method used to stuff the field's value into the destination object. */ - TSetMethod setter_; + TSetMethod setter_ = nullptr; }; /** A special handler for bool fields in the GT/GMT DBs. These can't be imported @@ -96,13 +100,15 @@ class ASCIIBoolFieldHandler : public FieldHandler { ASCIIBoolFieldHandler(const std::string& fieldName, typename FieldHandler::TSetMethod setter) : FieldHandler(fieldName, setter) {} + ASCIIBoolFieldHandler() = delete; + /** Extract value as char, then see compare it to '0' to get its truth value. */ void extractValue(const AttributeList& src, TOutput& dest) override { char value = src[this->getColumnName()].template data(); #ifdef RECORDHELPER_DEBUG std::cout << " .. 
and " << this->getColumnName() << " is (in integers) " << (int)value << std::endl; #endif - ((dest).*(this->setter_))(value != FalseCharacter); + this->call(dest, value != FalseCharacter); } }; diff --git a/L1TriggerConfig/L1GtConfigProducers/interface/L1GtVhdlTemplateFile.h b/L1TriggerConfig/L1GtConfigProducers/interface/L1GtVhdlTemplateFile.h index a00733320d4a1..e7f2382c4a181 100644 --- a/L1TriggerConfig/L1GtConfigProducers/interface/L1GtVhdlTemplateFile.h +++ b/L1TriggerConfig/L1GtConfigProducers/interface/L1GtVhdlTemplateFile.h @@ -37,10 +37,6 @@ class L1GtVhdlTemplateFile { L1GtVhdlTemplateFile(); /// constructor with filename L1GtVhdlTemplateFile(const std::string &filename); - /// copy constructor - L1GtVhdlTemplateFile(const L1GtVhdlTemplateFile &rhs); - /// destructor - ~L1GtVhdlTemplateFile(); /// replaces searchString with replaceString at it's first occurance in string static const bool findAndReplaceString(std::string ¶mString, const std::string &searchString, diff --git a/L1TriggerConfig/L1GtConfigProducers/src/L1GtVhdlTemplateFile.cc b/L1TriggerConfig/L1GtConfigProducers/src/L1GtVhdlTemplateFile.cc index ef0cd8cc80d0a..5aad609338f54 100644 --- a/L1TriggerConfig/L1GtConfigProducers/src/L1GtVhdlTemplateFile.cc +++ b/L1TriggerConfig/L1GtConfigProducers/src/L1GtVhdlTemplateFile.cc @@ -34,18 +34,6 @@ L1GtVhdlTemplateFile::L1GtVhdlTemplateFile(const std::string &filename) { std::cout << "Error while opening file: " << filename << std::endl; } -//copy constructor -L1GtVhdlTemplateFile::L1GtVhdlTemplateFile(const L1GtVhdlTemplateFile &rhs) { - lines_ = rhs.lines_; - intern_ = rhs.intern_; - parameterMap_ = rhs.parameterMap_; -} - -// destructor -L1GtVhdlTemplateFile::~L1GtVhdlTemplateFile() { - // empty -} - const bool L1GtVhdlTemplateFile::findAndReplaceString(std::string ¶mString, const std::string &searchString, const std::string &replaceString) { diff --git a/L1TriggerConfig/L1TConfigProducers/src/L1TCaloParamsOnlineProd.cc 
b/L1TriggerConfig/L1TConfigProducers/src/L1TCaloParamsOnlineProd.cc index 0c8528a8b375e..2ed2a285ee7aa 100644 --- a/L1TriggerConfig/L1TConfigProducers/src/L1TCaloParamsOnlineProd.cc +++ b/L1TriggerConfig/L1TConfigProducers/src/L1TCaloParamsOnlineProd.cc @@ -141,11 +141,11 @@ bool L1TCaloParamsOnlineProd::readCaloLayer2OnlineSettings(l1t::CaloParamsHelper } } // Layer 2 params specification - paramsHelper.setEgSeedThreshold((conf["leptonSeedThreshold"].getValue()) / 2); - paramsHelper.setTauSeedThreshold((conf["leptonSeedThreshold"].getValue()) / 2); - paramsHelper.setEgNeighbourThreshold((conf["leptonTowerThreshold"].getValue()) / 2); - paramsHelper.setTauNeighbourThreshold((conf["leptonTowerThreshold"].getValue()) / 2); - paramsHelper.setPileUpTowerThreshold((conf["pileUpTowerThreshold"].getValue()) / 2); + paramsHelper.setEgSeedThreshold((conf["leptonSeedThreshold"].getValue()) / 2.); + paramsHelper.setTauSeedThreshold((conf["leptonSeedThreshold"].getValue()) / 2.); + paramsHelper.setEgNeighbourThreshold((conf["leptonTowerThreshold"].getValue()) / 2.); + paramsHelper.setTauNeighbourThreshold((conf["leptonTowerThreshold"].getValue()) / 2.); + paramsHelper.setPileUpTowerThreshold((conf["pileUpTowerThreshold"].getValue()) / 2.); paramsHelper.setEgMaxPtHOverE((conf["egammaRelaxationThreshold"].getValue()) / 2.); paramsHelper.setEgEtaCut((conf["egammaMaxEta"].getValue())); @@ -164,7 +164,7 @@ bool L1TCaloParamsOnlineProd::readCaloLayer2OnlineSettings(l1t::CaloParamsHelper paramsHelper.setTauIsolationLUT(l1t::convertToLUT(conf["tauIsoLUT"].getVector())); paramsHelper.setTauTrimmingShapeVetoLUT(l1t::convertToLUT(conf["tauTrimmingLUT"].getVector())); - paramsHelper.setJetSeedThreshold((conf["jetSeedThreshold"].getValue()) / 2); + paramsHelper.setJetSeedThreshold((conf["jetSeedThreshold"].getValue()) / 2.); paramsHelper.setJetBypassPUS(conf["jetBypassPileUpSub"].getValue()); paramsHelper.setJetPUSUsePhiRing(conf["jetPUSUsePhiRing"].getValue()); 
paramsHelper.setJetCalibrationLUT(l1t::convertToLUT(conf["jetEnergyCalibLUT"].getVector())); @@ -179,10 +179,10 @@ bool L1TCaloParamsOnlineProd::readCaloLayer2OnlineSettings(l1t::CaloParamsHelper etSumEtaMax.push_back(conf["towerCountMaxEta"].getValue()); etSumEtThresh.push_back(0); //deprecated by EttPUSLUT - etSumEtThresh.push_back(conf["HT_jetThreshold"].getValue() / 2); + etSumEtThresh.push_back(conf["HT_jetThreshold"].getValue() / 2.); etSumEtThresh.push_back(0); //deprecated by MetPUSLUT - etSumEtThresh.push_back(conf["MHT_jetThreshold"].getValue() / 2); - etSumEtThresh.push_back(conf["towerCountThreshold"].getValue() / 2); + etSumEtThresh.push_back(conf["MHT_jetThreshold"].getValue() / 2.); + etSumEtThresh.push_back(conf["towerCountThreshold"].getValue() / 2.); for (uint i = 0; i < 5; ++i) { paramsHelper.setEtSumEtaMax(i, etSumEtaMax.at(i)); @@ -200,8 +200,8 @@ bool L1TCaloParamsOnlineProd::readCaloLayer2OnlineSettings(l1t::CaloParamsHelper etSumCentUpperValues = conf["ET_centralityUpperThresholds"].getVector(); for (uint i = 0; i < 8; ++i) { - paramsHelper.setEtSumCentLower(i, etSumCentLowerValues[i] / 2); - paramsHelper.setEtSumCentUpper(i, etSumCentUpperValues[i] / 2); + paramsHelper.setEtSumCentLower(i, etSumCentLowerValues[i] / 2.); + paramsHelper.setEtSumCentUpper(i, etSumCentUpperValues[i] / 2.); } // demux tower sum calib LUTs diff --git a/L1TriggerConfig/RPCTriggerConfig/test/TestBxOrConfig.cc b/L1TriggerConfig/RPCTriggerConfig/test/TestBxOrConfig.cc deleted file mode 100644 index 83634951b6ef2..0000000000000 --- a/L1TriggerConfig/RPCTriggerConfig/test/TestBxOrConfig.cc +++ /dev/null @@ -1,111 +0,0 @@ -// -*- C++ -*- -// -// Package: TestBxOrConfig -// Class: TestBxOrConfig -// -/**\class TestBxOrConfig TestBxOrConfig.cc L1TriggerConfig/TestBxOrConfig/src/TestBxOrConfig.cc - - Description: - - Implementation: - -*/ - -// system include files -#include - -// user include files -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include 
"FWCore/Framework/interface/EDAnalyzer.h" - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/MakerMacros.h" - -#include "FWCore/ParameterSet/interface/ParameterSet.h" - -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/ESHandle.h" - - - -#include "CondFormats/DataRecord/interface/L1RPCBxOrConfigRcd.h" -#include "CondFormats/L1TObjects/interface/L1RPCBxOrConfig.h" - -// -// class decleration -// - -class TestBxOrConfig : public edm::EDAnalyzer { - public: - explicit TestBxOrConfig(const edm::ParameterSet&); - ~TestBxOrConfig(); - - - private: - virtual void beginJob() ; - virtual void analyze(const edm::Event&, const edm::EventSetup&); - virtual void endJob() ; - - // ----------member data --------------------------- -}; - -// -// constants, enums and typedefs -// - -// -// static data member definitions -// - -// -// constructors and destructor -// -TestBxOrConfig::TestBxOrConfig(const edm::ParameterSet& iConfig) - -{ - //now do what ever initialization is needed - -} - - -TestBxOrConfig::~TestBxOrConfig() -{ - - // do anything here that needs to be done at desctruction time - // (e.g. close files, deallocate resources etc.) 
- -} - - -// -// member functions -// - -// ------------ method called to for each event ------------ -void -TestBxOrConfig::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) -{ - using namespace edm; - edm::ESHandle bxOrConfig; - iSetup.get().get(bxOrConfig); - - std::cout << "Checking BX Or settings" << std::endl; - - std::cout<< "First BX : "<getFirstBX()<<", Last BX : "<getLastBX()< + + + + + + \ No newline at end of file diff --git a/L1TriggerScouting/Utilities/interface/conversion.h b/L1TriggerScouting/Utilities/interface/conversion.h new file mode 100644 index 0000000000000..45b233cbb07b9 --- /dev/null +++ b/L1TriggerScouting/Utilities/interface/conversion.h @@ -0,0 +1,37 @@ +#ifndef L1TriggerScouting_Utilities_conversion_h +#define L1TriggerScouting_Utilities_conversion_h + +#include "L1TriggerScouting/Utilities/interface/scales.h" +#include + +namespace l1ScoutingRun3 { + + inline float _setPhiRange(float phi) { + phi = phi >= M_PI ? phi - 2. * M_PI : phi; + return phi; + } + + namespace ugmt { + + inline float fPt(int hwPt) { return scales::pt_scale * (hwPt - 1); }; + inline float fEta(int hwEta) { return scales::eta_scale * hwEta; }; + inline float fPhi(int hwPhi) { return _setPhiRange(scales::phi_scale * hwPhi); }; + inline float fPtUnconstrained(int hwPtUnconstrained) { + return scales::ptunconstrained_scale * (hwPtUnconstrained - 1); + }; + inline float fEtaAtVtx(int hwEtaAtVtx) { return scales::eta_scale * hwEtaAtVtx; }; + inline float fPhiAtVtx(int hwPhiAtVtx) { return _setPhiRange(scales::phi_scale * hwPhiAtVtx); }; + + } // namespace ugmt + + namespace demux { + + inline float fEt(int hwEt) { return scales::et_scale * hwEt; }; + inline float fEta(int hwEta) { return scales::eta_scale * hwEta; }; + inline float fPhi(int hwPhi) { return _setPhiRange(scales::phi_scale * hwPhi); }; + + } // namespace demux + +} // namespace l1ScoutingRun3 + +#endif \ No newline at end of file diff --git 
a/L1TriggerScouting/Utilities/interface/convertToL1TFormat.h b/L1TriggerScouting/Utilities/interface/convertToL1TFormat.h new file mode 100644 index 0000000000000..93914e4afea23 --- /dev/null +++ b/L1TriggerScouting/Utilities/interface/convertToL1TFormat.h @@ -0,0 +1,26 @@ +#ifndef L1TriggerScouting_Utilities_convertToL1TFormat_h +#define L1TriggerScouting_Utilities_convertToL1TFormat_h + +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingCalo.h" +#include "DataFormats/L1Trigger/interface/Muon.h" +#include "DataFormats/L1Trigger/interface/Jet.h" +#include "DataFormats/L1Trigger/interface/EGamma.h" +#include "DataFormats/L1Trigger/interface/Tau.h" +#include "DataFormats/L1Trigger/interface/EtSum.h" + +#include "L1TriggerScouting/Utilities/interface/conversion.h" + +#include "iostream" + +namespace l1ScoutingRun3 { + + l1t::Muon getL1TMuon(const Muon& muon); + l1t::Jet getL1TJet(const Jet& jet); + l1t::EGamma getL1TEGamma(const EGamma& eGamma); + l1t::Tau getL1TTau(const Tau& scTau); + l1t::EtSum getL1TEtSum(const BxSums& sums, l1t::EtSum::EtSumType); + +} // namespace l1ScoutingRun3 + +#endif // L1TriggerScouting_Utilities_convertToL1TFormat_h \ No newline at end of file diff --git a/L1TriggerScouting/Utilities/interface/printScObjects.h b/L1TriggerScouting/Utilities/interface/printScObjects.h new file mode 100644 index 0000000000000..c2a6257811de5 --- /dev/null +++ b/L1TriggerScouting/Utilities/interface/printScObjects.h @@ -0,0 +1,22 @@ +#ifndef L1TriggerScouting_Utilities_printScObjects_h +#define L1TriggerScouting_Utilities_printScObjects_h + +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingCalo.h" +#include "L1TriggerScouting/Utilities/interface/conversion.h" + +#include "iostream" + +namespace l1ScoutingRun3 { + + void printMuon(const Muon& muon, std::ostream& outs = std::cout); + template + void printCaloObject(const T& obj, 
std::ostream& outs = std::cout); + void printJet(const Jet& jet, std::ostream& outs = std::cout); + void printEGamma(const EGamma& eGamma, std::ostream& outs = std::cout); + void printTau(const Tau& tau, std::ostream& outs = std::cout); + void printBxSums(const BxSums& sums, std::ostream& outs = std::cout); + +} // namespace l1ScoutingRun3 + +#endif // L1TriggerScouting_Utilities_printScObjects_h \ No newline at end of file diff --git a/L1TriggerScouting/Utilities/interface/scales.h b/L1TriggerScouting/Utilities/interface/scales.h new file mode 100644 index 0000000000000..45995518619ff --- /dev/null +++ b/L1TriggerScouting/Utilities/interface/scales.h @@ -0,0 +1,30 @@ +#ifndef L1TriggerScouting_Utilities_scales_h +#define L1TriggerScouting_Utilities_scales_h + +#include +#include + +namespace l1ScoutingRun3 { + + // Scaled used to convert scouting hw values to physical quantities + + namespace ugmt { + struct scales { + static constexpr float pt_scale = 0.5; + static constexpr float ptunconstrained_scale = 1.0; + static constexpr float phi_scale = 2. * M_PI / 576.; + static constexpr float eta_scale = 0.0870 / 8; + static constexpr float phi_range = M_PI; + }; + } // namespace ugmt + + namespace demux { + struct scales { + static constexpr float phi_scale = 2. 
* M_PI / 144.; + static constexpr float eta_scale = 0.0435; + static constexpr float et_scale = 0.5; + }; + } // namespace demux + +} // namespace l1ScoutingRun3 +#endif // L1TriggerScouting_Utilities_scales_h diff --git a/L1TriggerScouting/Utilities/plugins/BuildFile.xml b/L1TriggerScouting/Utilities/plugins/BuildFile.xml new file mode 100644 index 0000000000000..2e16c87937d04 --- /dev/null +++ b/L1TriggerScouting/Utilities/plugins/BuildFile.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/L1TriggerScouting/Utilities/plugins/DumpScObjects.cc b/L1TriggerScouting/Utilities/plugins/DumpScObjects.cc new file mode 100644 index 0000000000000..bb57a7ed37da6 --- /dev/null +++ b/L1TriggerScouting/Utilities/plugins/DumpScObjects.cc @@ -0,0 +1,235 @@ +#include "FWCore/Framework/interface/MakerMacros.h" + +#include +#include +#include +#include +#include + +#include "FWCore/Framework/interface/stream/EDAnalyzer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/MessageLogger/interface/MessageDrop.h" + +#include "DataFormats/L1Scouting/interface/OrbitCollection.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingMuon.h" +#include "DataFormats/L1Scouting/interface/L1ScoutingCalo.h" +#include "L1TriggerScouting/Utilities/interface/printScObjects.h" +#include "L1TriggerScouting/Utilities/interface/convertToL1TFormat.h" + +using namespace l1ScoutingRun3; + +// ----------------------------- CLASS DECLARATION ---------------------------- +class DumpScObjects : public edm::stream::EDAnalyzer<> { +public: + // constructor and destructor + explicit DumpScObjects(const edm::ParameterSet&); + ~DumpScObjects() override{}; 
+ + // method for analyzing the events + void analyze(const edm::Event&, const edm::EventSetup&) override; + +private: + // dump contenct of BX + void printBx(unsigned bx); + + // the tokens to access the data + edm::EDGetTokenT gmtMuonsToken_; + edm::EDGetTokenT caloJetsToken_; + edm::EDGetTokenT caloEGammasToken_; + edm::EDGetTokenT caloTausToken_; + edm::EDGetTokenT caloEtSumsToken_; + + edm::Handle muonHandle_; + edm::Handle jetHandle_; + edm::Handle eGammaHandle_; + edm::Handle tauHandle_; + edm::Handle etSumHandle_; + + // the min and max BX to be analyzed + unsigned minBx_; + unsigned maxBx_; + + // select collection to be printed + bool checkMuons_; + bool checkJets_; + bool checkEGammas_; + bool checkTaus_; + bool checkEtSums_; + + // dump a specific (ORBIT, BX RANGE) + bool searchEvent_; + unsigned orbitNum_; + unsigned searchStartBx_; + unsigned searchStopBx_; + + // utils + bool skipEmptyBx_; +}; +// ----------------------------------------------------------------------------- + +// -------------------------------- constructor ------------------------------- + +DumpScObjects::DumpScObjects(const edm::ParameterSet& iConfig) + : minBx_(iConfig.getUntrackedParameter("minBx", 0)), + maxBx_(iConfig.getUntrackedParameter("maxBx", 3564)), + + checkMuons_(iConfig.getUntrackedParameter("checkMuons", true)), + checkJets_(iConfig.getUntrackedParameter("checkJets", true)), + checkEGammas_(iConfig.getUntrackedParameter("checkEGammas", true)), + checkTaus_(iConfig.getUntrackedParameter("checkTaus", true)), + checkEtSums_(iConfig.getUntrackedParameter("checkEtSums", true)), + + searchEvent_(iConfig.getUntrackedParameter("searchEvent", false)), + orbitNum_(iConfig.getUntrackedParameter("orbitNumber", 0)), + searchStartBx_(iConfig.getUntrackedParameter("searchStartBx", 0)), + searchStopBx_(iConfig.getUntrackedParameter("searchStopBx", 0)), + + skipEmptyBx_(iConfig.getUntrackedParameter("skipEmptyBx", true)) { + if (checkMuons_) + gmtMuonsToken_ = 
consumes(iConfig.getParameter("gmtMuonsTag")); + if (checkJets_) + caloJetsToken_ = consumes(iConfig.getParameter("caloJetsTag")); + if (checkEGammas_) + caloEGammasToken_ = consumes(iConfig.getParameter("caloEGammasTag")); + if (checkTaus_) + caloTausToken_ = consumes(iConfig.getParameter("caloTausTag")); + if (checkEtSums_) + caloEtSumsToken_ = consumes(iConfig.getParameter("caloEtSumsTag")); +} +// ----------------------------------------------------------------------------- + +// ----------------------- method called for each orbit ----------------------- +void DumpScObjects::analyze(const edm::Event& iEvent, const edm::EventSetup& evSetup) { + if (checkMuons_) + iEvent.getByToken(gmtMuonsToken_, muonHandle_); + if (checkJets_) + iEvent.getByToken(caloJetsToken_, jetHandle_); + if (checkEGammas_) + iEvent.getByToken(caloEGammasToken_, eGammaHandle_); + if (checkTaus_) + iEvent.getByToken(caloTausToken_, tauHandle_); + if (checkEtSums_) + iEvent.getByToken(caloEtSumsToken_, etSumHandle_); + + // get the orbit number + unsigned currOrbit = iEvent.id().event(); + + // if we are looking for a specific orbit + if (searchEvent_) { + if (currOrbit != orbitNum_) + return; + + // found the orbit + for (unsigned bx = searchStartBx_; bx <= searchStopBx_; bx++) { + printBx(bx); + } + } else { + if (skipEmptyBx_) { + // create a set of non empty BXs + std::set uniqueBx; + + if (checkMuons_) { + for (const unsigned& bx : muonHandle_->getFilledBxs()) { + if ((bx >= minBx_) || (bx <= maxBx_)) + uniqueBx.insert(bx); + } + } + if (checkJets_) { + for (const unsigned& bx : jetHandle_->getFilledBxs()) { + if ((bx >= minBx_) || (bx <= maxBx_)) + uniqueBx.insert(bx); + } + } + if (checkEGammas_) { + for (const unsigned& bx : eGammaHandle_->getFilledBxs()) { + if ((bx >= minBx_) || (bx <= maxBx_)) + uniqueBx.insert(bx); + } + } + if (checkTaus_) { + for (const unsigned& bx : tauHandle_->getFilledBxs()) { + if ((bx >= minBx_) || (bx <= maxBx_)) + uniqueBx.insert(bx); + } + } + if 
(checkEtSums_) { + for (const unsigned& bx : etSumHandle_->getFilledBxs()) { + if ((bx >= minBx_) || (bx <= maxBx_)) + uniqueBx.insert(bx); + } + } + + // process bx + for (const unsigned& bx : uniqueBx) { + printBx(bx); + } + + } else { + // dump all objects + for (unsigned bx = minBx_; bx <= maxBx_; bx++) { + printBx(bx); + } + } + } +} +// ----------------------------------------------------------------------------- + +void DumpScObjects::printBx(unsigned bx) { + std::cout << "BX = " << bx << " ****" << std::endl; + + if (checkMuons_ && muonHandle_.isValid()) { + int i = 0; + const auto& muons = muonHandle_->bxIterator(bx); + for (const auto& muon : muons) { + std::cout << "--- Muon " << i << " ---\n"; + printMuon(muon); + i++; + } + } + + if (checkJets_ && jetHandle_.isValid()) { + int i = 0; + const auto& jets = jetHandle_->bxIterator(bx); + for (const auto& jet : jets) { + std::cout << "--- Jet " << i << " ---\n"; + printJet(jet); + i++; + } + } + + if (checkEGammas_ && jetHandle_.isValid()) { + int i = 0; + const auto& eGammas = eGammaHandle_->bxIterator(bx); + for (const auto& egamma : eGammas) { + std::cout << "--- E/Gamma " << i << " ---\n"; + printEGamma(egamma); + i++; + } + } + + if (checkTaus_ && tauHandle_.isValid()) { + int i = 0; + const auto& taus = tauHandle_->bxIterator(bx); + for (const auto& tau : taus) { + std::cout << "--- Tau " << i << " ---\n"; + printTau(tau); + i++; + } + } + + if (checkEtSums_ && etSumHandle_.isValid()) { + const auto& sums = etSumHandle_->bxIterator(bx); + for (const auto& sum : sums) { + std::cout << "--- Calo Sums ---\n"; + printBxSums(sum); + } + } +} + +DEFINE_FWK_MODULE(DumpScObjects); diff --git a/L1TriggerScouting/Utilities/src/convertToL1TFormat.cc b/L1TriggerScouting/Utilities/src/convertToL1TFormat.cc new file mode 100644 index 0000000000000..bb2ffda7440ba --- /dev/null +++ b/L1TriggerScouting/Utilities/src/convertToL1TFormat.cc @@ -0,0 +1,144 @@ +#include 
"L1TriggerScouting/Utilities/interface/convertToL1TFormat.h" + +namespace l1ScoutingRun3 { + + l1t::Muon getL1TMuon(const Muon& muon) { + return l1t::Muon( + math::PtEtaPhiMLorentzVector(ugmt::fPt(muon.hwPt()), ugmt::fEta(muon.hwEta()), ugmt::fPhi(muon.hwPhi()), 0.), + muon.hwPt(), + muon.hwEta(), + muon.hwPhi(), + muon.hwQual(), + muon.hwCharge(), + muon.hwChargeValid(), + 0, + muon.tfMuonIndex(), + 0, + false, + 0, + 0, + 0, + 0, + muon.hwEtaAtVtx(), + muon.hwPhiAtVtx(), + ugmt::fEtaAtVtx(muon.hwEtaAtVtx()), + ugmt::fPhiAtVtx(muon.hwPhiAtVtx()), + muon.hwPtUnconstrained(), + ugmt::fPtUnconstrained(muon.hwPtUnconstrained()), + muon.hwDXY()); + } + + l1t::Jet getL1TJet(const Jet& jet) { + return l1t::Jet( + math::PtEtaPhiMLorentzVector(demux::fEt(jet.hwEt()), demux::fEta(jet.hwEta()), demux::fPhi(jet.hwPhi()), 0.), + jet.hwEt(), + jet.hwEta(), + jet.hwPhi(), + jet.hwIso()); + } + + l1t::EGamma getL1TEGamma(const EGamma& eGamma) { + return l1t::EGamma(math::PtEtaPhiMLorentzVector( + demux::fEt(eGamma.hwEt()), demux::fEta(eGamma.hwEta()), demux::fPhi(eGamma.hwPhi()), 0.), + eGamma.hwEt(), + eGamma.hwEta(), + eGamma.hwPhi(), + 0, + eGamma.hwIso()); + } + + l1t::Tau getL1TTau(const Tau& tau) { + return l1t::Tau( + math::PtEtaPhiMLorentzVector(demux::fEt(tau.hwEt()), demux::fEta(tau.hwEta()), demux::fPhi(tau.hwPhi()), 0.), + tau.hwEt(), + tau.hwEta(), + tau.hwPhi(), + 0, + tau.hwIso()); + } + + l1t::EtSum getL1TEtSum(const BxSums& sums, l1t::EtSum::EtSumType sumType) { + switch (sumType) { + case l1t::EtSum::kTotalEt: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwTotalEt()), 0., 0., 0.), sumType, sums.hwTotalEt(), 0, 0, 0); + case l1t::EtSum::kTotalEtEm: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwTotalEtEm()), 0., 0., 0.), + sumType, + sums.hwTotalEtEm(), + 0, + 0, + 0); + case l1t::EtSum::kTotalHt: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwTotalHt()), 0., 0., 0.), sumType, sums.hwTotalHt(), 0, 
0, 0); + case l1t::EtSum::kMissingEt: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwMissEt()), 0., demux::fPhi(sums.hwMissEtPhi()), 0.), + sumType, + sums.hwMissEt(), + 0, + sums.hwMissEtPhi(), + 0); + case l1t::EtSum::kMissingHt: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwMissHt()), 0., demux::fPhi(sums.hwMissHtPhi()), 0.), + sumType, + sums.hwMissHt(), + 0, + sums.hwMissHtPhi(), + 0); + case l1t::EtSum::kMissingEtHF: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwMissEtHF()), 0., demux::fPhi(sums.hwMissEtHFPhi()), 0.), + sumType, + sums.hwMissEtHF(), + 0, + sums.hwMissEtHFPhi(), + 0); + case l1t::EtSum::kMissingHtHF: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwMissHtHF()), 0., demux::fPhi(sums.hwMissHtHFPhi()), 0.), + sumType, + sums.hwMissHtHF(), + 0, + sums.hwMissHtHFPhi(), + 0); + case l1t::EtSum::kAsymEt: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwAsymEt()), 0., 0., 0.), sumType, sums.hwAsymEt(), 0, 0, 0); + case l1t::EtSum::kAsymHt: + return l1t::EtSum( + math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwAsymHt()), 0., 0., 0.), sumType, sums.hwAsymHt(), 0, 0, 0); + case l1t::EtSum::kAsymEtHF: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwAsymEtHF()), 0., 0., 0.), + sumType, + sums.hwAsymEtHF(), + 0, + 0, + 0); + case l1t::EtSum::kAsymHtHF: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(demux::fEt(sums.hwAsymHtHF()), 0., 0., 0.), + sumType, + sums.hwAsymHtHF(), + 0, + 0, + 0); + case l1t::EtSum::kMinBiasHFP0: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), sumType, sums.minBiasHFP0(), 0, 0, 0); + case l1t::EtSum::kMinBiasHFP1: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), sumType, sums.minBiasHFP1(), 0, 0, 0); + case l1t::EtSum::kMinBiasHFM0: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), sumType, sums.minBiasHFM0(), 0, 0, 0); + case l1t::EtSum::kMinBiasHFM1: 
+ return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), sumType, sums.minBiasHFM1(), 0, 0, 0); + case l1t::EtSum::kCentrality: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), sumType, sums.centrality(), 0, 0, 0); + case l1t::EtSum::kTowerCount: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), sumType, sums.towerCount(), 0, 0, 0); + default: + return l1t::EtSum(math::PtEtaPhiMLorentzVector(0., 0., 0., 0.), l1t::EtSum::kUninitialized, 0, 0, 0, 0); + } + } + +} // namespace l1ScoutingRun3 \ No newline at end of file diff --git a/L1TriggerScouting/Utilities/src/printScObjects.cc b/L1TriggerScouting/Utilities/src/printScObjects.cc new file mode 100644 index 0000000000000..42a9e8e27bb3b --- /dev/null +++ b/L1TriggerScouting/Utilities/src/printScObjects.cc @@ -0,0 +1,71 @@ +#include "L1TriggerScouting/Utilities/interface/printScObjects.h" + +namespace l1ScoutingRun3 { + + void printMuon(const Muon& muon, std::ostream& outs) { + outs << " Pt [GeV/Hw]: " << ugmt::fPt(muon.hwPt()) << "/" << muon.hwPt() << "\n" + << " Eta [rad/Hw]: " << ugmt::fEta(muon.hwEta()) << "/" << muon.hwEta() << "\n" + << " Phi [rad/Hw]: " << ugmt::fPhi(muon.hwPhi()) << "/" << muon.hwPhi() << "\n" + << " Charge/valid: " << muon.hwCharge() << "/" << muon.hwChargeValid() << "\n" + << " PhiVtx [rad/Hw]: " << ugmt::fPhiAtVtx(muon.hwPhiAtVtx()) << "/" << muon.hwPhiAtVtx() << "\n" + << " EtaVtx [rad/Hw]: " << ugmt::fEtaAtVtx(muon.hwEtaAtVtx()) << "/" << muon.hwEtaAtVtx() << "\n" + << " Pt uncon[GeV/Hw]: " << ugmt::fPtUnconstrained(muon.hwPtUnconstrained()) << "/" + << muon.hwPtUnconstrained() << "\n" + << " Dxy: " << muon.hwDXY() << "\n" + << " Qual: " << muon.hwQual() << "\n" + << " TF index: " << muon.tfMuonIndex() << "\n"; + } + + template + void printCaloObject(const T& obj, std::ostream& outs) { + outs << " Et [GeV/Hw]: " << demux::fEt(obj.hwEt()) << "/" << obj.hwEt() << "\n" + << " Eta [rad/Hw]: " << demux::fEta(obj.hwEta()) << "/" << obj.hwEta() << "\n" + 
<< " Phi [rad/Hw]: " << demux::fPhi(obj.hwPhi()) << "/" << obj.hwPhi() << "\n" + << " Iso [Hw]: " << obj.hwIso() << "\n"; + } + + void printJet(const Jet& jet, std::ostream& outs) { printCaloObject(jet, outs); } + void printEGamma(const EGamma& eGamma, std::ostream& outs) { printCaloObject(eGamma, outs); } + void printTau(const Tau& tau, std::ostream& outs) { printCaloObject(tau, outs); } + + void printBxSums(const BxSums& sums, std::ostream& outs) { + outs << "Total ET\n" + << " Et [GeV/Hw]: " << demux::fEt(sums.hwTotalEt()) << "/" << sums.hwTotalEt() << "\n" + << "Total ETEm\n" + << " Et [GeV/Hw]: " << demux::fEt(sums.hwTotalEtEm()) << "/" << sums.hwTotalEtEm() << "\n" + << "Total HT\n" + << " Et [GeV/Hw]: " << demux::fEt(sums.hwTotalHt()) << "/" << sums.hwTotalHt() << "\n" + << "Missing ET\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwMissEt()) << "/" << sums.hwMissEt() << "\n" + << " Phi [Rad/Hw]: " << demux::fPhi(sums.hwMissEtPhi()) << "/" << sums.hwMissEtPhi() << "\n" + << "Missing HT\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwMissHt()) << "/" << sums.hwMissHt() << "\n" + << " Phi [Rad/Hw]: " << demux::fPhi(sums.hwMissHtPhi()) << "/" << sums.hwMissHtPhi() << "\n" + << "Missing ETHF\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwMissEtHF()) << "/" << sums.hwMissEtHF() << "\n" + << " Phi [Rad/Hw]: " << demux::fPhi(sums.hwMissEtHFPhi()) << "/" << sums.hwMissEtHFPhi() << "\n" + << "Missing HTHF\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwMissHtHF()) << "/" << sums.hwMissHtHF() << "\n" + << " Phi [Rad/Hw]: " << demux::fPhi(sums.hwMissHtHFPhi()) << "/" << sums.hwMissHtHFPhi() << "\n" + << "AsymEt\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwAsymEt()) << "/" << sums.hwAsymEt() << "\n" + << "AsymHt\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwAsymHt()) << "/" << sums.hwAsymHt() << "\n" + << "AsymEtHF\n" + << " Et [GeV/Hw] : " << demux::fEt(sums.hwAsymEtHF()) << "/" << sums.hwAsymEtHF() << "\n" + << "AsymHtHF\n" + << " Et [GeV/Hw] : " << 
demux::fEt(sums.hwAsymHtHF()) << "/" << sums.hwAsymHtHF() << "\n" + << "MinBiasHFP0\n" + << " Hw: " << sums.minBiasHFP0() << "\n" + << "MinBiasHFM0\n" + << " Hw: " << sums.minBiasHFM0() << "\n" + << "MinBiasHFP1\n" + << " Hw: " << sums.minBiasHFP1() << "\n" + << "MinBiasHFM1\n" + << " Hw: " << sums.minBiasHFM1() << "\n" + << "Centrality\n" + << " Hw: " << sums.centrality() << "\n" + << "Tower Count\n" + << " Hw: " << sums.towerCount() << "\n"; + } +} // namespace l1ScoutingRun3 \ No newline at end of file diff --git a/L1TriggerScouting/Utilities/test/dumpScObjects.py b/L1TriggerScouting/Utilities/test/dumpScObjects.py new file mode 100644 index 0000000000000..957ac3d77e7a2 --- /dev/null +++ b/L1TriggerScouting/Utilities/test/dumpScObjects.py @@ -0,0 +1,54 @@ +import FWCore.ParameterSet.Config as cms +import FWCore.ParameterSet.VarParsing as VarParsing + +options = VarParsing.VarParsing ('analysis') + +options.register ('numOrbits', + -1, + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.int, + "Number of orbits to process") + +options.register ('filePath', + "file:/dev/shm/PoolOutputTest.root", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "Sub lumisection number to process") + +options.parseArguments() + +process = cms.Process( "DUMP" ) + + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(options.numOrbits) +) + +process.load("FWCore.MessageService.MessageLogger_cfi") + +process.source = cms.Source("PoolSource", + fileNames = cms.untracked.vstring(options.filePath) +) + +process.dump = cms.EDAnalyzer("DumpScObjects", + gmtMuonsTag = cms.InputTag("GmtUnpacker", "", "SCPU"), + caloJetsTag = cms.InputTag("CaloUnpacker", "", "SCPU"), + caloEGammasTag = cms.InputTag("CaloUnpacker", "", "SCPU"), + caloTausTag = cms.InputTag("CaloUnpacker", "", "SCPU"), + caloEtSumsTag = cms.InputTag("CaloUnpacker", "", "SCPU"), + minBx = cms.untracked.uint32(0), + maxBx = 
cms.untracked.uint32(3564), + + skipEmptyBx = cms.untracked.bool(True), # don't show empty BX + + #checkMuons = cms.untracked.bool(False), # test removing a collection + + searchEvent = cms.untracked.bool(True), + orbitNumber = cms.untracked.uint32(88981531), + searchStartBx = cms.untracked.uint32(1027-2), + searchStopBx = cms.untracked.uint32(1027+2), +) + +process.p = cms.Path( + process.dump +) \ No newline at end of file diff --git a/MagneticField/ParametrizedEngine/src/rz_harm_poly.h b/MagneticField/ParametrizedEngine/src/rz_harm_poly.h index d197892619c69..731c1e9852115 100644 --- a/MagneticField/ParametrizedEngine/src/rz_harm_poly.h +++ b/MagneticField/ParametrizedEngine/src/rz_harm_poly.h @@ -17,7 +17,6 @@ namespace magfieldparam { double SinPhi; trig_pair() : CosPhi(1.), SinPhi(0.) {} - trig_pair(const trig_pair &tp) : CosPhi(tp.CosPhi), SinPhi(tp.SinPhi) {} trig_pair(const double C, const double S) : CosPhi(C), SinPhi(S) {} trig_pair(const double phi) : CosPhi(cos(phi)), SinPhi(sin(phi)) {} diff --git a/PhysicsTools/HepMCCandAlgos/test/TestGenParticleCandidates.cc b/PhysicsTools/HepMCCandAlgos/test/TestGenParticleCandidates.cc deleted file mode 100755 index 2eae9a92e269c..0000000000000 --- a/PhysicsTools/HepMCCandAlgos/test/TestGenParticleCandidates.cc +++ /dev/null @@ -1,58 +0,0 @@ -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Event.h" -#include "DataFormats/Common/interface/Handle.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/Utilities/interface/InputTag.h" -#include "DataFormats/Candidate/interface/Candidate.h" -#include "FWCore/Utilities/interface/EDMException.h" -using namespace std; -using namespace edm; -using namespace reco; - -class TestGenParticleCandidates : public EDAnalyzer { -private: - bool dumpHepMC_; -public: - explicit TestGenParticleCandidates( const ParameterSet & cfg ) : - srcToken_( consumes( cfg.getParameter( "src" ) ) ) { - } -private: - void analyze( const 
Event & evt, const EventSetup&) override { - Handle gen; - evt.getByToken( srcToken_, gen ); - size_t n = gen->size(); - if (n == 0) - throw Exception(errors::EventCorruption) - << "No particles in genParticleCandidates\n"; - for(size_t i = 0; i < n; ++ i) { - const Candidate & p = (*gen)[i]; - size_t nd = p.numberOfDaughters(); - if(nd==0 && p.status()==3) - throw Exception(errors::EventCorruption) - << "Particle with no daughters and status " << p.status() - << ", pdgId = " << p.pdgId() << "\n"; - for(size_t j = 0; j < nd; ++ j ) { - const Candidate * d = p.daughter(j); - size_t nm = d->numberOfMothers(); - bool noMother = true; - for(size_t k = 0; k < nm; ++ k ) { - if(d->mother(k)==&p) { - noMother = false; - break; - } - } - if(noMother) - throw Exception(errors::EventCorruption) - << "Inconsistent mother/daughter relation, pdgId = " << d->pdgId() << "\n"; - } - } - } - EDGetTokenT srcToken_; -}; - -#include "FWCore/Framework/interface/MakerMacros.h" - -DEFINE_FWK_MODULE( TestGenParticleCandidates ); - - - diff --git a/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducer.cc b/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducer.cc index 2af04cc18dc03..886a72515be46 100644 --- a/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducer.cc +++ b/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducer.cc @@ -37,8 +37,6 @@ namespace citk { public: PFIsolationSumProducer(const edm::ParameterSet&); - ~PFIsolationSumProducer() override {} - void beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup&) final; void produce(edm::Event&, const edm::EventSetup&) final; diff --git a/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducerForPUPPI.cc b/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducerForPUPPI.cc index 815977dab6177..db9732eb9a8ba 100644 --- a/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducerForPUPPI.cc +++ 
b/PhysicsTools/IsolationAlgos/plugins/CITKPFIsolationSumProducerForPUPPI.cc @@ -27,8 +27,6 @@ namespace citk { public: PFIsolationSumProducerForPUPPI(const edm::ParameterSet&); - ~PFIsolationSumProducerForPUPPI() override {} - void beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup&) final; void produce(edm::Event&, const edm::EventSetup&) final; diff --git a/PhysicsTools/JetMCAlgos/plugins/ttHFGenFilter.cc b/PhysicsTools/JetMCAlgos/plugins/ttHFGenFilter.cc index 7e86bfaf25a40..d98f1c1314615 100644 --- a/PhysicsTools/JetMCAlgos/plugins/ttHFGenFilter.cc +++ b/PhysicsTools/JetMCAlgos/plugins/ttHFGenFilter.cc @@ -44,14 +44,11 @@ class ttHFGenFilter : public edm::stream::EDFilter<> { public: explicit ttHFGenFilter(const edm::ParameterSet&); - ~ttHFGenFilter() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginStream(edm::StreamID) override; bool filter(edm::Event&, const edm::EventSetup&) override; - void endStream() override; virtual bool HasAdditionalBHadron(const std::vector&, const std::vector&, @@ -100,11 +97,6 @@ ttHFGenFilter::ttHFGenFilter(const edm::ParameterSet& iConfig) produces(); } -ttHFGenFilter::~ttHFGenFilter() { - // do anything here that needs to be done at destruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -260,44 +252,6 @@ void ttHFGenFilter::FindAllTopMothers(const reco::Candidate* particle, } } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void ttHFGenFilter::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void ttHFGenFilter::endStream() {} - -// ------------ method called when starting to processes a run ------------ -/* -void -ttHFGenFilter::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -ttHFGenFilter::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void -ttHFGenFilter::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -ttHFGenFilter::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void ttHFGenFilter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/PhysicsTools/MVAComputer/interface/Variable.h b/PhysicsTools/MVAComputer/interface/Variable.h index ea74807050bea..ff7040d48d4e6 100644 --- a/PhysicsTools/MVAComputer/interface/Variable.h +++ b/PhysicsTools/MVAComputer/interface/Variable.h @@ -132,6 +132,7 @@ namespace PhysicsTools { inline Variable() {} inline Variable(const Variable &orig) : name(orig.name), flags(orig.flags) {} inline Variable(AtomicId name, Flags flags = FLAG_NONE) : name(name), flags(flags) {} + Variable &operator=(const Variable &other) = default; const AtomicId getName() 
const { return name; } Flags getFlags() const { return flags; } diff --git a/PhysicsTools/NanoAOD/interface/SimpleFlatTableProducer.h b/PhysicsTools/NanoAOD/interface/SimpleFlatTableProducer.h index 85d63e7ca8948..4644c3ccf78de 100644 --- a/PhysicsTools/NanoAOD/interface/SimpleFlatTableProducer.h +++ b/PhysicsTools/NanoAOD/interface/SimpleFlatTableProducer.h @@ -135,8 +135,6 @@ class SimpleFlatTableProducerBase : public edm::stream::EDProducer<> { vars_.push_back(std::make_unique(vname, varPSet)); else if (type == "double") vars_.push_back(std::make_unique(vname, varPSet)); - else if (type == "int8") - vars_.push_back(std::make_unique(vname, varPSet)); else if (type == "uint8") vars_.push_back(std::make_unique(vname, varPSet)); else if (type == "int16") @@ -170,7 +168,7 @@ class SimpleFlatTableProducerBase : public edm::stream::EDProducer<> { variable.ifValue( edm::ParameterDescription( "type", "int", true, edm::Comment("the c++ type of the branch in the flat table")), - edm::allowedValues("int", "uint", "float", "double", "int8", "uint8", "int16", "uint16", "bool")); + edm::allowedValues("int", "uint", "float", "double", "uint8", "int16", "uint16", "bool")); variable.addOptionalNode( edm::ParameterDescription( "precision", true, edm::Comment("the precision with which to store the value in the flat table")) xor @@ -211,7 +209,6 @@ class SimpleFlatTableProducerBase : public edm::stream::EDProducer<> { typedef FuncVariable, uint32_t> UIntVar; typedef FuncVariable, float> FloatVar; typedef FuncVariable, double> DoubleVar; - typedef FuncVariable, int8_t> Int8Var; typedef FuncVariable, uint8_t> UInt8Var; typedef FuncVariable, int16_t> Int16Var; typedef FuncVariable, uint16_t> UInt16Var; @@ -245,9 +242,6 @@ class SimpleFlatTableProducer : public SimpleFlatTableProducerBase(vname, varPSet, this->consumesCollector(), this->skipNonExistingSrc_)); - else if (type == "int8") - extvars_.push_back( - std::make_unique(vname, varPSet, this->consumesCollector(), 
this->skipNonExistingSrc_)); else if (type == "uint8") extvars_.push_back( std::make_unique(vname, varPSet, this->consumesCollector(), this->skipNonExistingSrc_)); @@ -285,7 +279,7 @@ class SimpleFlatTableProducer : public SimpleFlatTableProducerBase( "type", "int", true, edm::Comment("the c++ type of the branch in the flat table")), - edm::allowedValues("int", "uint", "float", "double", "int8", "uint8", "int16", "uint16", "bool")); + edm::allowedValues("int", "uint", "float", "double", "uint8", "int16", "uint16", "bool")); extvariable.addOptionalNode( edm::ParameterDescription( "precision", true, edm::Comment("the precision with which to store the value in the flat table")) xor @@ -343,7 +337,6 @@ class SimpleFlatTableProducer : public SimpleFlatTableProducerBase FloatExtVar; typedef ValueMapVariable DoubleExtVar; typedef ValueMapVariable BoolExtVar; - typedef ValueMapVariable Int8ExtVar; typedef ValueMapVariable UInt8ExtVar; typedef ValueMapVariable Int16ExtVar; typedef ValueMapVariable UInt16ExtVar; diff --git a/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.cc b/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.cc index eeec3a7315009..da4ed564b245a 100644 --- a/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.cc +++ b/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.cc @@ -54,11 +54,6 @@ NanoAODBaseCrossCleaner::NanoAODBaseCrossCleaner(const edm::ParameterSet& params produces("photons"); } -NanoAODBaseCrossCleaner::~NanoAODBaseCrossCleaner() { - // do anything here that needs to be done at destruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -130,12 +125,6 @@ void NanoAODBaseCrossCleaner::produce(edm::Event& iEvent, const edm::EventSetup& iEvent.put(std::move(lowPtElectronsTable), "lowPtElectrons"); } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void NanoAODBaseCrossCleaner::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void NanoAODBaseCrossCleaner::endStream() {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void NanoAODBaseCrossCleaner::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; diff --git a/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.h b/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.h index 849218af1a95f..5c628db0c8e55 100644 --- a/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.h +++ b/PhysicsTools/NanoAOD/plugins/NanoAODBaseCrossCleaner.h @@ -48,14 +48,11 @@ class NanoAODBaseCrossCleaner : public edm::stream::EDProducer<> { public: explicit NanoAODBaseCrossCleaner(const edm::ParameterSet&); - ~NanoAODBaseCrossCleaner() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginStream(edm::StreamID) override; void produce(edm::Event&, const edm::EventSetup&) override; - void endStream() override; virtual void objectSelection(const edm::View& jets, const edm::View& muons, const edm::View& eles, @@ -67,11 +64,6 @@ class NanoAODBaseCrossCleaner : public edm::stream::EDProducer<> { std::vector& tauBits, std::vector& photonBits){}; - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock 
const&, edm::EventSetup const&) override; - // ----------member data --------------------------- const std::string name_; const std::string doc_; diff --git a/PhysicsTools/NanoAOD/plugins/NanoAODDQM.cc b/PhysicsTools/NanoAOD/plugins/NanoAODDQM.cc index 796325e9e3499..17e5fde624ea2 100644 --- a/PhysicsTools/NanoAOD/plugins/NanoAODDQM.cc +++ b/PhysicsTools/NanoAOD/plugins/NanoAODDQM.cc @@ -97,9 +97,6 @@ class NanoAODDQM : public DQMEDAnalyzer { if (icol == -1) return; // columns may be missing (e.g. mc-only) switch (table.columnType(icol)) { - case FlatTable::ColumnType::Int8: - vfill(table, icol, rowsel); - break; case FlatTable::ColumnType::UInt8: vfill(table, icol, rowsel); break; diff --git a/PhysicsTools/NanoAOD/plugins/TableOutputBranches.cc b/PhysicsTools/NanoAOD/plugins/TableOutputBranches.cc index 784f89ae8b9ac..591c3b97795b8 100644 --- a/PhysicsTools/NanoAOD/plugins/TableOutputBranches.cc +++ b/PhysicsTools/NanoAOD/plugins/TableOutputBranches.cc @@ -14,9 +14,6 @@ void TableOutputBranches::defineBranchesFromFirstEvent(const nanoaod::FlatTable for (size_t i = 0; i < tab.nColumns(); i++) { const std::string &var = tab.columnName(i); switch (tab.columnType(i)) { - case nanoaod::FlatTable::ColumnType::Int8: - m_int8Branches.emplace_back(var, tab.columnDoc(i), "B"); - break; case nanoaod::FlatTable::ColumnType::UInt8: m_uint8Branches.emplace_back(var, tab.columnDoc(i), "b"); break; @@ -65,8 +62,7 @@ void TableOutputBranches::branch(TTree &tree) { } } std::string varsize = m_singleton ? 
"" : "[n" + m_baseName + "]"; - for (std::vector *branches : {&m_int8Branches, - &m_uint8Branches, + for (std::vector *branches : {&m_uint8Branches, &m_int16Branches, &m_uint16Branches, &m_int32Branches, @@ -115,8 +111,6 @@ void TableOutputBranches::fill(const edm::OccurrenceForOutput &iWhatever, TTree "Mismatch in number of entries between extension and main table for " + tab.name()); } } - for (auto &pair : m_int8Branches) - fillColumn(pair, tab); for (auto &pair : m_uint8Branches) fillColumn(pair, tab); for (auto &pair : m_int16Branches) diff --git a/PhysicsTools/NanoAOD/plugins/TableOutputBranches.h b/PhysicsTools/NanoAOD/plugins/TableOutputBranches.h index f610559428781..29e71cd12856e 100644 --- a/PhysicsTools/NanoAOD/plugins/TableOutputBranches.h +++ b/PhysicsTools/NanoAOD/plugins/TableOutputBranches.h @@ -42,7 +42,6 @@ class TableOutputBranches { : name(aname), title(atitle), rootTypeCode(rootType), branch(branchptr) {} }; TBranch *m_counterBranch = nullptr; - std::vector m_int8Branches; std::vector m_uint8Branches; std::vector m_int16Branches; std::vector m_uint16Branches; diff --git a/PhysicsTools/NanoAOD/plugins/VertexTableProducer.cc b/PhysicsTools/NanoAOD/plugins/VertexTableProducer.cc index e79ecf40450c8..e728fcea73d2f 100644 --- a/PhysicsTools/NanoAOD/plugins/VertexTableProducer.cc +++ b/PhysicsTools/NanoAOD/plugins/VertexTableProducer.cc @@ -41,6 +41,9 @@ #include "RecoVertex/VertexPrimitives/interface/VertexState.h" #include "DataFormats/Common/interface/ValueMap.h" +#include "DataFormats/PatCandidates/interface/PackedCandidate.h" +#include "DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" + // // class declaration // @@ -48,23 +51,16 @@ class VertexTableProducer : public edm::stream::EDProducer<> { public: explicit VertexTableProducer(const edm::ParameterSet&); - ~VertexTableProducer() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginStream(edm::StreamID) override; void 
produce(edm::Event&, const edm::EventSetup&) override; - void endStream() override; - - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- const edm::EDGetTokenT> pvs_; + const edm::EDGetTokenT pfc_; const edm::EDGetTokenT> pvsScore_; const edm::EDGetTokenT> svs_; const StringCutObjectSelector svCut_; @@ -81,6 +77,7 @@ class VertexTableProducer : public edm::stream::EDProducer<> { // VertexTableProducer::VertexTableProducer(const edm::ParameterSet& params) : pvs_(consumes>(params.getParameter("pvSrc"))), + pfc_(consumes(params.getParameter("pfcSrc"))), pvsScore_(consumes>(params.getParameter("pvSrc"))), svs_(consumes>(params.getParameter("svSrc"))), svCut_(params.getParameter("svCut"), true), @@ -99,11 +96,6 @@ VertexTableProducer::VertexTableProducer(const edm::ParameterSet& params) produces>(); } -VertexTableProducer::~VertexTableProducer() { - // do anything here that needs to be done at destruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -115,6 +107,9 @@ void VertexTableProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe const auto& pvsScoreProd = iEvent.get(pvsScore_); auto pvsIn = iEvent.getHandle(pvs_); + //pf candidates collection + auto pfcIn = iEvent.getHandle(pfc_); + auto pvTable = std::make_unique(1, pvName_, true); pvTable->addColumnValue("ndof", (*pvsIn)[0].ndof(), "main primary vertex number of degree of freedom", 8); pvTable->addColumnValue("x", (*pvsIn)[0].position().x(), "main primary vertex position x coordinate", 10); @@ -131,6 +126,31 @@ void VertexTableProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe pvTable->addColumnValue( "score", pvsScoreProd.get(pvsIn.id(), 0), "main primary vertex score, i.e. sum pt2 of clustered objects", 8); + float pv_sumpt2 = 0.0; + for (const auto& obj : *pfcIn) { + if (obj.charge() == 0) { + continue; + } // skip neutrals + double dz = fabs(obj.dz((*pvsIn)[0].position())); + bool include_pfc = false; + if (dz < 0.2) { + include_pfc = true; + for (size_t j = 1; j < (*pvsIn).size(); j++) { + double newdz = fabs(obj.dz((*pvsIn)[j].position())); + if (newdz < dz) { + include_pfc = false; + break; + } + } // this pf candidate belongs to other PV + } + if (include_pfc) { + float pfc_pt = obj.pt(); + pv_sumpt2 += pfc_pt * pfc_pt; + } + } + pvTable->addColumnValue( + "sumpt2", pv_sumpt2, "sum pt2 of pf charged candidates for the main primary vertex", 10); + auto otherPVsTable = std::make_unique((*pvsIn).size() > 4 ? 
3 : (*pvsIn).size() - 1, "Other" + pvName_, false); std::vector pvsz; @@ -195,18 +215,13 @@ void VertexTableProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSe iEvent.put(std::move(selCandSv)); } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void VertexTableProducer::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void VertexTableProducer::endStream() {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void VertexTableProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; desc.add("pvSrc")->setComment( "std::vector and ValueMap primary vertex input collections"); + desc.add("pfcSrc")->setComment("packedPFCandidates input collections"); desc.add("goodPvCut")->setComment("selection on the primary vertex"); desc.add("svSrc")->setComment( "reco::VertexCompositePtrCandidate compatible secondary vertex input collection"); diff --git a/PhysicsTools/NanoAOD/plugins/rntuple/NanoAODRNTupleOutputModule.cc b/PhysicsTools/NanoAOD/plugins/rntuple/NanoAODRNTupleOutputModule.cc index 3813eefb77c74..037bdc611ea24 100644 --- a/PhysicsTools/NanoAOD/plugins/rntuple/NanoAODRNTupleOutputModule.cc +++ b/PhysicsTools/NanoAOD/plugins/rntuple/NanoAODRNTupleOutputModule.cc @@ -74,20 +74,20 @@ class NanoAODRNTupleOutputModule : public edm::one::OutputModule<> { class CommonEventFields { public: void createFields(RNTupleModel& model) { - model.AddField("run", &m_run); - model.AddField("luminosityBlock", &m_luminosityBlock); - model.AddField("event", &m_event); + m_run = model.MakeField("run"); + m_luminosityBlock = model.MakeField("luminosityBlock"); + m_event = model.MakeField("event"); } void fill(const edm::EventID& id) { - m_run = id.run(); - m_luminosityBlock = id.luminosityBlock(); - m_event = id.event(); + *m_run = 
id.run(); + *m_luminosityBlock = id.luminosityBlock(); + *m_event = id.event(); } private: - UInt_t m_run; - UInt_t m_luminosityBlock; - std::uint64_t m_event; + std::shared_ptr m_run; + std::shared_ptr m_luminosityBlock; + std::shared_ptr m_event; } m_commonFields; LumiNTuple m_lumi; diff --git a/PhysicsTools/NanoAOD/plugins/rntuple/TriggerOutputFields.cc b/PhysicsTools/NanoAOD/plugins/rntuple/TriggerOutputFields.cc index 5d2576b50d001..e1fa1161bf8be 100644 --- a/PhysicsTools/NanoAOD/plugins/rntuple/TriggerOutputFields.cc +++ b/PhysicsTools/NanoAOD/plugins/rntuple/TriggerOutputFields.cc @@ -123,7 +123,11 @@ void TriggerOutputFields::updateTriggerFields(const edm::TriggerResults& trigger void TriggerOutputFields::makeUniqueFieldName(RNTupleModel& model, std::string& name) { // Could also use a cache of names in a higher-level object, don't ask the RNTupleModel each time - const auto* existing_field = model.Get(name); +#if ROOT_VERSION_CODE < ROOT_VERSION(6, 31, 0) + auto existing_field = model.Get(name); +#else + auto existing_field = model.GetDefaultEntry().GetPtr(name); +#endif if (!existing_field) { return; } diff --git a/PhysicsTools/NanoAOD/python/autoNANO.py b/PhysicsTools/NanoAOD/python/autoNANO.py index bb0ce6b721e2b..358aa6bb440ff 100644 --- a/PhysicsTools/NanoAOD/python/autoNANO.py +++ b/PhysicsTools/NanoAOD/python/autoNANO.py @@ -14,26 +14,26 @@ def expandNanoMapping(seqList, mapping, key): # - An empty string recalls the default for the given key # - None is interpreted as "ignore this" seqList.remove(specifiedCommand) - if key in mappedTo and mappedTo[key] is not None: + if key in mappedTo and mappedTo[key] is not None: seqList.extend(mappedTo[key].split('+')) break; if level==maxLevel: raise Exception("Could not fully expand "+repr(seqList)+" from "+repr(mapping)) -autoNANO = { +autoNANO = { # PHYS is a mapping to the default NANO config, i.e. 
empty strings - 'PHYS': {'sequence': '', + 'PHYS': {'sequence': '', 'customize': ''}, # L1 flavours: add tables through customize, supposed to be combined with PHYS 'L1' : {'customize': 'nanoL1TrigObjCustomize'}, 'L1FULL' : {'customize': 'nanoL1TrigObjCustomizeFull'}, - # MUDPG flavours: use their own sequence - 'MUDPG' : {'sequence': 'muDPGNanoProducer', - 'customize': 'muDPGNanoCustomize'}, - 'MUDPGBKG' : {'sequence': 'muDPGNanoProducerBkg', - 'customize': 'muDPGNanoBkgCustomize'}, + # MUDPG flavours: use their own sequence + 'MUDPG' : {'sequence': 'DPGAnalysis/MuonTools/muNtupleProducer_cff.muDPGNanoProducer', + 'customize': 'DPGAnalysis/MuonTools/muNtupleProducer_cff.muDPGNanoCustomize'}, + 'MUDPGBKG' : {'sequence': 'DPGAnalysis/MuonTools/muNtupleProducerBkg_cff.muDPGNanoProducerBkg', + 'customize': 'DPGAnalysis/MuonTools/muNtupleProducerBkg_cff.muDPGNanoBkgCustomize'}, # PromptReco config: PHYS+L1 - 'Prompt' : {'sequence': '@PHYS', + 'Prompt' : {'sequence': '@PHYS', 'customize': '@PHYS+@L1'} } diff --git a/PhysicsTools/NanoAOD/python/electrons_cff.py b/PhysicsTools/NanoAOD/python/electrons_cff.py index 9168c9d3643a3..e2dd3f0d2dcc6 100644 --- a/PhysicsTools/NanoAOD/python/electrons_cff.py +++ b/PhysicsTools/NanoAOD/python/electrons_cff.py @@ -12,6 +12,7 @@ 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Summer16UL_ID_ISO_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Summer17UL_ID_ISO_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Summer18UL_ID_ISO_cff', + 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff', # Fall17: need to include the modules too to make sure they are run 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Fall17_94X_V2_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_iso_V2_cff', @@ -155,7 +156,7 @@ def _get_bitmapVIDForEle_docstring(modules,WorkingPoints): mvaNoIso_Fall17V2 = 
cms.InputTag("electronMVAValueMapProducer:ElectronMVAEstimatorRun2Fall17NoIsoV2Values"), mvaIso = cms.InputTag("electronMVAValueMapProducer:ElectronMVAEstimatorRun2RunIIIWinter22IsoV1Values"), mvaNoIso = cms.InputTag("electronMVAValueMapProducer:ElectronMVAEstimatorRun2RunIIIWinter22NoIsoV1Values"), - mvaHZZIso = cms.InputTag("electronMVAValueMapProducer:ElectronMVAEstimatorRun2Summer18ULIdIsoValues"), + mvaHZZIso = cms.InputTag("electronMVAValueMapProducer:ElectronMVAEstimatorRun2Winter22HZZV1Values"), miniIsoChg = cms.InputTag("isoForEle:miniIsoChg"), miniIsoAll = cms.InputTag("isoForEle:miniIsoAll"), @@ -184,7 +185,8 @@ def _get_bitmapVIDForEle_docstring(modules,WorkingPoints): mvaNoIso_Fall17V2_WPL = cms.InputTag("egmGsfElectronIDs:mvaEleID-Fall17-noIso-V2-wpLoose"), mvaNoIso_WP90 = cms.InputTag("egmGsfElectronIDs:mvaEleID-RunIIIWinter22-noIso-V1-wp90"), mvaNoIso_WP80 = cms.InputTag("egmGsfElectronIDs:mvaEleID-RunIIIWinter22-noIso-V1-wp80"), - + mvaIso_WPHZZ = cms.InputTag("egmGsfElectronIDs:mvaEleID-Winter22-HZZ-V1"), + cutBasedID_veto = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-RunIIIWinter22-V1-veto"), cutBasedID_loose = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-RunIIIWinter22-V1-loose"), cutBasedID_medium = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-RunIIIWinter22-V1-medium"), @@ -217,6 +219,7 @@ def _get_bitmapVIDForEle_docstring(modules,WorkingPoints): PFIsoAll = None, PFIsoAll04 = None).\ toModify(slimmedElectronsWithUserData.userIntFromBools, + mvaIso_WPHZZ = None, mvaIso_WP90 = None, mvaIso_WP80 = None, mvaNoIso_WP90 = None, @@ -329,6 +332,7 @@ def _get_bitmapVIDForEle_docstring(modules,WorkingPoints): mvaNoIso_WP80 = Var("userInt('mvaNoIso_WP80')",bool,doc="MVA noIso ID WP80, Winter22V1"), mvaNoIso_WP90 = Var("userInt('mvaNoIso_WP90')",bool,doc="MVA noIso ID WP90, Winter22V1"), mvaHZZIso = Var("userFloat('mvaHZZIso')", float,doc="HZZ MVA Iso ID score"), + mvaIso_WPHZZ = Var("userInt('mvaIso_WPHZZ')",bool,doc="MVA Iso ID 
WPHZZ, Winter22V1"), cutBased = Var("userInt('cutBasedID_veto')+userInt('cutBasedID_loose')+userInt('cutBasedID_medium')+userInt('cutBasedID_tight')", "uint8", doc="cut-based ID RunIII Winter22 (0:fail, 1:veto, 2:loose, 3:medium, 4:tight)"), vidNestedWPBitmap = Var("userInt('VIDNestedWPBitmap')", int, doc=_bitmapVIDForEle_docstring), @@ -350,8 +354,8 @@ def _get_bitmapVIDForEle_docstring(modules,WorkingPoints): lostHits = Var("gsfTrack.hitPattern.numberOfLostHits('MISSING_INNER_HITS')","uint8",doc="number of missing inner hits"), isPFcand = Var("pfCandidateRef().isNonnull()",bool,doc="electron is PF candidate"), seedGain = Var("userInt('seedGain')","uint8",doc="Gain of the seed crystal"), - seediEtaOriX = Var("superCluster().seedCrysIEtaOrIx","int8",doc="iEta or iX of seed crystal. iEta is barrel-only, iX is endcap-only. iEta runs from -85 to +85, with no crystal at iEta=0. iX runs from 1 to 100."), - seediPhiOriY = Var("superCluster().seedCrysIPhiOrIy",int,doc="iPhi or iY of seed crystal. iPhi is barrel-only, iY is endcap-only. iPhi runs from 1 to 360. iY runs from 1 to 100."), + seediEtaOriX = Var("superCluster().seedCrysIEtaOrIx","int16",doc="iEta or iX of seed crystal. iEta is barrel-only, iX is endcap-only. iEta runs from -85 to +85, with no crystal at iEta=0. iX runs from 1 to 100."), + seediPhiOriY = Var("superCluster().seedCrysIPhiOrIy","int16",doc="iPhi or iY of seed crystal. iPhi is barrel-only, iY is endcap-only. iPhi runs from 1 to 360. 
iY runs from 1 to 100."), jetNDauCharged = Var("?userCand('jetForLepJetVar').isNonnull()?userFloat('jetNDauChargedMVASel'):0", "uint8", doc="number of charged daughters of the closest jet"), ), externalVariables = cms.PSet( @@ -378,6 +382,7 @@ def _get_bitmapVIDForEle_docstring(modules,WorkingPoints): mvaIso_WPL = Var("userInt('mvaIso_Fall17V2_WPL')",bool,doc="MVA Iso ID loose WP, Fall17V2"), mvaNoIso = Var("userFloat('mvaNoIso_Fall17V2')",float,doc="MVA noIso ID score, Fall17V2"), mvaNoIso_WP80 = Var("userInt('mvaNoIso_Fall17V2_WP80')",bool,doc="MVA noIso ID WP80, Fall17V2"), + mvaIso_WPHZZ = None, mvaNoIso_WP90 = Var("userInt('mvaNoIso_Fall17V2_WP90')",bool,doc="MVA noIso ID WP90, Fall17V2"), mvaNoIso_WPL = Var("userInt('mvaNoIso_Fall17V2_WPL')",bool,doc="MVA noIso ID loose WP, Fall17V2"), cutBased = Var("userInt('cutBasedID_Fall17V2_veto')+userInt('cutBasedID_Fall17V2_loose')+userInt('cutBasedID_Fall17V2_medium')+userInt('cutBasedID_Fall17V2_tight')", "uint8", doc="cut-based ID Fall17V2 (0:fail, 1:veto, 2:loose, 3:medium, 4:tight)"), diff --git a/PhysicsTools/NanoAOD/python/globals_cff.py b/PhysicsTools/NanoAOD/python/globals_cff.py index 5c7ad360f7538..20e63336efa9b 100644 --- a/PhysicsTools/NanoAOD/python/globals_cff.py +++ b/PhysicsTools/NanoAOD/python/globals_cff.py @@ -11,7 +11,7 @@ name = cms.string("BeamSpot"), doc = cms.string("offlineBeamSpot, the offline reconstructed beamspot"), variables = cms.PSet( - type = Var("type()","int8",doc="BeamSpot type (Unknown = -1, Fake = 0, LHC = 1, Tracker = 2)"), + type = Var("type()","int16",doc="BeamSpot type (Unknown = -1, Fake = 0, LHC = 1, Tracker = 2)"), z = Var("position().z()",float,doc="BeamSpot center, z coordinate (cm)",precision=-1), zError = Var("z0Error()",float,doc="Error on BeamSpot center, z coordinate (cm)",precision=-1), sigmaZ = Var("sigmaZ()",float,doc="Width of BeamSpot in z (cm)",precision=-1), diff --git a/PhysicsTools/NanoAOD/python/jetsAK4_CHS_cff.py 
b/PhysicsTools/NanoAOD/python/jetsAK4_CHS_cff.py index eafbca1c54059..49818f80bd811 100644 --- a/PhysicsTools/NanoAOD/python/jetsAK4_CHS_cff.py +++ b/PhysicsTools/NanoAOD/python/jetsAK4_CHS_cff.py @@ -36,14 +36,14 @@ ) tightJetId = cms.EDProducer("PatJetIDValueMapProducer", filterParams=cms.PSet( - version = cms.string('RUN3WINTER22CHS'), + version = cms.string('RUN3CHSruns2022FGruns2023CD'), quality = cms.string('TIGHT'), ), src = cms.InputTag("updatedJets") ) tightJetIdLepVeto = cms.EDProducer("PatJetIDValueMapProducer", filterParams=cms.PSet( - version = cms.string('RUN3WINTER22CHS'), + version = cms.string('RUN3CHSruns2022FGruns2023CD'), quality = cms.string('TIGHTLEPVETO'), ), src = cms.InputTag("updatedJets") @@ -61,9 +61,9 @@ ) run3_jme_Winter22runsBCDEprompt.toModify( - tightJetId.filterParams, version = "RUN3WINTER22CHSrunsBCDEprompt" + tightJetId.filterParams, version = "RUN3CHSruns2022BCDEprompt" ).toModify( - tightJetIdLepVeto.filterParams, version = "RUN3WINTER22CHSrunsBCDEprompt" + tightJetIdLepVeto.filterParams, version = "RUN3CHSruns2022BCDEprompt" ) bJetVars = cms.EDProducer("JetRegressionVarProducer", diff --git a/PhysicsTools/NanoAOD/python/jetsAK4_Puppi_cff.py b/PhysicsTools/NanoAOD/python/jetsAK4_Puppi_cff.py index f3eac030aa776..9ca6530aea4d3 100644 --- a/PhysicsTools/NanoAOD/python/jetsAK4_Puppi_cff.py +++ b/PhysicsTools/NanoAOD/python/jetsAK4_Puppi_cff.py @@ -28,14 +28,14 @@ tightJetPuppiId = cms.EDProducer("PatJetIDValueMapProducer", filterParams=cms.PSet( - version = cms.string('RUN3WINTER22PUPPI'), + version = cms.string('RUN3PUPPIruns2022FGruns2023CD'), quality = cms.string('TIGHT'), ), src = cms.InputTag("updatedJetsPuppi") ) tightJetPuppiIdLepVeto = cms.EDProducer("PatJetIDValueMapProducer", filterParams=cms.PSet( - version = cms.string('RUN3WINTER22PUPPI'), + version = cms.string('RUN3PUPPIruns2022FGruns2023CD'), quality = cms.string('TIGHTLEPVETO'), ), src = cms.InputTag("updatedJetsPuppi") @@ -54,9 +54,9 @@ ) 
run3_jme_Winter22runsBCDEprompt.toModify( - tightJetPuppiId.filterParams, version = "RUN3WINTER22PUPPIrunsBCDEprompt" + tightJetPuppiId.filterParams, version = "RUN3PUPPIruns2022BCDEprompt" ).toModify( - tightJetPuppiIdLepVeto.filterParams, version = "RUN3WINTER22PUPPIrunsBCDEprompt" + tightJetPuppiIdLepVeto.filterParams, version = "RUN3PUPPIruns2022BCDEprompt" ) #HF shower shape recomputation diff --git a/PhysicsTools/NanoAOD/python/muons_cff.py b/PhysicsTools/NanoAOD/python/muons_cff.py index b7462f06aeb67..c7b6cefc3c345 100644 --- a/PhysicsTools/NanoAOD/python/muons_cff.py +++ b/PhysicsTools/NanoAOD/python/muons_cff.py @@ -164,6 +164,7 @@ softId = Var("passed('SoftCutBasedId')",bool,doc="soft cut-based ID"), softMvaId = Var("passed('SoftMvaId')",bool,doc="soft MVA ID"), softMva = Var("softMvaValue()",float,doc="soft MVA ID score",precision=6), + softMvaRun3 = Var("softMvaRun3Value()",float,doc="soft MVA Run3 ID score",precision=6), highPtId = Var("?passed('CutBasedIdGlobalHighPt')?2:passed('CutBasedIdTrkHighPt')","uint8",doc="high-pT cut-based ID (1 = tracker high pT, 2 = global high pT, which includes tracker high pT)"), pfIsoId = Var("passed('PFIsoVeryLoose')+passed('PFIsoLoose')+passed('PFIsoMedium')+passed('PFIsoTight')+passed('PFIsoVeryTight')+passed('PFIsoVeryVeryTight')","uint8",doc="PFIso ID from miniAOD selector (1=PFIsoVeryLoose, 2=PFIsoLoose, 3=PFIsoMedium, 4=PFIsoTight, 5=PFIsoVeryTight, 6=PFIsoVeryVeryTight)"), tkIsoId = Var("?passed('TkIsoTight')?2:passed('TkIsoLoose')","uint8",doc="TkIso ID (1=TkIsoLoose, 2=TkIsoTight)"), diff --git a/PhysicsTools/NanoAOD/python/nanoDQM_cfi.py b/PhysicsTools/NanoAOD/python/nanoDQM_cfi.py index 6768e4d2fae3b..d57280f9d31a7 100644 --- a/PhysicsTools/NanoAOD/python/nanoDQM_cfi.py +++ b/PhysicsTools/NanoAOD/python/nanoDQM_cfi.py @@ -106,6 +106,8 @@ Plot1D('mvaNoIso_Fall17V2_WP80', 'mvaNoIso_Fall17V2_WP80', 2, -0.5, 1.5, 'MVA noIso ID WP80, Fall17V2'), Plot1D('mvaNoIso_Fall17V2_WP90', 'mvaNoIso_Fall17V2_WP90', 2, -0.5, 
1.5, 'MVA noIso ID WP90, Fall17V2'), Plot1D('mvaNoIso_Fall17V2_WPL', 'mvaNoIso_Fall17V2_WPL', 2, -0.5, 1.5, 'MVA noIso ID loose WP, Fall17V2'), + Plot1D('mvaHZZIso', 'mvaHZZIso', 20, -1, 1, 'HZZ MVA Iso ID score'), + Plot1D('mvaIso_WPHZZ', 'mvaIso_WPHZZ', 2, -0.5, 1.5, 'MVA Iso ID WPHZZ, Winter22V1'), Plot1D('mvaTTH', 'mvaTTH', 20, -1, 1, 'TTH MVA lepton ID score'), Plot1D('pdgId', 'pdgId', 27, -13.5, 13.5, 'PDG code assigned by the event reconstruction (not by MC truth)'), Plot1D('miniPFRelIso_all', 'miniPFRelIso_all', 20, 0, 1, 'mini PF relative isolation, total (with scaled rho*EA PU corrections)'), @@ -577,6 +579,7 @@ Plot1D('sip3d', 'sip3d', 20, 0, 20, '3D impact parameter significance wrt first PV'), Profile1D('softId', 'softId', 'pt', 20, 0, 40, 'POG Soft muon ID (using the relaxed cuts in the data Run 2016 B-F periods, and standard cuts elsewhere)'), Plot1D('softMva', 'softMva', 20, -1, 1, 'soft MVA ID score'), + Plot1D('softMvaRun3', 'softMvaRun3', 20, 0, 1, 'soft MVA ID score for Run3'), Plot1D('softMvaId', 'softMvaId', 2, -0.5, 1.5, 'soft MVA ID'), Plot1D('tightCharge', 'tightCharge', 1, 1.5, 2.5, 'Tight charge criterion using pterr/pt of muonBestTrack (0:fail, 2:pass)'), Profile1D('tightId', 'tightId', 'pt', 16, 0, 80, 'POG Tight muon ID'), @@ -622,6 +625,7 @@ Plot1D('npvs', 'npvs', 20, 0, 60, 'total number of reconstructed primary vertices'), Plot1D('npvsGood', 'npvsGood', 20, 0, 60, 'total number of Good primary vertices'), Plot1D('score', 'score', 20, 0, 300000, 'main primary vertex score, i.e. 
sum pt2 of clustered objects'), + Plot1D('sumpt2', 'sumpt2', 100, 0, 300000, 'main primary vertex sum pt2 of the charged pf candidates'), Plot1D('x', 'x', 20, -0.3, 0.3, 'main primary vertex position x coordinate'), Plot1D('y', 'y', 20, -0.3, 0.3, 'main primary vertex position y coordinate'), Plot1D('z', 'z', 20, -20, 20, 'main primary vertex position z coordinate'), @@ -867,6 +871,17 @@ Plot1D('probDM11PNet', 'probDM11PNet', 20, 0, 1, 'normalised probablity of decayMode 11, 3h+1pi0 (PNet 2023)'), ) ), + TauProd = cms.PSet( + sels = cms.PSet(), + plots = cms.VPSet( + Count1D('_size', 40, -0.5, 5.5, 'tau decay products'), + Plot1D('pt', 'pt', 20, 0, 200, 'pt'), + Plot1D('phi', 'phi', 20, -3.14159, 3.14159, 'phi'), + Plot1D('eta', 'eta', 20, -5, 5, 'eta'), + Plot1D('pdgId', 'pdgId', 200, -10250, 10250, 'PDG code assigned by the event reconstruction (not by MC truth)'), + NoPlot('status'), + ) + ), TkMET = cms.PSet( sels = cms.PSet(), plots = cms.VPSet( diff --git a/PhysicsTools/NanoAOD/python/nano_cff.py b/PhysicsTools/NanoAOD/python/nano_cff.py index 4c142ddc9f2bf..cf194650941dd 100644 --- a/PhysicsTools/NanoAOD/python/nano_cff.py +++ b/PhysicsTools/NanoAOD/python/nano_cff.py @@ -284,7 +284,3 @@ def nanoL1TrigObjCustomizeFull(process): process.nanoTableTaskCommon.add(process.l1TablesTask) return process -### muon DPG NANO flavour sequences and customize functions -from DPGAnalysis.MuonTools.muNtupleProducer_cff import * -from DPGAnalysis.MuonTools.muNtupleProducerBkg_cff import * - diff --git a/PhysicsTools/NanoAOD/python/photons_cff.py b/PhysicsTools/NanoAOD/python/photons_cff.py index 9fa56cd025f2f..c99619d353f2b 100644 --- a/PhysicsTools/NanoAOD/python/photons_cff.py +++ b/PhysicsTools/NanoAOD/python/photons_cff.py @@ -191,7 +191,7 @@ def make_bitmapVID_docstring(id_modules_working_points_pset): variables = cms.PSet(P3Vars, jetIdx = Var("?hasUserCand('jet')?userCand('jet').key():-1", "int16", doc="index of the associated jet (-1 if none)"), electronIdx = 
Var("?hasUserCand('electron')?userCand('electron').key():-1", "int16", doc="index of the associated electron (-1 if none)"), - energyErr = Var("getCorrectedEnergyError('regression2')",float,doc="energy error of the cluster from regression",precision=6), + energyErr = Var("getCorrectedEnergyError('regression2')",float,doc="energy error of the cluster from regression",precision=10), energyRaw = Var("superCluster().rawEnergy()",float,doc="raw energy of photon supercluster", precision=10), superclusterEta = Var("superCluster().eta()",float,doc="supercluster eta",precision=10), r9 = Var("full5x5_r9()",float,doc="R9 of the supercluster, calculated with full 5x5 region",precision=10), @@ -217,10 +217,10 @@ def make_bitmapVID_docstring(id_modules_working_points_pset): mvaID = Var("userFloat('mvaID')",float,doc="MVA ID score, Winter22V1",precision=10), mvaID_WP90 = Var("userInt('mvaID_WP90')",bool,doc="MVA ID WP90, Winter22V1"), mvaID_WP80 = Var("userInt('mvaID_WP80')",bool,doc="MVA ID WP80, Winter22V1"), - trkSumPtHollowConeDR03 = Var("trkSumPtHollowConeDR03()",float,doc="Sum of track pT in a hollow cone of outer radius, inner radius", precision=8), - trkSumPtSolidConeDR04 = Var("trkSumPtSolidConeDR04()",float,doc="Sum of track pT in a cone of dR=0.4", precision=8), - ecalPFClusterIso = Var("ecalPFClusterIso()",float,doc="sum pt of ecal clusters, vetoing clusters part of photon", precision=8), - hcalPFClusterIso = Var("hcalPFClusterIso()",float,doc="sum pt of hcal clusters, vetoing clusters part of photon", precision=8), + trkSumPtHollowConeDR03 = Var("trkSumPtHollowConeDR03()",float,doc="Sum of track pT in a hollow cone of outer radius, inner radius", precision=10), + trkSumPtSolidConeDR04 = Var("trkSumPtSolidConeDR04()",float,doc="Sum of track pT in a cone of dR=0.4", precision=10), + ecalPFClusterIso = Var("ecalPFClusterIso()",float,doc="sum pt of ecal clusters, vetoing clusters part of photon", precision=10), + hcalPFClusterIso = Var("hcalPFClusterIso()",float,doc="sum 
pt of hcal clusters, vetoing clusters part of photon", precision=10), pfPhoIso03 = Var("photonIso()",float,doc="PF absolute isolation dR=0.3, photon component (uncorrected)"), pfChargedIso = Var("chargedHadronIso()",float,doc="PF absolute isolation dR=0.3, charged component with dxy,dz match to PV", precision=8), pfChargedIsoPFPV = Var("chargedHadronPFPVIso()",float,doc="PF absolute isolation dR=0.3, charged component (PF PV only)"), @@ -232,8 +232,8 @@ def make_bitmapVID_docstring(id_modules_working_points_pset): isScEtaEB = Var("abs(superCluster().eta()) < 1.4442",bool,doc="is supercluster eta within barrel acceptance"), isScEtaEE = Var("abs(superCluster().eta()) > 1.566 && abs(superCluster().eta()) < 2.5",bool,doc="is supercluster eta within endcap acceptance"), seedGain = Var("userInt('seedGain')","uint8",doc="Gain of the seed crystal"), - seediEtaOriX = Var("superCluster().seedCrysIEtaOrIx","int8",doc="iEta or iX of seed crystal. iEta is barrel-only, iX is endcap-only. iEta runs from -85 to +85, with no crystal at iEta=0. iX runs from 1 to 100."), - seediPhiOriY = Var("superCluster().seedCrysIPhiOrIy",int,doc="iPhi or iY of seed crystal. iPhi is barrel-only, iY is endcap-only. iPhi runs from 1 to 360. iY runs from 1 to 100."), + seediEtaOriX = Var("superCluster().seedCrysIEtaOrIx","int16",doc="iEta or iX of seed crystal. iEta is barrel-only, iX is endcap-only. iEta runs from -85 to +85, with no crystal at iEta=0. iX runs from 1 to 100."), + seediPhiOriY = Var("superCluster().seedCrysIPhiOrIy","int16",doc="iPhi or iY of seed crystal. iPhi is barrel-only, iY is endcap-only. iPhi runs from 1 to 360. 
iY runs from 1 to 100."), # position of photon is best approximated by position of seed cluster, not the SC centroid x_calo = Var("superCluster().seed().position().x()",float,doc="photon supercluster position on calorimeter, x coordinate (cm)",precision=10), y_calo = Var("superCluster().seed().position().y()",float,doc="photon supercluster position on calorimeter, y coordinate (cm)",precision=10), diff --git a/PhysicsTools/NanoAOD/python/triggerObjects_cff.py b/PhysicsTools/NanoAOD/python/triggerObjects_cff.py index b5e7dce8449ed..37739ec4e81e6 100644 --- a/PhysicsTools/NanoAOD/python/triggerObjects_cff.py +++ b/PhysicsTools/NanoAOD/python/triggerObjects_cff.py @@ -129,31 +129,41 @@ def AND(tokens): ), Tau = cms.PSet( id = cms.int32(15), - sel = cms.string("type(84) && pt > 5 && coll('*Tau*') && ( filter('*LooseChargedIso*') || filter('*MediumChargedIso*') || filter('*DeepTau*') || filter('*TightChargedIso*') || filter('*TightOOSCPhotons*') || filter('hltL2TauIsoFilter') || filter('*OverlapFilterIsoMu*') || filter('*OverlapFilterIsoEle*') || filter('*L1HLTMatched*') || filter('*Dz02*') || filter('*DoublePFTau*') || filter('*SinglePFTau*') || filter('hlt*SelectedPFTau') || filter('*DisplPFTau*') )"), #All trigger objects from a Tau collection + passing at least one filter + sel = cms.string("type(84) && pt > 5 && coll('*Tau*') && ( filter('*Loose*') || filter('*Medium*') || filter('*Tight*') || filter('*DeepTau*') || filter('*ChargedIso*') || filter('hltL2Tau*IsoFilter*') || filter('hltL2TauTagNNFilter*') || filter('*OverlapFilter*') || filter('*DisplPFTau*') || filter('*VBFIsoTau*') || filter('*Monitoring*') || filter('*DoublePFTau*') || filter('*SingleTau*') || filter('hlt*SelectedPFTau') || filter('*ETau*') || filter('*MuTau*') )"), #All trigger objects from a Tau collection + passing at least one filter l1seed = cms.string("type(-100)"), l1deltaR = cms.double(0.3), l2seed = cms.string("type(84) && coll('hltL2TauJetsL1IsoTauSeeded')"), l2deltaR = cms.double(0.3), 
skipObjectsNotPassingQualityBits = cms.bool(True), qualityBits = cms.VPSet( - mksel("filter('*LooseChargedIso*')","LooseChargedIso"), - mksel("filter('*MediumChargedIso*')","MediumChargedIso"), - mksel("filter('*TightChargedIso*')","TightChargedIso"), - mksel("filter('*DeepTau*')","DeepTau"), - mksel("filter('*TightOOSCPhotons*')","TightID OOSC photons"), - mksel("filter('*Hps*')","HPS"), - mksel("filter('hlt*DoublePFTau*TrackPt1*ChargedIsolation*Dz02*')","charged iso di-tau"), - mksel("filter('hlt*DoublePFTau*DeepTau*L1HLTMatched')","deeptau di-tau"), - mksel("filter('hlt*OverlapFilterIsoEle*WPTightGsf*PFTau*')","e-tau"), - mksel("filter('hlt*OverlapFilterIsoMu*PFTau*')","mu-tau"), - mksel("filter('hlt*SelectedPFTau*L1HLTMatched')","single-tau/tau+MET"), - mksel("filter('hlt*DoublePFTau*TrackPt1*ChargedIso*')","run 2 VBF+ditau"), - mksel("filter('hlt*DoublePFTau*Track*ChargedIso*AgainstMuon')","run 3 VBF+ditau"), - mksel("filter('hltHpsSinglePFTau*HLTMatched')","run 3 double PF jets + ditau"), - mksel("filter('hltHpsOverlapFilterDeepTauDoublePFTau*PFJet*')","di-tau + PFJet"), - mksel("filter('hlt*Double*ChargedIsoDisplPFTau*Dxy*')","Displaced Tau"), - mksel("filter('*Monitoring')","Monitoring"), - mksel("filter('*Reg')","regional paths"), - mksel("filter('*L1Seeded')","L1 seeded paths"), - mksel("filter('*1Prong')","1 prong tau paths") + mksel("filter('*Loose*')","Loose"), # 0 + mksel("filter('*Medium*')","Medium"), # 1 + mksel("filter('*Tight*')","Tight"), # 2 + mksel("filter('*DeepTau*')","DeepTau no spec WP"), #3 + mksel("filter('*ChargedIso*')","ChargedIso"), # 4 + mksel("filter('*Hps*')","HPS"), # 5 + mksel("filter('*ETau*')","e-tau inside filter"), # 6 + mksel("filter('*MuTau*')","mu-tau inside filter"), # 7 + mksel("filter('*SingleTau*')","single-tau inside filter"), # 8 + mksel("filter('hltMatchedVBFIsoTau*')","VBF matching"), # 9 + mksel("filter('hlt*DoublePFTau*L1HLTMatched')","di-tau"), # 10 + 
mksel("filter('hltHpsOverlapFilterIsoEle*WPTightGsf*PFTau*')","e-tau"), # 11 + mksel("filter('hltHpsOverlapFilterIsoMu*PFTau*')","mu-tau"), # 12 + mksel("filter('hltHpsOverlapFilterDeepTauDoublePFTau*PFJet*')","di-tau + PFJet"), # 13 + mksel("filter('hltHpsOverlapFilterDisplacedEle*DisplPFTau*')","e-tau displaced"), # 14 + mksel("filter('hltHpsOverlapFilterDisplacedMu*DisplPFTau*')","mu-tau displaced"), # 15 + mksel("filter('hlt*Double*ChargedIsoDisplPFTau*')","di-tau displaced"), # 16 + mksel("filter('*Monitoring')","Monitoring"), # 17 + mksel("filter('*MonitoringForVBFIsoTau')","MonitoringForVBFIsoTau"), # 18 + mksel("filter('hltHpsOverlapFilterDeepTauPFTau*PFJet*')","Monitoring di-tau + PFJet"), # 19 + mksel("filter('hltHpsOverlapFilterIsoMu*MediumChargedIsoDisplTau*')","Monitoring muTau displaced"), # 20 + mksel("filter('*OneProng*')","OneProng"), # 21 + mksel("filter('*2*DiJetCorr*')","DiJetCorr"), # 22 + mksel("filter('*OverlapFilter*')","OverlapFilter"), # 23 + mksel("filter('*Dxy*')","Dxy"), # 24 + mksel("filter('*L1HLTMatched*')","MatchL1HLT"), # 25 + mksel("filter('*L1Seeded')","MatchL1HLT"), # 26 + mksel("filter('hltHpsOverlapFilterIsoMu27MediumDeepTauDitauWPPFTau20')","VBF + DoubleTau Monitoring"), # 27 + mksel("filter('hltHpsOverlapFilterIsoMu24MediumDeepTauPFTau20')","For matching to monitoring trigger for 20 GeV tau leg of VBF triggers"), # 28 + mksel("filter('*SinglePFTau*')","single PF-tau inside filter"), # 29 ) ), BoostedTau = cms.PSet( @@ -220,7 +230,7 @@ def AND(tokens): mksel("coll('hltAK8PFSoftDropJets230')"), #4, present if nothing else below is fired, otherwise 12, 20, 28, 52, 60 mksel(["hltAK8SinglePFJets230SoftDropMass40BTagParticleNetBB0p35", "hltAK8SinglePFJets250SoftDropMass40BTagParticleNetBB0p35", - "hltAK8SinglePFJets275SoftDropMass40BTagParticleNetBB0p35"]), # 12 if nothing below is fired, #28 if also "hltAK8DoublePFJetSDModMass30", #60 if also "hltAK8DoublePFJetSDModMass50" + 
"hltAK8SinglePFJets275SoftDropMass40BTagParticleNetBB0p35"]), # 12 if nothing below is fired, #28 if also "hltAK8DoublePFJetSDModMass30", #60 if also "hltAK8DoublePFJetSDModMass50" mksel(["hltAK8DoublePFJetSDModMass30"]), # 16 if onthing else (except #1), 20 if also #4, 28 if also #12 mksel(["hltAK8DoublePFJetSDModMass50"]), # 48 if also (obviously) "hltAK8DoublePFJetSDModMass30", 52 if also #4, #60 if all above ) @@ -294,6 +304,33 @@ def AND(tokens): ) ) +_run2_2017_2018_tau_filters = [ + mksel("filter('*LooseChargedIso*')","LooseChargedIso"), + mksel("filter('*MediumChargedIso*')","MediumChargedIso"), + mksel("filter('*TightChargedIso*')","TightChargedIso"), + mksel("filter('*DeepTau*')","DeepTau"), + mksel("filter('*TightOOSCPhotons*')","TightID OOSC photons"), + mksel("filter('*Hps*')","HPS"), + mksel("filter('hlt*DoublePFTau*TrackPt1*ChargedIsolation*Dz02*')","charged iso di-tau"), + mksel("filter('hlt*DoublePFTau*DeepTau*L1HLTMatched')","deeptau di-tau"), + mksel("filter('hlt*OverlapFilterIsoEle*WPTightGsf*PFTau*')","e-tau"), + mksel("filter('hlt*OverlapFilterIsoMu*PFTau*')","mu-tau"), + mksel("filter('hlt*SelectedPFTau*L1HLTMatched')","single-tau/tau+MET"), + mksel("filter('hlt*DoublePFTau*TrackPt1*ChargedIso*')","run 2 VBF+ditau"), + mksel("filter('hlt*DoublePFTau*Track*ChargedIso*AgainstMuon')","run 3 VBF+ditau"), + mksel("filter('hltHpsSinglePFTau*HLTMatched')","run 3 double PF jets + ditau"), + mksel("filter('hltHpsOverlapFilterDeepTauDoublePFTau*PFJet*')","di-tau + PFJet"), + mksel("filter('hlt*Double*ChargedIsoDisplPFTau*Dxy*')","Displaced Tau"), + mksel("filter('*Monitoring')","Monitoring"), + mksel("filter('*Reg')","regional paths"), + mksel("filter('*L1Seeded')","L1 seeded paths"), + mksel("filter('*1Prong')","1 prong tau paths") +] +(run2_HLTconditions_2017 | run2_HLTconditions_2018).toModify( + triggerObjectTable.selections.Tau, + sel = "type(84) && pt > 5 && coll('*Tau*') && ( filter('*LooseChargedIso*') || filter('*MediumChargedIso*') || 
filter('*DeepTau*') || filter('*TightChargedIso*') || filter('*TightOOSCPhotons*') || filter('hltL2TauIsoFilter') || filter('*OverlapFilterIsoMu*') || filter('*OverlapFilterIsoEle*') || filter('*L1HLTMatched*') || filter('*Dz02*') || filter('*DoublePFTau*') || filter('*SinglePFTau*') || filter('hlt*SelectedPFTau') || filter('*DisplPFTau*') )", + qualityBits = cms.VPSet(_run2_2017_2018_tau_filters) +) _run2_HLTconditions = run2_HLTconditions_2016 | run2_HLTconditions_2017 | run2_HLTconditions_2018 _run2_2016_jet_filters = [ @@ -344,7 +381,7 @@ def AND(tokens): DataEraECAL = cms.string("UL2016postVFP"), DataEraMuon = cms.string("2016postVFP") ) -#Next line is for UL2017 maps +#Next line is for UL2017 maps run2_jme_2017.toModify( prefiringweight, DataEraECAL = cms.string("UL2017BtoF"), diff --git a/PhysicsTools/NanoAOD/python/vertices_cff.py b/PhysicsTools/NanoAOD/python/vertices_cff.py index ddda0ba422c1e..69d105795d25d 100644 --- a/PhysicsTools/NanoAOD/python/vertices_cff.py +++ b/PhysicsTools/NanoAOD/python/vertices_cff.py @@ -10,6 +10,7 @@ vertexTable = cms.EDProducer("VertexTableProducer", pvSrc = cms.InputTag("offlineSlimmedPrimaryVertices"), goodPvCut = cms.string("!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2"), + pfcSrc = cms.InputTag("packedPFCandidates"), svSrc = cms.InputTag("linkedObjects", "vertices"), svCut = cms.string(""), # careful: adding a cut here would make the collection matching inconsistent with the SV table dlenMin = cms.double(0), diff --git a/PhysicsTools/NanoAOD/scripts/haddnano.py b/PhysicsTools/NanoAOD/scripts/haddnano.py index 1b7ce9831d4e6..ee8c3dec9d166 100755 --- a/PhysicsTools/NanoAOD/scripts/haddnano.py +++ b/PhysicsTools/NanoAOD/scripts/haddnano.py @@ -32,7 +32,7 @@ def zeroFill(tree, brName, brObj, allowNonBool=False): fileHandles = [] goFast = True for fn in files: - print("Adding file" + str(fn)) + print("Adding file", str(fn)) fileHandles.append(ROOT.TFile.Open(fn)) if fileHandles[-1].GetCompressionSettings() != 
fileHandles[0].GetCompressionSettings(): goFast = False @@ -44,7 +44,7 @@ def zeroFill(tree, brName, brObj, allowNonBool=False): for e in fileHandles[0].GetListOfKeys(): name = e.GetName() - print("Merging" + str(name)) + print("Merging", str(name)) obj = e.ReadObj() cl = ROOT.TClass.GetClass(e.GetClassName()) inputs = ROOT.TList() @@ -53,7 +53,18 @@ def zeroFill(tree, brName, brObj, allowNonBool=False): obj = obj.CloneTree(-1, "fast" if goFast else "") branchNames = set([x.GetName() for x in obj.GetListOfBranches()]) for fh in fileHandles[1:]: + if isTree and obj.GetName() == 'Events' and obj.GetEntries() == 0 : + # Zero-events first file. Skip to avoid messing up branches. + print(" 'Events' tree contains no events; skipping") + obj = fh.GetListOfKeys().FindObject(name).ReadObj() + obj = obj.CloneTree(-1, "fast" if goFast else "") + branchNames = set([x.GetName() for x in obj.GetListOfBranches()]) + continue otherObj = fh.GetListOfKeys().FindObject(name).ReadObj() + if isTree and obj.GetName() == 'Events' and otherObj.GetEntries() == 0 : + # Zero-events file; skip + print(" 'Events' tree contains no events; skipping") + continue inputs.Add(otherObj) if isTree and obj.GetName() == 'Events': otherObj.SetAutoFlush(0) @@ -61,7 +72,7 @@ def zeroFill(tree, brName, brObj, allowNonBool=False): for x in otherObj.GetListOfBranches()]) missingBranches = list(branchNames - otherBranches) additionalBranches = list(otherBranches - branchNames) - print("missing: " + str(missingBranches) + "\n Additional:" + str(additionalBranches)) + print("missing: " + str(missingBranches) + "\n Additional: " + str(additionalBranches)) for br in missingBranches: # fill "Other" zeroFill(otherObj, br, obj.GetListOfBranches().FindObject(br)) @@ -76,7 +87,7 @@ def zeroFill(tree, brName, brObj, allowNonBool=False): for x in otherObj.GetListOfBranches()]) missingBranches = list(branchNames - otherBranches) additionalBranches = list(otherBranches - branchNames) - print("missing: " + 
str(missingBranches) + "\n Additional:" + str(additionalBranches)) + print("missing: " + str(missingBranches) + "\n Additional: " + str(additionalBranches)) for br in missingBranches: # fill "Other" zeroFill(otherObj, br, obj.GetListOfBranches( diff --git a/PhysicsTools/PatAlgos/BuildFile.xml b/PhysicsTools/PatAlgos/BuildFile.xml index eb12b6b83be43..5686f217c31ff 100644 --- a/PhysicsTools/PatAlgos/BuildFile.xml +++ b/PhysicsTools/PatAlgos/BuildFile.xml @@ -18,6 +18,7 @@ + diff --git a/PhysicsTools/PatAlgos/interface/SoftMuonMvaRun3Estimator.h b/PhysicsTools/PatAlgos/interface/SoftMuonMvaRun3Estimator.h new file mode 100644 index 0000000000000..a3cc3693e18ec --- /dev/null +++ b/PhysicsTools/PatAlgos/interface/SoftMuonMvaRun3Estimator.h @@ -0,0 +1,13 @@ +#ifndef __PhysicsTools_PatAlgos_SoftMuonMvaRun3Estimator__ +#define __PhysicsTools_PatAlgos_SoftMuonMvaRun3Estimator__ + +#include +#include + +namespace pat { + class XGBooster; + class Muon; + + float computeSoftMvaRun3(XGBooster& booster, const Muon& muon); +} // namespace pat +#endif diff --git a/PhysicsTools/PatAlgos/interface/XGBooster.h b/PhysicsTools/PatAlgos/interface/XGBooster.h new file mode 100644 index 0000000000000..64036742ace36 --- /dev/null +++ b/PhysicsTools/PatAlgos/interface/XGBooster.h @@ -0,0 +1,34 @@ +#ifndef PhysicsTools_PatAlgos_XGBooster_h +#define PhysicsTools_PatAlgos_XGBooster_h + +#include +#include +#include +#include +#include + +namespace pat { + class XGBooster { + public: + XGBooster(std::string model_file); + XGBooster(std::string model_file, std::string model_features); + + /// Features need to be entered in the order they are used + /// in the model + void addFeature(std::string name); + + /// Reset feature values + void reset(); + + void set(std::string name, float value); + + float predict(); + + private: + std::vector features_; + std::map feature_name_to_index_; + BoosterHandle booster_; + }; +} // namespace pat + +#endif diff --git 
a/PhysicsTools/PatAlgos/plugins/LowPtGSFToPackedCandidateLinker.cc b/PhysicsTools/PatAlgos/plugins/LowPtGSFToPackedCandidateLinker.cc index a82b2fd3ac561..bb913d8b50eb5 100644 --- a/PhysicsTools/PatAlgos/plugins/LowPtGSFToPackedCandidateLinker.cc +++ b/PhysicsTools/PatAlgos/plugins/LowPtGSFToPackedCandidateLinker.cc @@ -89,8 +89,8 @@ void LowPtGSFToPackedCandidateLinker::produce(edm::StreamID, edm::Event& iEvent, //store auxiliary mappings for association std::vector gsf2pack(ngsf, -1); std::vector gsf2lost(ngsf, -1); - PackedCandidatePtrCollection ele2packedptr(nele, PackedCandidatePtr(packed, -1)); - PackedCandidatePtrCollection ele2lostptr(nele, PackedCandidatePtr(lost_tracks, -1)); + PackedCandidatePtrCollection ele2packedptr(nele, PackedCandidatePtr()); + PackedCandidatePtrCollection ele2lostptr(nele, PackedCandidatePtr()); //electrons will never store their track (they store the Gsf track) //map PackedPF <--> Track diff --git a/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc b/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc index 02fee8eabc1dc..e605623065902 100644 --- a/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc +++ b/PhysicsTools/PatAlgos/plugins/PATElectronSlimmer.cc @@ -26,10 +26,8 @@ namespace pat { class PATElectronSlimmer : public edm::stream::EDProducer<> { public: explicit PATElectronSlimmer(const edm::ParameterSet& iConfig); - ~PATElectronSlimmer() override {} void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) final; - void beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup&) final; private: const edm::EDGetTokenT> src_; @@ -91,8 +89,6 @@ pat::PATElectronSlimmer::PATElectronSlimmer(const edm::ParameterSet& iConfig) produces>(); } -void pat::PATElectronSlimmer::beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup& iSetup) {} - void pat::PATElectronSlimmer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { using namespace edm; using namespace std; diff --git 
a/PhysicsTools/PatAlgos/plugins/PATMuonProducer.cc b/PhysicsTools/PatAlgos/plugins/PATMuonProducer.cc index ee5fa42c53eb2..61c386fbb333a 100644 --- a/PhysicsTools/PatAlgos/plugins/PATMuonProducer.cc +++ b/PhysicsTools/PatAlgos/plugins/PATMuonProducer.cc @@ -46,6 +46,8 @@ #include "PhysicsTools/PatAlgos/interface/MuonMvaIDEstimator.h" #include "PhysicsTools/PatAlgos/interface/PATUserDataHelper.h" #include "PhysicsTools/PatAlgos/interface/SoftMuonMvaEstimator.h" +#include "PhysicsTools/PatAlgos/interface/SoftMuonMvaRun3Estimator.h" +#include "PhysicsTools/PatAlgos/interface/XGBooster.h" #include "PhysicsTools/PatUtils/interface/MiniIsolation.h" #include "TrackingTools/IPTools/interface/IPTools.h" #include "TrackingTools/Records/interface/TransientTrackRecord.h" @@ -236,6 +238,7 @@ namespace pat { /// standard muon selectors bool computeMuonIDMVA_; bool computeSoftMuonMVA_; + std::unique_ptr softMuonMvaRun3Booster_; bool recomputeBasicSelectors_; bool useJec_; edm::EDGetTokenT mvaBTagCollectionTag_; @@ -473,6 +476,13 @@ PATMuonProducer::PATMuonProducer(const edm::ParameterSet& iConfig, PATMuonHeavyO // MC info simInfo_ = consumes>(iConfig.getParameter("muonSimInfo")); + if (computeSoftMuonMVA_) { + std::string softMvaRun3Model = iConfig.getParameter("softMvaRun3Model"); + softMuonMvaRun3Booster_ = + std::make_unique(edm::FileInPath(softMvaRun3Model + ".model").fullPath(), + edm::FileInPath(softMvaRun3Model + ".features").fullPath()); + } + addTriggerMatching_ = iConfig.getParameter("addTriggerMatching"); if (addTriggerMatching_) { triggerObjects_ = @@ -1013,6 +1023,9 @@ void PATMuonProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) muon.setSoftMvaValue(mva); //preselection in SoftMuonMvaEstimator.cc muon.setSelector(reco::Muon::SoftMvaId, muon.softMvaValue() > 0.58); //WP choose for bmm4 + + // run3 soft mva + muon.setSoftMvaRun3Value(computeSoftMvaRun3(*softMuonMvaRun3Booster_, muon)); } } diff --git 
a/PhysicsTools/PatAlgos/plugins/PATObjectCrossLinker.cc b/PhysicsTools/PatAlgos/plugins/PATObjectCrossLinker.cc index ef790ad65c14f..eaacd778eaa12 100644 --- a/PhysicsTools/PatAlgos/plugins/PATObjectCrossLinker.cc +++ b/PhysicsTools/PatAlgos/plugins/PATObjectCrossLinker.cc @@ -46,14 +46,11 @@ class PATObjectCrossLinker : public edm::stream::EDProducer<> { public: explicit PATObjectCrossLinker(const edm::ParameterSet&); - ~PATObjectCrossLinker() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginStream(edm::StreamID) override; void produce(edm::Event&, const edm::EventSetup&) override; - void endStream() override; template void matchOneToMany(const C1& refProdOne, @@ -85,11 +82,6 @@ class PATObjectCrossLinker : public edm::stream::EDProducer<> { template void matchVertexToMany(const C1& refProdVtx, C2& itemsVtx, const std::string& nameVtx, C3& itemsMany); - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - // ----------member data --------------------------- const edm::EDGetTokenT> jets_; const edm::EDGetTokenT> muons_; @@ -135,11 +127,6 @@ PATObjectCrossLinker::PATObjectCrossLinker(const edm::ParameterSet& params) } } -PATObjectCrossLinker::~PATObjectCrossLinker() { - // do anything here that needs to be done at destruction time - // (e.g. close files, deallocate resources etc.) 
-} - // // member functions // @@ -369,12 +356,6 @@ void PATObjectCrossLinker::produce(edm::Event& iEvent, const edm::EventSetup& iS iEvent.put(std::move(vertices), "vertices"); } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void PATObjectCrossLinker::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void PATObjectCrossLinker::endStream() {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void PATObjectCrossLinker::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; diff --git a/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc b/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc index 997405760ed77..677ff575cc4e0 100644 --- a/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc +++ b/PhysicsTools/PatAlgos/plugins/PATPhotonSlimmer.cc @@ -28,10 +28,8 @@ namespace pat { class PATPhotonSlimmer : public edm::stream::EDProducer<> { public: explicit PATPhotonSlimmer(const edm::ParameterSet& iConfig); - ~PATPhotonSlimmer() override {} void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; - void beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup&) final; private: const edm::EDGetTokenT> src_; @@ -86,8 +84,6 @@ pat::PATPhotonSlimmer::PATPhotonSlimmer(const edm::ParameterSet& iConfig) produces>(); } -void pat::PATPhotonSlimmer::beginLuminosityBlock(const edm::LuminosityBlock&, const edm::EventSetup& iSetup) {} - void pat::PATPhotonSlimmer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { using namespace edm; using namespace std; diff --git a/PhysicsTools/PatAlgos/python/producersLayer1/displacedMuonProducer_cff.py b/PhysicsTools/PatAlgos/python/producersLayer1/displacedMuonProducer_cff.py index 6ddc7ee47ceff..6de0140609f3f 100644 --- 
a/PhysicsTools/PatAlgos/python/producersLayer1/displacedMuonProducer_cff.py +++ b/PhysicsTools/PatAlgos/python/producersLayer1/displacedMuonProducer_cff.py @@ -70,6 +70,7 @@ computeSoftMuonMVA = False, softMvaTrainingFile = "RecoMuon/MuonIdentification/data/TMVA-muonid-bmm4-B-25.weights.xml", + softMvaRun3Model = cms.string("RecoMuon/MuonIdentification/data/Run2022-20231030-1731-Event0"), # MC Info muonSimInfo = "displacedMuonSimClassifier", # This module does not exists but producer checks existence by itself diff --git a/PhysicsTools/PatAlgos/python/producersLayer1/muonProducer_cfi.py b/PhysicsTools/PatAlgos/python/producersLayer1/muonProducer_cfi.py index 9859b19e68b15..4e6c9fb74a9d6 100644 --- a/PhysicsTools/PatAlgos/python/producersLayer1/muonProducer_cfi.py +++ b/PhysicsTools/PatAlgos/python/producersLayer1/muonProducer_cfi.py @@ -123,6 +123,7 @@ computeSoftMuonMVA = cms.bool(False), softMvaTrainingFile = cms.FileInPath("RecoMuon/MuonIdentification/data/TMVA-muonid-bmm4-B-25.weights.xml"), + softMvaRun3Model = cms.string("RecoMuon/MuonIdentification/data/Run2022-20231030-1731-Event0"), # MC Info muonSimInfo = cms.InputTag("muonSimClassifier"), diff --git a/PhysicsTools/PatAlgos/python/selectionLayer1/lowPtElectronSelector_cfi.py b/PhysicsTools/PatAlgos/python/selectionLayer1/lowPtElectronSelector_cfi.py index 7ce0db586eaa8..5e03d2de37f48 100644 --- a/PhysicsTools/PatAlgos/python/selectionLayer1/lowPtElectronSelector_cfi.py +++ b/PhysicsTools/PatAlgos/python/selectionLayer1/lowPtElectronSelector_cfi.py @@ -9,6 +9,10 @@ cut = cms.string("pt > 1. 
&& electronID('ID') > -0.25"), ) +# Modifier for UPC +from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive +egamma_lowPt_exclusive.toModify(selectedPatLowPtElectrons,cut = "") + # Modifier for bParking (fully open selection) from Configuration.Eras.Modifier_bParking_cff import bParking bParking.toModify(selectedPatLowPtElectrons,cut = "pt > 1.") diff --git a/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py b/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py index 8c0bdc101cd2f..e33dc63a6e637 100644 --- a/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py +++ b/PhysicsTools/PatAlgos/python/slimming/MicroEventContent_cff.py @@ -160,6 +160,18 @@ from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA pp_on_AA.toModify(MicroEventContent, outputCommands = MicroEventContent.outputCommands + _pp_on_AA_extraCommands) +_upc_extraCommands = [ + 'keep patPackedCandidates_hiPixelTracks_*_*', + 'keep floatedmValueMap_packedPFCandidateTrackChi2_*_*', + 'keep floatedmValueMap_lostTrackChi2_*_*', + 'keep recoCentrality_hiCentrality_*_*', + 'keep recoClusterCompatibility_hiClusterCompatibility_*_*', + 'keep QIE10DataFrameHcalDataFrameContainer_hcalDigis_ZDC_*', +] + +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(MicroEventContent, outputCommands = MicroEventContent.outputCommands + _upc_extraCommands) + _zdc_extraCommands = ['keep QIE10DataFrameHcalDataFrameContainer_hcalDigis_ZDC_*'] from Configuration.ProcessModifiers.storeZDCDigis_cff import storeZDCDigis storeZDCDigis.toModify(MicroEventContent, outputCommands = MicroEventContent.outputCommands + _zdc_extraCommands) diff --git a/PhysicsTools/PatAlgos/python/slimming/isolatedTracks_cfi.py b/PhysicsTools/PatAlgos/python/slimming/isolatedTracks_cfi.py index 11d9c9b6ba500..27e4ba8d350bf 100644 --- a/PhysicsTools/PatAlgos/python/slimming/isolatedTracks_cfi.py +++ 
b/PhysicsTools/PatAlgos/python/slimming/isolatedTracks_cfi.py @@ -76,6 +76,9 @@ from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA pp_on_AA.toModify(isolatedTracks, useHighPurity = True) +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(isolatedTracks, pT_cut = 0.0, pT_cut_noIso = 0.0, saveDeDxHitInfoCut = "") + def miniAOD_customizeIsolatedTracksFastSim(process): """Switch off dE/dx hit info on fast sim, as it's not available""" process.isolatedTracks.saveDeDxHitInfo = False diff --git a/PhysicsTools/PatAlgos/python/slimming/lostTracks_cfi.py b/PhysicsTools/PatAlgos/python/slimming/lostTracks_cfi.py index 2da39f09701e7..4ea00ea436428 100644 --- a/PhysicsTools/PatAlgos/python/slimming/lostTracks_cfi.py +++ b/PhysicsTools/PatAlgos/python/slimming/lostTracks_cfi.py @@ -31,3 +31,6 @@ from Configuration.ProcessModifiers.run2_miniAOD_pp_on_AA_103X_cff import run2_miniAOD_pp_on_AA_103X run2_miniAOD_pp_on_AA_103X.toModify(lostTracks,inputCandidates = 'cleanedParticleFlow') + +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(lostTracks, minPtToStoreProps = 0.0, passThroughCut = "") diff --git a/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py b/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py index 3230dddce9dc3..360e6e4160f1f 100644 --- a/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py +++ b/PhysicsTools/PatAlgos/python/slimming/miniAOD_tools.py @@ -124,6 +124,8 @@ def miniAOD_customizeCommon(process): phase2_muon.toModify(process.selectedPatMuons, cut = "pt > 5 || isPFMuon || (pt > 3 && (isGlobalMuon || isStandAloneMuon || numberOfMatches > 0 || muonID('RPCMuLoose') || muonID('ME0MuonArbitrated') || muonID('GEMMuonArbitrated')) )") from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA pp_on_AA.toModify(process.selectedPatMuons, cut = "pt > 5 || isPFMuon || (pt > 1.2 && (isGlobalMuon || isStandAloneMuon) )") + from Configuration.Eras.Modifier_run3_upc_cff import 
run3_upc + run3_upc.toModify(process.selectedPatMuons, cut = "") process.selectedPatElectrons.cut = cms.string("") process.selectedPatTaus.cut = cms.string("pt > 18. && tauID('decayModeFindingNewDMs')> 0.5") @@ -305,7 +307,8 @@ def _add_deepFlavour(process): 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Summer17UL_ID_ISO_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Summer18UL_ID_ISO_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_noIso_V1_cff', - 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_iso_V1_cff' + 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_iso_V1_cff', + 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff' ] switchOnVIDElectronIdProducer(process,DataFormat.MiniAOD, task) process.egmGsfElectronIDs.physicsObjectSrc = cms.InputTag("reducedEgamma","reducedGedGsfElectrons") diff --git a/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py b/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py index d151a2a5c5e06..ed0a2c7c6c7dd 100644 --- a/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py +++ b/PhysicsTools/PatAlgos/python/slimming/packedPFCandidates_cfi.py @@ -57,3 +57,6 @@ inputCollection = "cleanedParticleFlow", chargedHadronIsolation = "" ) + +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(packedPFCandidates, minPtForChargedHadronProperties = 0.0, minPtForTrackProperties = 0.0) diff --git a/PhysicsTools/PatAlgos/python/slimming/slimmedCaloJets_cfi.py b/PhysicsTools/PatAlgos/python/slimming/slimmedCaloJets_cfi.py index 29955489d41c0..4ea6e857a4e14 100644 --- a/PhysicsTools/PatAlgos/python/slimming/slimmedCaloJets_cfi.py +++ b/PhysicsTools/PatAlgos/python/slimming/slimmedCaloJets_cfi.py @@ -7,3 +7,6 @@ from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA pp_on_AA.toModify(slimmedCaloJets, src = 
'akPu4CaloJets') + +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(slimmedCaloJets, cut = "pt>5") diff --git a/PhysicsTools/PatAlgos/python/slimming/slimming_cff.py b/PhysicsTools/PatAlgos/python/slimming/slimming_cff.py index 9f7bd2625d026..668b6725f9b3a 100644 --- a/PhysicsTools/PatAlgos/python/slimming/slimming_cff.py +++ b/PhysicsTools/PatAlgos/python/slimming/slimming_cff.py @@ -122,3 +122,6 @@ from PhysicsTools.PatAlgos.slimming.patPhotonDRNCorrector_cfi import patPhotonsDRN from Configuration.ProcessModifiers.photonDRN_cff import _photonDRN _photonDRN.toReplaceWith(slimmingTask, cms.Task(slimmingTask.copy(), patPhotonsDRN)) + +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toReplaceWith(slimmingTask, cms.Task(slimmingTask.copy(), hiPixelTracks, packedPFCandidateTrackChi2, lostTrackChi2)) diff --git a/PhysicsTools/PatAlgos/src/SoftMuonMvaRun3Estimator.cc b/PhysicsTools/PatAlgos/src/SoftMuonMvaRun3Estimator.cc new file mode 100644 index 0000000000000..6cba0e438e495 --- /dev/null +++ b/PhysicsTools/PatAlgos/src/SoftMuonMvaRun3Estimator.cc @@ -0,0 +1,138 @@ +#include "PhysicsTools/PatAlgos/interface/SoftMuonMvaRun3Estimator.h" +#include "DataFormats/PatCandidates/interface/Muon.h" +#include "PhysicsTools/PatAlgos/interface/XGBooster.h" + +typedef std::pair MatchPair; + +const MatchPair& getBetterMatch(const MatchPair& match1, const MatchPair& match2) { + // Prefer DT over CSC simply because it's closer to IP + // and will have less multiple scattering (at least for + // RB1 vs ME1/3 case). RB1 & ME1/2 overlap is tiny + if (match2.first->detector() == MuonSubdetId::DT and match1.first->detector() != MuonSubdetId::DT) + return match2; + + // For the rest compare local x match. 
We expect that + // segments belong to the muon, so the difference in + // local x is a reflection on how well we can measure it + if (abs(match1.first->x - match1.second->x) > abs(match2.first->x - match2.second->x)) + return match2; + + return match1; +} + +float dX(const MatchPair& match) { + if (match.first and match.second->hasPhi()) + return (match.first->x - match.second->x); + else + return 9999.; +} + +float pullX(const MatchPair& match) { + if (match.first and match.second->hasPhi()) + return dX(match) / sqrt(pow(match.first->xErr, 2) + pow(match.second->xErr, 2)); + else + return 9999.; +} + +float pullDxDz(const MatchPair& match) { + if (match.first and match.second->hasPhi()) + return (match.first->dXdZ - match.second->dXdZ) / + sqrt(pow(match.first->dXdZErr, 2) + pow(match.second->dXdZErr, 2)); + else + return 9999.; +} + +float dY(const MatchPair& match) { + if (match.first and match.second->hasZed()) + return (match.first->y - match.second->y); + else + return 9999.; +} + +float pullY(const MatchPair& match) { + if (match.first and match.second->hasZed()) + return dY(match) / sqrt(pow(match.first->yErr, 2) + pow(match.second->yErr, 2)); + else + return 9999.; +} + +float pullDyDz(const MatchPair& match) { + if (match.first and match.second->hasZed()) + return (match.first->dYdZ - match.second->dYdZ) / + sqrt(pow(match.first->dYdZErr, 2) + pow(match.second->dYdZErr, 2)); + else + return 9999.; +} + +void fillMatchInfoForStation(std::string prefix, pat::XGBooster& booster, const MatchPair& match) { + booster.set(prefix + "_dX", dX(match)); + booster.set(prefix + "_pullX", pullX(match)); + booster.set(prefix + "_pullDxDz", pullDxDz(match)); + booster.set(prefix + "_dY", dY(match)); + booster.set(prefix + "_pullY", pullY(match)); + booster.set(prefix + "_pullDyDz", pullDyDz(match)); +} + +void fillMatchInfo(pat::XGBooster& booster, const pat::Muon& muon) { + // Initiate containter for results + const int n_stations = 2; + std::vector matches; + for 
(unsigned int i = 0; i < n_stations; ++i) + matches.push_back(std::pair(nullptr, nullptr)); + + // Find best matches + for (auto& chamberMatch : muon.matches()) { + unsigned int station = chamberMatch.station() - 1; + if (station >= n_stations) + continue; + + // Find best segment match. + // We could consider all segments, but we will restrict to segments + // that match to this candidate better than to other muon candidates + for (auto& segmentMatch : chamberMatch.segmentMatches) { + if (not segmentMatch.isMask(reco::MuonSegmentMatch::BestInStationByDR) || + not segmentMatch.isMask(reco::MuonSegmentMatch::BelongsToTrackByDR)) + continue; + + // Multiple segment matches are possible in different + // chambers that are either overlapping or belong to + // different detectors. We need to select one. + auto match_pair = MatchPair(&chamberMatch, &segmentMatch); + + if (matches[station].first) + matches[station] = getBetterMatch(matches[station], match_pair); + else + matches[station] = match_pair; + } + } + + // Fill matching information + fillMatchInfoForStation("match1", booster, matches[0]); + fillMatchInfoForStation("match2", booster, matches[1]); +} + +float pat::computeSoftMvaRun3(pat::XGBooster& booster, const pat::Muon& muon) { + if (!muon.isTrackerMuon() && !muon.isGlobalMuon()) + return 0; + + fillMatchInfo(booster, muon); + + booster.set("pt", muon.pt()); + booster.set("eta", muon.eta()); + booster.set("trkValidFrac", muon.innerTrack()->validFraction()); + booster.set("glbTrackProbability", muon.combinedQuality().glbTrackProbability); + booster.set("nLostHitsInner", + muon.innerTrack()->hitPattern().numberOfLostTrackerHits(reco::HitPattern::MISSING_INNER_HITS)); + booster.set("nLostHitsOuter", + muon.innerTrack()->hitPattern().numberOfLostTrackerHits(reco::HitPattern::MISSING_OUTER_HITS)); + booster.set("trkKink", muon.combinedQuality().trkKink); + booster.set("chi2LocalPosition", muon.combinedQuality().chi2LocalPosition); + booster.set("nPixels", 
muon.innerTrack()->hitPattern().numberOfValidPixelHits()); + booster.set("nValidHits", muon.innerTrack()->hitPattern().numberOfValidTrackerHits()); + booster.set("nLostHitsOn", muon.innerTrack()->hitPattern().numberOfLostTrackerHits(reco::HitPattern::TRACK_HITS)); + booster.set("glbNormChi2", muon.isGlobalMuon() ? muon.globalTrack()->normalizedChi2() : 9999.); + booster.set("trkLayers", muon.innerTrack()->hitPattern().trackerLayersWithMeasurement()); + booster.set("highPurity", muon.innerTrack()->quality(reco::Track::highPurity)); + + return booster.predict(); +} diff --git a/PhysicsTools/PatAlgos/src/XGBooster.cc b/PhysicsTools/PatAlgos/src/XGBooster.cc new file mode 100644 index 0000000000000..7e7013ee40369 --- /dev/null +++ b/PhysicsTools/PatAlgos/src/XGBooster.cc @@ -0,0 +1,126 @@ +#include "PhysicsTools/PatAlgos/interface/XGBooster.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +using namespace pat; + +std::vector read_features(const std::string& content) { + std::vector result; + + std::istringstream stream(content); + char ch; + + // Expect opening '[' + stream >> ch; + if (ch != '[') { + throw std::runtime_error("Expected '[' at the beginning of the JSON array!"); + } + + while (stream) { + stream >> ch; + + if (ch == ']') { + break; + } else if (ch == ',') { + continue; + } else if (ch == '"') { + std::string feature; + std::getline(stream, feature, '"'); + result.push_back(feature); + } else { + throw std::runtime_error("Unexpected character in the JSON array!"); + } + } + + return result; +} + +XGBooster::XGBooster(std::string model_file) { + int status = XGBoosterCreate(nullptr, 0, &booster_); + if (status != 0) + throw std::runtime_error("Failed to create XGBooster"); + status = XGBoosterLoadModel(booster_, model_file.c_str()); + if (status != 0) + throw std::runtime_error("Failed to load XGBoost model"); + XGBoosterSetParam(booster_, "nthread", "1"); +} + +XGBooster::XGBooster(std::string model_file, 
std::string model_features) : XGBooster(model_file) { + std::ifstream file(model_features); + if (!file.is_open()) + throw std::runtime_error("Failed to open file: " + model_features); + + std::string content((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + file.close(); + + std::vector features = read_features(content); + + for (const auto& feature : features) { + addFeature(feature); + } +} + +void XGBooster::reset() { std::fill(features_.begin(), features_.end(), std::nan("")); } + +void XGBooster::addFeature(std::string name) { + features_.push_back(0); + feature_name_to_index_[name] = features_.size() - 1; +} + +void XGBooster::set(std::string name, float value) { features_.at(feature_name_to_index_[name]) = value; } + +float XGBooster::predict() { + float result(-999.); + + // check if all feature values are set properly + for (unsigned int i = 0; i < features_.size(); ++i) + if (std::isnan(features_.at(i))) { + std::string feature_name; + for (const auto& pair : feature_name_to_index_) { + if (pair.second == i) { + feature_name = pair.first; + break; + } + } + throw std::runtime_error("Feature is not set: " + feature_name); + } + + DMatrixHandle dvalues; + XGDMatrixCreateFromMat(&features_[0], 1, features_.size(), 9e99, &dvalues); + + bst_ulong out_len = 0; + const float* score = nullptr; + + // config json + const char* json = R"({ + "type": 0, + "training": false, + "iteration_begin": 0, + "iteration_end": 0, + "strict_shape": false + })"; + + // Shape of output prediction + bst_ulong const* out_shape = nullptr; + + auto ret = XGBoosterPredictFromDMatrix(booster_, dvalues, json, &out_shape, &out_len, &score); + + XGDMatrixFree(dvalues); + + if (ret == 0) { + assert(out_len == 1 && "Unexpected prediction format"); + result = score[0]; + } + + reset(); + + return result; +} diff --git a/PhysicsTools/PythonAnalysis/test/BuildFile.xml b/PhysicsTools/PythonAnalysis/test/BuildFile.xml index 0c04ef39142f7..ca00df05a0690 100644 --- 
a/PhysicsTools/PythonAnalysis/test/BuildFile.xml +++ b/PhysicsTools/PythonAnalysis/test/BuildFile.xml @@ -129,3 +129,12 @@ + + + + + + + + + diff --git a/PhysicsTools/PythonAnalysis/test/testTorch.cc b/PhysicsTools/PythonAnalysis/test/testTorch.cc new file mode 100644 index 0000000000000..ae38f0f790393 --- /dev/null +++ b/PhysicsTools/PythonAnalysis/test/testTorch.cc @@ -0,0 +1,64 @@ +// Based on https://github.com/Maverobot/libtorch_examples/blob/master/src/simple_optimization_example.cpp +#include +#include +#include + +constexpr double kLearningRate = 0.001; +constexpr int kMaxIterations = 100000; + +void native_run(double minimal) { + // Initial x value + auto x = torch::randn({1, 1}, torch::requires_grad(true)); + + for (size_t t = 0; t < kMaxIterations; t++) { + // Expression/value to be minimized + auto y = (x - minimal) * (x - minimal); + if (y.item() < 1e-3) { + break; + } + // Calculate gradient + y.backward(); + + // Step x value without considering gradient + torch::NoGradGuard no_grad_guard; + x -= kLearningRate * x.grad(); + + // Reset the gradient of variable x + x.mutable_grad().reset(); + } + + std::cout << "[native] Actual minimal x value: " << minimal << ", calculated optimal x value: " << x.item() + << std::endl; +} + +void optimizer_run(double minimal) { + // Initial x value + std::vector x; + x.push_back(torch::randn({1, 1}, torch::requires_grad(true))); + auto opt = torch::optim::SGD(x, torch::optim::SGDOptions(kLearningRate)); + + for (size_t t = 0; t < kMaxIterations; t++) { + // Expression/value to be minimized + auto y = (x[0] - minimal) * (x[0] - minimal); + if (y.item() < 1e-3) { + break; + } + // Calculate gradient + y.backward(); + + // Step x value without considering gradient + opt.step(); + // Reset the gradient of variable x + opt.zero_grad(); + } + + std::cout << "[optimizer] Actual minimal x value: " << minimal + << ", calculated optimal x value: " << x[0].item() << std::endl; +} + +// optimize y = (x - 10)^2 +int 
main(int argc, char* argv[]) { + native_run(0.01); + optimizer_run(0.01); + return 0; +} diff --git a/PhysicsTools/PythonAnalysis/test/time_serie_prediction.cpp b/PhysicsTools/PythonAnalysis/test/time_serie_prediction.cpp new file mode 100644 index 0000000000000..efe4b7fb6c1ea --- /dev/null +++ b/PhysicsTools/PythonAnalysis/test/time_serie_prediction.cpp @@ -0,0 +1,32 @@ +#include +#include + +struct Net : torch::nn::Module { + Net(int64_t N, int64_t M) : linear(register_module("linear", torch::nn::Linear(N, M))) { + another_bias = register_parameter("b", torch::randn(M)); + } + torch::Tensor forward(torch::Tensor input) { return linear(input) + another_bias; } + torch::nn::Linear linear; + torch::Tensor another_bias; +}; + +int main(int /*argc*/, char* /*argv*/[]) { + // Use GPU when present, CPU otherwise. + Net net(4, 5); + + torch::Device device(torch::kCPU); + if (torch::cuda::is_available()) { + device = torch::Device(torch::kCUDA); + std::cout << "CUDA is available! Training on GPU." 
<< std::endl; + } + + net.to(device); + + for (const auto& pair : net.named_parameters()) { + std::cout << pair.key() << ": " << pair.value() << std::endl; + } + + std::cout << net.forward(torch::ones({2, 4})) << std::endl; + + return 0; +} diff --git a/PhysicsTools/SelectorUtils/interface/PFJetIDSelectionFunctor.h b/PhysicsTools/SelectorUtils/interface/PFJetIDSelectionFunctor.h index a97f0c9d7c88f..039625b269f5e 100644 --- a/PhysicsTools/SelectorUtils/interface/PFJetIDSelectionFunctor.h +++ b/PhysicsTools/SelectorUtils/interface/PFJetIDSelectionFunctor.h @@ -37,10 +37,10 @@ class PFJetIDSelectionFunctor : public Selector { SUMMER18PUPPI, RUN2UL16CHS, RUN2UL16PUPPI, - RUN3WINTER22CHSrunsBCDEprompt, - RUN3WINTER22PUPPIrunsBCDEprompt, - RUN3WINTER22CHS, - RUN3WINTER22PUPPI, + RUN3CHSrunsBCDEprompt, + RUN3PUPPIrunsBCDEprompt, + RUN3CHSruns2022FGruns2023CD, + RUN3PUPPIruns2022FGruns2023CD, RUN2ULCHS, RUN2ULPUPPI, N_VERSIONS @@ -80,16 +80,17 @@ class PFJetIDSelectionFunctor : public Selector { version_ = RUN2ULCHS; else if (versionStr == "RUN2ULPUPPI") version_ = RUN2ULPUPPI; - else if (versionStr == "RUN3WINTER22CHSrunsBCDEprompt") - version_ = RUN3WINTER22CHSrunsBCDEprompt; - else if (versionStr == "RUN3WINTER22PUPPIrunsBCDEprompt") - version_ = RUN3WINTER22PUPPIrunsBCDEprompt; - else if (versionStr == "RUN3WINTER22CHS") - version_ = RUN3WINTER22CHS; - else if (versionStr == "RUN3WINTER22PUPPI") - version_ = RUN3WINTER22PUPPI; + else if (versionStr == "RUN3CHSrunsBCDEprompt") + version_ = RUN3CHSrunsBCDEprompt; + else if (versionStr == "RUN3PUPPIrunsBCDEprompt") + version_ = RUN3PUPPIrunsBCDEprompt; + else if (versionStr == "RUN3CHSruns2022FGruns2023CD") + version_ = RUN3CHSruns2022FGruns2023CD; + else if (versionStr == "RUN3PUPPIruns2022FGruns2023CD") + version_ = RUN3PUPPIruns2022FGruns2023CD; else - version_ = RUN3WINTER22PUPPI; //set RUN3WINTER22PUPPI as default //this is extremely unsafe + version_ = + RUN3PUPPIruns2022FGruns2023CD; //set 
RUN3PUPPIruns2022FGruns2023CD as default //this is extremely unsafe if (qualityStr == "LOOSE") quality_ = LOOSE; @@ -131,22 +132,23 @@ class PFJetIDSelectionFunctor : public Selector { static edm::ParameterSetDescription getDescription() { edm::ParameterSetDescription desc; - desc.ifValue(edm::ParameterDescription("version", "RUN3WINTER22PUPPI", true, edm::Comment("")), - edm::allowedValues("FIRSTDATA", - "RUNIISTARTUP", - "WINTER16", - "WINTER17", - "WINTER17PUPPI", - "SUMMER18", - "SUMMER18PUPPI", - "RUN2UL16CHS", - "RUN2UL16PUPPI", - "RUN2ULCHS", - "RUN2ULPUPPI", - "RUN3WINTER22CHSrunsBCDEprompt", - "RUN3WINTER22PUPPIrunsBCDEprompt", - "RUN3WINTER22CHS", - "RUN3WINTER22PUPPI")); + desc.ifValue( + edm::ParameterDescription("version", "RUN3PUPPIruns2022FGruns2023CD", true, edm::Comment("")), + edm::allowedValues("FIRSTDATA", + "RUNIISTARTUP", + "WINTER16", + "WINTER17", + "WINTER17PUPPI", + "SUMMER18", + "SUMMER18PUPPI", + "RUN2UL16CHS", + "RUN2UL16PUPPI", + "RUN2ULCHS", + "RUN2ULPUPPI", + "RUN3CHSrunsBCDEprompt", + "RUN3PUPPIrunsBCDEprompt", + "RUN3CHSruns2022FGruns2023CD", + "RUN3PUPPIruns2022FGruns2023CD")); desc.ifValue(edm::ParameterDescription("quality", "TIGHT", true, edm::Comment("")), edm::allowedValues("LOOSE", "TIGHT", "TIGHTLEPVETO")); desc.addOptional>("cutsToIgnore")->setComment(""); @@ -211,9 +213,9 @@ class PFJetIDSelectionFunctor : public Selector { bool operator()(const pat::Jet &jet, pat::strbitset &ret) override { if (version_ == FIRSTDATA || version_ == RUNIISTARTUP || version_ == WINTER16 || version_ == WINTER17 || version_ == WINTER17PUPPI || version_ == SUMMER18 || version_ == SUMMER18PUPPI || version_ == RUN2UL16CHS || - version_ == RUN2UL16PUPPI || version_ == RUN3WINTER22CHSrunsBCDEprompt || - version_ == RUN3WINTER22PUPPIrunsBCDEprompt || version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI || - version_ == RUN2ULCHS || version_ == RUN2ULPUPPI) { + version_ == RUN2UL16PUPPI || version_ == RUN3CHSrunsBCDEprompt || version_ == 
RUN3PUPPIrunsBCDEprompt || + version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD || version_ == RUN2ULCHS || + version_ == RUN2ULPUPPI) { if (jet.currentJECLevel() == "Uncorrected" || !jet.jecSetsAvailable()) return firstDataCuts(jet, ret, version_); else @@ -231,9 +233,9 @@ class PFJetIDSelectionFunctor : public Selector { bool operator()(const reco::PFJet &jet, pat::strbitset &ret) { if (version_ == FIRSTDATA || version_ == RUNIISTARTUP || version_ == WINTER16 || version_ == WINTER17 || version_ == WINTER17PUPPI || version_ == SUMMER18 || version_ == SUMMER18PUPPI || version_ == RUN2UL16CHS || - version_ == RUN2UL16PUPPI || version_ == RUN3WINTER22CHSrunsBCDEprompt || - version_ == RUN3WINTER22PUPPIrunsBCDEprompt || version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI || - version_ == RUN2ULCHS || version_ == RUN2ULPUPPI) { + version_ == RUN2UL16PUPPI || version_ == RUN3CHSrunsBCDEprompt || version_ == RUN3PUPPIrunsBCDEprompt || + version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD || version_ == RUN2ULCHS || + version_ == RUN2ULPUPPI) { return firstDataCuts(jet, ret, version_); } else { return false; @@ -380,13 +382,13 @@ class PFJetIDSelectionFunctor : public Selector { float etaB = 2.4; // Cuts for |eta| < 2.6 for Summer18 if (version_ == SUMMER18 || version_ == SUMMER18PUPPI || version_ == RUN2ULCHS || version_ == RUN2ULPUPPI || - version_ == RUN3WINTER22CHSrunsBCDEprompt || version_ == RUN3WINTER22PUPPIrunsBCDEprompt || - version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI) + version_ == RUN3CHSrunsBCDEprompt || version_ == RUN3PUPPIrunsBCDEprompt || + version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD) etaB = 2.6; if ((version_ != WINTER17 && version_ != WINTER17PUPPI && version_ != SUMMER18 && version_ != SUMMER18PUPPI && - version_ != RUN2UL16CHS && version_ != RUN2UL16PUPPI && version_ != RUN3WINTER22CHSrunsBCDEprompt && - version_ != 
RUN3WINTER22PUPPIrunsBCDEprompt && version_ != RUN3WINTER22CHS && version_ != RUN3WINTER22PUPPI && - version_ != RUN2ULCHS && version_ != RUN2ULPUPPI) || + version_ != RUN2UL16CHS && version_ != RUN2UL16PUPPI && version_ != RUN3CHSrunsBCDEprompt && + version_ != RUN3PUPPIrunsBCDEprompt && version_ != RUN3CHSruns2022FGruns2023CD && + version_ != RUN3PUPPIruns2022FGruns2023CD && version_ != RUN2ULCHS && version_ != RUN2ULPUPPI) || quality_ != TIGHT) { if (ignoreCut(indexCEF_) || (cef < cut(indexCEF_, double()) || std::abs(jet.eta()) > etaB)) passCut(ret, indexCEF_); @@ -593,8 +595,8 @@ class PFJetIDSelectionFunctor : public Selector { (nneutrals < cut(indexNNeutrals_FW_U_, int()) || std::abs(jet.eta()) <= 3.0)) passCut(ret, indexNNeutrals_FW_U_); - } else if ((version_ == SUMMER18) || (version_ == RUN2ULCHS) || (version_ == RUN3WINTER22CHSrunsBCDEprompt) || - (version_ == RUN3WINTER22CHS)) { + } else if ((version_ == SUMMER18) || (version_ == RUN2ULCHS) || (version_ == RUN3CHSrunsBCDEprompt) || + (version_ == RUN3CHSruns2022FGruns2023CD)) { // Cuts for |eta| <= 2.6 for SUMMER18 scenario if (ignoreCut(indexNConstituents_) || (nconstituents > cut(indexNConstituents_, int()) || std::abs(jet.eta()) > 2.6)) @@ -606,6 +608,8 @@ class PFJetIDSelectionFunctor : public Selector { if (quality_ == TIGHTLEPVETO) { if (ignoreCut(indexMUF_) || (muf < cut(indexMUF_, double()) || std::abs(jet.eta()) > 2.6)) passCut(ret, indexMUF_); + if (ignoreCut(indexCEF_) || (cef < cut(indexCEF_, double()) || std::abs(jet.eta()) > 2.6)) //edw + passCut(ret, indexCEF_); } // Cuts for 2.6 <= |eta| <= 2.7 for SUMMER18 scenario @@ -647,8 +651,8 @@ class PFJetIDSelectionFunctor : public Selector { passCut(ret, indexNNeutrals_FW_); } - else if ((version_ == SUMMER18PUPPI) || (version_ == RUN2ULPUPPI) || - (version_ == RUN3WINTER22PUPPIrunsBCDEprompt) || (version_ == RUN3WINTER22PUPPI)) { + else if ((version_ == SUMMER18PUPPI) || (version_ == RUN2ULPUPPI) || (version_ == RUN3PUPPIrunsBCDEprompt) || + 
(version_ == RUN3PUPPIruns2022FGruns2023CD)) { // Cuts for |eta| <= 2.6 for SUMMER18PUPPI scenario if (ignoreCut(indexNConstituents_) || (nconstituents > cut(indexNConstituents_, int()) || std::abs(jet.eta()) > 2.6)) @@ -660,6 +664,8 @@ class PFJetIDSelectionFunctor : public Selector { if (quality_ == TIGHTLEPVETO) { if (ignoreCut(indexMUF_) || (muf < cut(indexMUF_, double()) || std::abs(jet.eta()) > 2.6)) passCut(ret, indexMUF_); + if (ignoreCut(indexCEF_) || (cef < cut(indexCEF_, double()) || std::abs(jet.eta()) > 2.6)) //edw + passCut(ret, indexCEF_); } // Cuts for 2.6 <= |eta| <= 2.7 for SUMMER18PUPPI scenario @@ -709,8 +715,8 @@ class PFJetIDSelectionFunctor : public Selector { push_back("NHF"); if ((version_ != WINTER17 && version_ != WINTER17PUPPI && version_ != SUMMER18 && version_ != SUMMER18PUPPI && version_ != RUN2UL16CHS && version_ != RUN2UL16PUPPI && version_ != RUN2ULCHS && version_ != RUN2ULPUPPI && - version_ != RUN3WINTER22CHSrunsBCDEprompt && version_ != RUN3WINTER22PUPPIrunsBCDEprompt && - version_ != RUN3WINTER22CHS && version_ != RUN3WINTER22PUPPI) || + version_ != RUN3CHSrunsBCDEprompt && version_ != RUN3PUPPIrunsBCDEprompt && + version_ != RUN3CHSruns2022FGruns2023CD && version_ != RUN3PUPPIruns2022FGruns2023CD) || quality_ != TIGHT) push_back("CEF"); push_back("NEF"); @@ -775,8 +781,7 @@ class PFJetIDSelectionFunctor : public Selector { push_back("MUF"); } } - if ((version_ == SUMMER18) || (version_ == RUN2ULCHS) || (version_ == RUN3WINTER22CHSrunsBCDEprompt) || - (version_ == RUN3WINTER22CHS)) { + if ((version_ == SUMMER18) || (version_ == RUN2ULCHS)) { push_back("NHF_TR"); push_back("NEF_TR"); push_back("NCH_TR"); @@ -793,8 +798,41 @@ class PFJetIDSelectionFunctor : public Selector { push_back("CEF_TR"); } } - if ((version_ == SUMMER18PUPPI) || (version_ == RUN2ULPUPPI) || (version_ == RUN3WINTER22PUPPIrunsBCDEprompt) || - (version_ == RUN3WINTER22PUPPI)) { + if (version_ == RUN3CHSrunsBCDEprompt) { + push_back("NHF_TR"); + 
push_back("NEF_TR"); + push_back("NCH_TR"); + push_back("NEF_EC_U"); + push_back("nNeutrals_EC"); + push_back("NEF_FW"); + push_back("NHF_FW"); + push_back("nNeutrals_FW"); + + if (quality_ == TIGHTLEPVETO) { + push_back("CEF"); + push_back("MUF"); + push_back("MUF_TR"); + push_back("CEF_TR"); + } + } + if (version_ == RUN3CHSruns2022FGruns2023CD) { + push_back("NHF_TR"); + push_back("NEF_TR"); + push_back("NCH_TR"); + push_back("NHF_EC"); + push_back("NEF_EC_U"); + push_back("nNeutrals_EC"); + push_back("NEF_FW"); + push_back("nNeutrals_FW"); + + if (quality_ == TIGHTLEPVETO) { + push_back("CEF"); + push_back("MUF"); + push_back("MUF_TR"); + push_back("CEF_TR"); + } + } + if ((version_ == SUMMER18PUPPI) || (version_ == RUN2ULPUPPI)) { push_back("NHF_TR"); push_back("NEF_TR"); push_back("NHF_EC"); @@ -809,11 +847,24 @@ class PFJetIDSelectionFunctor : public Selector { push_back("CEF_TR"); } } + if ((version_ == RUN3PUPPIrunsBCDEprompt) || (version_ == RUN3PUPPIruns2022FGruns2023CD)) { + push_back("NHF_TR"); + push_back("NEF_TR"); + push_back("NHF_EC"); + push_back("NEF_FW"); + push_back("nNeutrals_FW_L"); + if (quality_ == TIGHTLEPVETO) { + push_back("CEF"); + push_back("MUF"); + push_back("MUF_TR"); + push_back("CEF_TR"); + } + } if ((version_ == WINTER17 || version_ == WINTER17PUPPI || version_ == SUMMER18 || version_ == SUMMER18PUPPI || version_ == RUN2UL16CHS || version_ == RUN2UL16PUPPI || version_ == RUN2ULCHS || version_ == RUN2ULPUPPI || - version_ == RUN3WINTER22CHSrunsBCDEprompt || version_ == RUN3WINTER22PUPPIrunsBCDEprompt || - version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI) && + version_ == RUN3CHSrunsBCDEprompt || version_ == RUN3PUPPIrunsBCDEprompt || + version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD) && quality_ == LOOSE) { edm::LogWarning("BadJetIDVersion") << "The LOOSE operating point is only supported for the WINTER16 JetID version -- defaulting to TIGHT"; @@ -843,13 +894,13 @@ class 
PFJetIDSelectionFunctor : public Selector { set("NHF", 0.9); if (version_ != WINTER17 && version_ != WINTER17PUPPI && version_ != SUMMER18 && version_ != SUMMER18PUPPI && version_ != RUN2UL16CHS && version_ != RUN2UL16PUPPI && version_ != RUN2ULCHS && version_ != RUN2ULPUPPI && - version_ != RUN3WINTER22CHSrunsBCDEprompt && version_ != RUN3WINTER22PUPPIrunsBCDEprompt && - version_ != RUN3WINTER22CHS && version_ != RUN3WINTER22PUPPI) + version_ != RUN3CHSrunsBCDEprompt && version_ != RUN3PUPPIrunsBCDEprompt && + version_ != RUN3CHSruns2022FGruns2023CD && version_ != RUN3PUPPIruns2022FGruns2023CD) set("CEF", 0.99); - if (version_ == RUN3WINTER22CHSrunsBCDEprompt || version_ == RUN3WINTER22PUPPIrunsBCDEprompt || - version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI) + if (version_ == RUN3CHSrunsBCDEprompt || version_ == RUN3PUPPIrunsBCDEprompt || + version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD) set("CHF", 0.01); - if (version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI) + if (version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD) set("NHF", 0.99); set("NEF", 0.9); set("NCH", 0); @@ -911,32 +962,54 @@ class PFJetIDSelectionFunctor : public Selector { set("NEF_FW", 0.90); set("nNeutrals_FW_L", 2); set("nNeutrals_FW_U", 999999); - } else if (version_ == RUN2ULCHS || version_ == RUN3WINTER22CHSrunsBCDEprompt || version_ == RUN3WINTER22CHS) { + } else if (version_ == RUN2ULCHS) { set("NHF_TR", 0.9); set("NEF_TR", 0.99); set("NCH_TR", 0); set("NEF_EC_L", 0.01); set("NEF_EC_U", 0.99); - set("nNeutrals_EC", 2); + set("nNeutrals_EC", 1); set("NHF_FW", 0.2); set("NEF_FW", 0.90); set("nNeutrals_FW", 10); - } else if (version_ == RUN2ULPUPPI || version_ == RUN3WINTER22PUPPIrunsBCDEprompt) { + } else if (version_ == RUN3CHSrunsBCDEprompt) { set("NHF_TR", 0.9); set("NEF_TR", 0.99); - set("NHF_EC", 0.9999); - set("NHF_FW", -1.0); + set("NCH_TR", 0); + set("NEF_EC_U", 0.99); + set("nNeutrals_EC", 
1); + set("NHF_FW", 0.2); set("NEF_FW", 0.90); + set("nNeutrals_FW", 10); + } else if (version_ == RUN3CHSruns2022FGruns2023CD) { + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); + set("NCH_TR", 0); + set("NEF_EC_U", 0.99); + set("NHF_EC", 0.99); + set("nNeutrals_EC", 1); + set("NEF_FW", 0.4); + set("nNeutrals_FW", 10); + } else if (version_ == RUN2ULPUPPI) { + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); + set("NHF_EC", 0.99); + set("NHF_FW", -1.0); + set("NEF_FW", 0.4); set("nNeutrals_FW_L", 2); set("nNeutrals_FW_U", 999999); - } else if (version_ == RUN3WINTER22PUPPI) { + } else if (version_ == RUN3PUPPIrunsBCDEprompt) { set("NHF_TR", 0.9); set("NEF_TR", 0.99); set("NHF_EC", 0.9999); - set("NHF_FW", -1.0); set("NEF_FW", 0.90); + set("nNeutrals_FW_L", 2); + } else if (version_ == RUN3PUPPIruns2022FGruns2023CD) { + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); + set("NHF_EC", 0.99); + set("NEF_FW", 0.4); set("nNeutrals_FW_L", 1); - set("nNeutrals_FW_U", 999999); } } else if (quality_ == TIGHTLEPVETO) { set("CHF", 0.0); @@ -954,10 +1027,10 @@ class PFJetIDSelectionFunctor : public Selector { set("NEF_FW", 0.90); set("nNeutrals_FW", 10); } - if (version_ == RUN3WINTER22CHSrunsBCDEprompt || version_ == RUN3WINTER22PUPPIrunsBCDEprompt || - version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI) { + if (version_ == RUN3CHSrunsBCDEprompt || version_ == RUN3PUPPIrunsBCDEprompt || + version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD) { set("CHF", 0.01); - } else if (version_ == RUN3WINTER22CHS || version_ == RUN3WINTER22PUPPI) { + } else if (version_ == RUN3CHSruns2022FGruns2023CD || version_ == RUN3PUPPIruns2022FGruns2023CD) { set("NHF", 0.99); } else if (version_ == WINTER17PUPPI) { set("NHF_EC", 0.99); @@ -1024,7 +1097,7 @@ class PFJetIDSelectionFunctor : public Selector { set("NEF_FW", 0.90); set("nNeutrals_FW_L", 2); set("nNeutrals_FW_U", 999999); - } else if (version_ == RUN2ULCHS || version_ == RUN3WINTER22CHSrunsBCDEprompt || 
version_ == RUN3WINTER22CHS) { + } else if (version_ == RUN2ULCHS) { set("NHF_TR", 0.9); set("NEF_TR", 0.99); set("MUF_TR", 0.8); @@ -1032,11 +1105,33 @@ class PFJetIDSelectionFunctor : public Selector { set("CEF_TR", 0.8); set("NEF_EC_L", 0.01); set("NEF_EC_U", 0.99); - set("nNeutrals_EC", 2); + set("nNeutrals_EC", 1); + set("NHF_FW", 0.2); + set("NEF_FW", 0.90); + set("nNeutrals_FW", 10); + } else if (version_ == RUN3CHSrunsBCDEprompt) { + set("MUF_TR", 0.8); + set("CEF_TR", 0.8); + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); + set("NCH_TR", 0); + set("NEF_EC_U", 0.99); + set("nNeutrals_EC", 1); set("NHF_FW", 0.2); set("NEF_FW", 0.90); set("nNeutrals_FW", 10); - } else if (version_ == RUN2ULPUPPI || version_ == RUN3WINTER22PUPPIrunsBCDEprompt) { + } else if (version_ == RUN3CHSruns2022FGruns2023CD) { + set("MUF_TR", 0.8); + set("CEF_TR", 0.8); + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); + set("NCH_TR", 0); + set("NEF_EC_U", 0.99); + set("NHF_EC", 0.99); + set("nNeutrals_EC", 1); + set("NEF_FW", 0.4); + set("nNeutrals_FW", 10); + } else if (version_ == RUN2ULPUPPI) { set("NHF_TR", 0.9); set("NEF_TR", 0.99); set("MUF_TR", 0.8); @@ -1046,16 +1141,22 @@ class PFJetIDSelectionFunctor : public Selector { set("NEF_FW", 0.90); set("nNeutrals_FW_L", 2); set("nNeutrals_FW_U", 999999); - } else if (version_ == RUN3WINTER22PUPPI) { - set("NHF_TR", 0.9); - set("NEF_TR", 0.99); + } else if (version_ == RUN3PUPPIrunsBCDEprompt) { set("MUF_TR", 0.8); set("CEF_TR", 0.8); + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); set("NHF_EC", 0.9999); - set("NHF_FW", -1.0); set("NEF_FW", 0.90); + set("nNeutrals_FW_L", 2); + } else if (version_ == RUN3PUPPIruns2022FGruns2023CD) { + set("MUF_TR", 0.8); + set("CEF_TR", 0.8); + set("NHF_TR", 0.9); + set("NEF_TR", 0.99); + set("NHF_EC", 0.99); + set("NEF_FW", 0.4); set("nNeutrals_FW_L", 1); - set("nNeutrals_FW_U", 999999); } } } @@ -1066,8 +1167,8 @@ class PFJetIDSelectionFunctor : public Selector { indexNHF_ = index_type(&bits_, "NHF"); if 
((version_ != WINTER17 && version_ != WINTER17PUPPI && version_ != SUMMER18 && version_ != SUMMER18PUPPI && version_ != RUN2UL16CHS && version_ != RUN2UL16PUPPI && version_ != RUN2ULCHS && version_ != RUN2ULPUPPI && - version_ != RUN3WINTER22CHSrunsBCDEprompt && version_ != RUN3WINTER22PUPPIrunsBCDEprompt && - version_ != RUN3WINTER22CHS && version_ != RUN3WINTER22PUPPI) || + version_ != RUN3CHSrunsBCDEprompt && version_ != RUN3PUPPIrunsBCDEprompt && + version_ != RUN3CHSruns2022FGruns2023CD && version_ != RUN3PUPPIruns2022FGruns2023CD) || quality_ != TIGHT) indexCEF_ = index_type(&bits_, "CEF"); @@ -1108,8 +1209,8 @@ class PFJetIDSelectionFunctor : public Selector { indexMUF_ = index_type(&bits_, "MUF"); } } - if ((version_ == SUMMER18) || (version_ == RUN2ULCHS) || (version_ == RUN3WINTER22CHSrunsBCDEprompt) || - (version_ == RUN3WINTER22CHS)) { + if ((version_ == SUMMER18) || (version_ == RUN2ULCHS) || (version_ == RUN3CHSrunsBCDEprompt) || + (version_ == RUN3CHSruns2022FGruns2023CD)) { indexNHF_TR_ = index_type(&bits_, "NHF_TR"); indexNEF_TR_ = index_type(&bits_, "NEF_TR"); indexNCH_TR_ = index_type(&bits_, "NCH_TR"); @@ -1125,8 +1226,8 @@ class PFJetIDSelectionFunctor : public Selector { indexCEF_TR_ = index_type(&bits_, "CEF_TR"); } } - if ((version_ == SUMMER18PUPPI) || (version_ == RUN2ULPUPPI) || (version_ == RUN3WINTER22PUPPIrunsBCDEprompt) || - (version_ == RUN3WINTER22PUPPI)) { + if ((version_ == SUMMER18PUPPI) || (version_ == RUN2ULPUPPI) || (version_ == RUN3PUPPIrunsBCDEprompt) || + (version_ == RUN3PUPPIruns2022FGruns2023CD)) { indexNHF_TR_ = index_type(&bits_, "NHF_TR"); indexNEF_TR_ = index_type(&bits_, "NEF_TR"); indexNHF_EC_ = index_type(&bits_, "NHF_EC"); diff --git a/PhysicsTools/TagAndProbe/src/TagProbeFitter.cc b/PhysicsTools/TagAndProbe/src/TagProbeFitter.cc index 74ed40f67e30e..3e4faccf25a1d 100644 --- a/PhysicsTools/TagAndProbe/src/TagProbeFitter.cc +++ b/PhysicsTools/TagAndProbe/src/TagProbeFitter.cc @@ -222,10 +222,10 @@ string 
TagProbeFitter::calculateEfficiency(string dirName, if (not split_mode) { data = new RooDataSet("data", "data", - inputTree, dataVars, - /*selExpr=*/"", - /*wgtVarName=*/(weightVar.empty() ? nullptr : weightVar.c_str())); + Import(*inputTree), + /*selExpr=*/Cut(""), + /*wgtVarName=*/WeightVar(weightVar.empty() ? nullptr : weightVar.c_str())); // Now add all expressions that are computed dynamically for (vector, pair > > >::const_iterator @@ -329,9 +329,7 @@ string TagProbeFitter::calculateEfficiency(string dirName, } else { // disactive not needed branches inputTree->SetBranchStatus("*", false); - TIterator* iter = dataVars.createIterator(); - TObject* obj(nullptr); - while ((obj = iter->Next())) + for (TObject* obj : dataVars) inputTree->SetBranchStatus(obj->GetName(), true); } @@ -357,7 +355,7 @@ string TagProbeFitter::calculateEfficiency(string dirName, //create the dataset data_bin = (RooDataSet*)data->reduce(Cut(TString::Format("allCats==%d", iCat))); } else { - data_bin = new RooDataSet("data", "data", dataVars, (weightVar.empty() ? nullptr : weightVar.c_str())); + data_bin = new RooDataSet("data", "data", dataVars, WeightVar(weightVar.empty() ? 
nullptr : weightVar.c_str())); TDirectory* tmp = gDirectory; gROOT->cd(); @@ -480,8 +478,8 @@ string TagProbeFitter::calculateEfficiency(string dirName, if (data_bin->numEntries() > 0) { //set the values of binnedVariables to the mean value in this data bin RooArgSet meanOfVariables; - RooLinkedListIter vit = binnedVariables.iterator(); - for (RooRealVar* v = (RooRealVar*)vit.Next(); v != nullptr; v = (RooRealVar*)vit.Next()) { + for (const RooAbsArg* vv : binnedVariables) { + const RooRealVar* v = dynamic_cast(vv); meanOfVariables.addClone(*v); double mean = w->data("data")->mean(*v); RooBinning binning((RooBinning&)v->getBinning()); @@ -788,8 +786,7 @@ void TagProbeFitter::saveFitPlot(RooWorkspace* w) { RooAbsPdf& pdf = *w->pdf("simPdf"); std::unique_ptr obs(pdf.getObservables(*dataAll)); RooRealVar* mass = nullptr; - RooLinkedListIter it = obs->iterator(); - for (RooAbsArg* v = (RooAbsArg*)it.Next(); v != nullptr; v = (RooAbsArg*)it.Next()) { + for (RooAbsArg* v : *obs) { if (!v->InheritsFrom("RooRealVar")) continue; mass = (RooRealVar*)v; @@ -867,8 +864,7 @@ void TagProbeFitter::saveDistributionsPlot(RooWorkspace* w) { const RooArgSet* vars = dataAll->get(); vector reals; - RooLinkedListIter it = vars->iterator(); - for (RooAbsArg* v = (RooAbsArg*)it.Next(); v != nullptr; v = (RooAbsArg*)it.Next()) { + for (RooAbsArg* v : *vars) { if (!v->InheritsFrom("RooRealVar")) continue; reals.push_back((RooRealVar*)v); @@ -909,15 +905,15 @@ void TagProbeFitter::saveEfficiencyPlots(RooDataSet& eff, const TString& effName, RooArgSet& binnedVariables, RooArgSet& mappedCategories) { - RooLinkedListIter v1it = binnedVariables.iterator(); bool isOnePoint = (eff.numEntries() == 1); // for datasets with > 1 entry, we don't make plots for variables with just one bin - for (RooRealVar* v1 = (RooRealVar*)v1it.Next(); v1 != nullptr; v1 = (RooRealVar*)v1it.Next()) { + for (auto it1 = binnedVariables.begin(); it1 != binnedVariables.end(); it1++) { + RooRealVar* v1 = dynamic_cast(*it1); 
RooArgSet binCategories1D; if (v1->numBins() == 1 && !isOnePoint) continue; - RooLinkedListIter v2it = binnedVariables.iterator(); - for (RooRealVar* v2 = (RooRealVar*)v2it.Next(); v2 != nullptr; v2 = (RooRealVar*)v2it.Next()) { + for (auto it2 = binnedVariables.begin(); it2 != binnedVariables.end(); it2++) { + RooRealVar* v2 = dynamic_cast(*it2); if (v2 == v1) continue; if (v2->numBins() == 1 && !isOnePoint) @@ -926,8 +922,8 @@ void TagProbeFitter::saveEfficiencyPlots(RooDataSet& eff, RooBinningCategory(TString(v2->GetName()) + "_bins", TString(v2->GetName()) + "_bins", *v2)); RooArgSet binCategories2D; - RooLinkedListIter v3it = binnedVariables.iterator(); - for (RooRealVar* v3 = (RooRealVar*)v3it.Next(); v3 != nullptr; v3 = (RooRealVar*)v3it.Next()) { + for (auto it3 = binnedVariables.begin(); it3 != binnedVariables.end(); it3++) { + RooRealVar* v3 = dynamic_cast(*it3); if (v3 == v1 || v3 == v2) continue; binCategories2D.addClone( diff --git a/PhysicsTools/Utilities/src/SideBandSubtraction.cc b/PhysicsTools/Utilities/src/SideBandSubtraction.cc index 2395d02f5bbb7..390c65ab81af3 100644 --- a/PhysicsTools/Utilities/src/SideBandSubtraction.cc +++ b/PhysicsTools/Utilities/src/SideBandSubtraction.cc @@ -98,15 +98,14 @@ int SideBandSubtract::doSubtraction(RooRealVar* variable, //out how to do this in one shot to avoid a loop //O(N_vars*N_events)... 
- TIterator* iter = (TIterator*)Data->get()->createIterator(); - RooAbsArg* var = nullptr; RooRealVar* sep_var = nullptr; - while ((var = (RooAbsArg*)iter->Next())) { + for (const auto& var : *Data->get()) { if ((string)var->GetName() == (string)SeparationVariable->GetName()) { sep_var = (RooRealVar*)var; break; } } + for (int i = 0; i < Data->numEntries(); i++) { Data->get(i); Double_t value = variable->getVal(); @@ -431,9 +430,7 @@ int SideBandSubtract::doGlobalFit() { //need to grab sbs objects after each global fit, because they get reset resetSBSProducts(); - TIterator* iter = (TIterator*)Data->get()->createIterator(); - RooAbsArg* variable; - while ((variable = (RooAbsArg*)iter->Next())) { + for (const auto& variable : *Data->get()) { for (unsigned int i = 0; i < BaseHistos.size(); i++) { if ((string)variable->GetName() != (string)SeparationVariable->GetName() && (string)variable->GetName() == (string)BaseHistos[i]->GetName()) @@ -441,11 +438,6 @@ int SideBandSubtract::doGlobalFit() { } } - // clean up our memory... 
- if (variable) - delete variable; - if (iter) - delete iter; return 0; } void SideBandSubtract::doFastSubtraction(TH1F& Total, TH1F& Result, SbsRegion& leftRegion, SbsRegion& rightRegion) { diff --git a/RecoEcal/EgammaClusterProducers/src/InterestingDetIdCollectionProducer.cc b/RecoEcal/EgammaClusterProducers/src/InterestingDetIdCollectionProducer.cc index bb422be344b55..1e7cf14b6a960 100644 --- a/RecoEcal/EgammaClusterProducers/src/InterestingDetIdCollectionProducer.cc +++ b/RecoEcal/EgammaClusterProducers/src/InterestingDetIdCollectionProducer.cc @@ -32,7 +32,7 @@ The following classes of "interesting id" are considered #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Utilities/interface/ESGetToken.h" @@ -47,13 +47,12 @@ The following classes of "interesting id" are considered #include -class InterestingDetIdCollectionProducer : public edm::stream::EDProducer<> { +class InterestingDetIdCollectionProducer : public edm::global::EDProducer<> { public: //! ctor explicit InterestingDetIdCollectionProducer(const edm::ParameterSet&); - void beginRun(edm::Run const&, const edm::EventSetup&) final; //! 
producer - void produce(edm::Event&, const edm::EventSetup&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; private: // ----------member data --------------------------- @@ -66,10 +65,8 @@ class InterestingDetIdCollectionProducer : public edm::stream::EDProducer<> { std::string interestingDetIdCollection_; int minimalEtaSize_; int minimalPhiSize_; - const CaloTopology* caloTopology_; int severityLevel_; - const EcalSeverityLevelAlgo* severity_; bool keepNextToDead_; bool keepNextToBoundary_; }; @@ -81,8 +78,8 @@ InterestingDetIdCollectionProducer::InterestingDetIdCollectionProducer(const edm recHitsToken_ = consumes(iConfig.getParameter("recHitsLabel")); basicClustersToken_ = consumes(iConfig.getParameter("basicClustersLabel")); - caloTopologyToken_ = esConsumes(); - sevLVToken_ = esConsumes(); + caloTopologyToken_ = esConsumes(); + sevLVToken_ = esConsumes(); nextToDeadToken_ = esConsumes(); interestingDetIdCollection_ = iConfig.getParameter("interestingDetIdCollection"); @@ -100,19 +97,17 @@ InterestingDetIdCollectionProducer::InterestingDetIdCollectionProducer(const edm keepNextToBoundary_ = iConfig.getParameter("keepNextToBoundary"); } -void InterestingDetIdCollectionProducer::beginRun(edm::Run const& run, const edm::EventSetup& iSetup) { - edm::ESHandle theCaloTopology = iSetup.getHandle(caloTopologyToken_); - caloTopology_ = &(*theCaloTopology); - - edm::ESHandle sevLv = iSetup.getHandle(sevLVToken_); - severity_ = sevLv.product(); -} - // ------------ method called to produce the data ------------ -void InterestingDetIdCollectionProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { +void InterestingDetIdCollectionProducer::produce(edm::StreamID, + edm::Event& iEvent, + const edm::EventSetup& iSetup) const { using namespace edm; using namespace std; + auto const& caloTopology = iSetup.getData(caloTopologyToken_); + + auto const& severity = iSetup.getData(sevLVToken_); + // take BasicClusters 
Handle pClusters; iEvent.getByToken(basicClustersToken_, pClusters); @@ -155,7 +150,7 @@ void InterestingDetIdCollectionProducer::produce(edm::Event& iEvent, const edm:: if (eMaxId.null()) continue; - const CaloSubdetectorTopology* topology = caloTopology_->getSubdetectorTopology(eMaxId.det(), eMaxId.subdetId()); + const CaloSubdetectorTopology* topology = caloTopology.getSubdetectorTopology(eMaxId.det(), eMaxId.subdetId()); xtalsToStore = topology->getWindow(eMaxId, minimalEtaSize_, minimalPhiSize_); @@ -172,7 +167,7 @@ void InterestingDetIdCollectionProducer::produce(edm::Event& iEvent, const edm:: indexToStore.push_back(it->id()); } // add hits for severities above a threshold - if (severityLevel_ >= 0 && severity_->severityLevel(*it) >= severityLevel_) { + if (severityLevel_ >= 0 && severity.severityLevel(*it) >= severityLevel_) { indexToStore.push_back(it->id()); } if (keepNextToDead_) { diff --git a/RecoEcal/EgammaClusterProducers/src/InterestingDetIdFromSuperClusterProducer.cc b/RecoEcal/EgammaClusterProducers/src/InterestingDetIdFromSuperClusterProducer.cc index a9bc0b3f692b4..223a7cd9434d9 100644 --- a/RecoEcal/EgammaClusterProducers/src/InterestingDetIdFromSuperClusterProducer.cc +++ b/RecoEcal/EgammaClusterProducers/src/InterestingDetIdFromSuperClusterProducer.cc @@ -31,7 +31,7 @@ The following classes of "interesting id" are considered #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Utilities/interface/ESGetToken.h" @@ -46,13 +46,12 @@ The following classes of "interesting id" are considered #include -class InterestingDetIdFromSuperClusterProducer : public edm::stream::EDProducer<> { +class 
InterestingDetIdFromSuperClusterProducer : public edm::global::EDProducer<> { public: //! ctor explicit InterestingDetIdFromSuperClusterProducer(const edm::ParameterSet&); - void beginRun(edm::Run const&, const edm::EventSetup&) final; //! producer - void produce(edm::Event&, const edm::EventSetup&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; private: // ----------member data --------------------------- @@ -64,10 +63,8 @@ class InterestingDetIdFromSuperClusterProducer : public edm::stream::EDProducer< std::string interestingDetIdCollection_; int minimalEtaSize_; int minimalPhiSize_; - const CaloTopology* caloTopology_; int severityLevel_; - const EcalSeverityLevelAlgo* severity_; bool keepNextToDead_; bool keepNextToBoundary_; }; @@ -79,8 +76,8 @@ InterestingDetIdFromSuperClusterProducer::InterestingDetIdFromSuperClusterProduc recHitsToken_ = consumes(iConfig.getParameter("recHitsLabel")); superClustersToken_ = consumes(iConfig.getParameter("superClustersLabel")); - caloTopologyToken_ = esConsumes(); - severityLevelToken_ = esConsumes(); + caloTopologyToken_ = esConsumes(); + severityLevelToken_ = esConsumes(); interestingDetIdCollection_ = iConfig.getParameter("interestingDetIdCollection"); minimalEtaSize_ = iConfig.getParameter("etaSize"); @@ -99,19 +96,16 @@ InterestingDetIdFromSuperClusterProducer::InterestingDetIdFromSuperClusterProduc } } -void InterestingDetIdFromSuperClusterProducer::beginRun(edm::Run const& run, const edm::EventSetup& iSetup) { - edm::ESHandle theCaloTopology = iSetup.getHandle(caloTopologyToken_); - caloTopology_ = &(*theCaloTopology); - - edm::ESHandle sevLv = iSetup.getHandle(severityLevelToken_); - severity_ = sevLv.product(); -} - // ------------ method called to produce the data ------------ -void InterestingDetIdFromSuperClusterProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { +void InterestingDetIdFromSuperClusterProducer::produce(edm::StreamID, + edm::Event& 
iEvent, + const edm::EventSetup& iSetup) const { using namespace edm; using namespace std; + auto const& caloTopology = iSetup.getData(caloTopologyToken_); + + auto const& severity = iSetup.getData(severityLevelToken_); // take BasicClusters Handle pClusters; iEvent.getByToken(superClustersToken_, pClusters); @@ -154,7 +148,7 @@ void InterestingDetIdFromSuperClusterProducer::produce(edm::Event& iEvent, const if (eMaxId.null()) continue; - const CaloSubdetectorTopology* topology = caloTopology_->getSubdetectorTopology(eMaxId.det(), eMaxId.subdetId()); + const CaloSubdetectorTopology* topology = caloTopology.getSubdetectorTopology(eMaxId.det(), eMaxId.subdetId()); xtalsToStore = topology->getWindow(eMaxId, minimalEtaSize_, minimalPhiSize_); std::vector > xtalsInClus = (*clusIt)->hitsAndFractions(); @@ -173,7 +167,7 @@ void InterestingDetIdFromSuperClusterProducer::produce(edm::Event& iEvent, const indexToStore.push_back(it->id()); } // add hits for severities above a threshold - if (severityLevel_ >= 0 && severity_->severityLevel(*it) >= severityLevel_) { + if (severityLevel_ >= 0 && severity.severityLevel(*it) >= severityLevel_) { indexToStore.push_back(it->id()); } if (keepNextToDead_) { diff --git a/RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h b/RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h index 5bec05a1e65c7..0dd9d5d377d16 100644 --- a/RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h +++ b/RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h @@ -12,6 +12,8 @@ #include "RecoEgamma/EgammaIsolationAlgos/interface/EgammaHadTower.h" #include "CondFormats/DataRecord/interface/HcalChannelQualityRcd.h" #include "Geometry/CaloTopology/interface/CaloTowerConstituentsMap.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" class ConsumesCollector; class EgammaHadTower; @@ -43,7 +45,7 @@ class ElectronHcalHelper { void beginEvent(const edm::Event &evt, 
const edm::EventSetup &eventSetup); inline auto hcalTowersBehindClusters(const reco::SuperCluster &sc) const { return egamma::towersOf(sc, *towerMap_); } - double hcalESum(const reco::SuperCluster &, int depth) const; + double hcalESum(const reco::SuperCluster &, int depth, const HcalPFCuts *hcalCuts) const; double hOverEConeSize() const { return cfg_.hOverEConeSize; } int maxSeverityHB() const { return cfg_.maxSeverityHB; } int maxSeverityHE() const { return cfg_.maxSeverityHE; } diff --git a/RecoEgamma/EgammaElectronAlgos/interface/GsfElectronAlgo.h b/RecoEgamma/EgammaElectronAlgos/interface/GsfElectronAlgo.h index 65c27221a9133..463947c1f1d26 100644 --- a/RecoEgamma/EgammaElectronAlgos/interface/GsfElectronAlgo.h +++ b/RecoEgamma/EgammaElectronAlgos/interface/GsfElectronAlgo.h @@ -225,7 +225,8 @@ class GsfElectronAlgo { // main methods reco::GsfElectronCollection completeElectrons(edm::Event const& event, edm::EventSetup const& eventSetup, - const HeavyObjectCache* hoc); + const HeavyObjectCache* hoc, + const HcalPFCuts* hcalCuts); private: // internal structures @@ -258,7 +259,8 @@ class GsfElectronAlgo { const HeavyObjectCache*, egamma::conv::TrackTableView ctfTable, egamma::conv::TrackTableView gsfTable, - EcalPFRecHitThresholds const& thresholds); + EcalPFRecHitThresholds const& thresholds, + const HcalPFCuts* hcalCuts); void setCutBasedPreselectionFlag(reco::GsfElectron& ele, const reco::BeamSpot&) const; @@ -269,7 +271,8 @@ class GsfElectronAlgo { EventData const& eventData, CaloTopology const& topology, CaloGeometry const& geometry, - EcalPFRecHitThresholds const& thresholds) const; + EcalPFRecHitThresholds const& thresholds, + const HcalPFCuts* hcalCuts) const; reco::GsfElectron::SaturationInfo calculateSaturationInfo(const reco::SuperClusterRef&, EventData const& eventData) const; diff --git a/RecoEgamma/EgammaElectronAlgos/src/ElectronHcalHelper.cc b/RecoEgamma/EgammaElectronAlgos/src/ElectronHcalHelper.cc index 44ecc256bc4b1..6f9daf6a658fc 100644 --- 
a/RecoEgamma/EgammaElectronAlgos/src/ElectronHcalHelper.cc +++ b/RecoEgamma/EgammaElectronAlgos/src/ElectronHcalHelper.cc @@ -70,8 +70,8 @@ bool ElectronHcalHelper::hasActiveHcal(const reco::SuperCluster& sc) const { : true; } -double ElectronHcalHelper::hcalESum(const SuperCluster& sc, int depth) const { - return (cfg_.onlyBehindCluster) ? hcalIso_->getHcalESumBc(&sc, depth) - : (cfg_.hOverEConeSize > 0.) ? hcalIso_->getHcalESum(&sc, depth) +double ElectronHcalHelper::hcalESum(const SuperCluster& sc, int depth, const HcalPFCuts* hcalCuts) const { + return (cfg_.onlyBehindCluster) ? hcalIso_->getHcalESumBc(&sc, depth, hcalCuts) + : (cfg_.hOverEConeSize > 0.) ? hcalIso_->getHcalESum(&sc, depth, hcalCuts) : 0.; } diff --git a/RecoEgamma/EgammaElectronAlgos/src/GsfElectronAlgo.cc b/RecoEgamma/EgammaElectronAlgos/src/GsfElectronAlgo.cc index 7b58ade6e733c..094d50be492e7 100644 --- a/RecoEgamma/EgammaElectronAlgos/src/GsfElectronAlgo.cc +++ b/RecoEgamma/EgammaElectronAlgos/src/GsfElectronAlgo.cc @@ -26,6 +26,9 @@ #include "RecoEgamma/EgammaElectronAlgos/interface/ecalClusterEnergyUncertaintyElectronSpecific.h" #include "CommonTools/Egamma/interface/ConversionTools.h" #include "RecoEcal/EgammaCoreTools/interface/EgammaLocalCovParamDefaults.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" #include #include @@ -311,7 +314,8 @@ reco::GsfElectron::ShowerShape GsfElectronAlgo::calculateShowerShape(const reco: EventData const& eventData, CaloTopology const& topology, CaloGeometry const& geometry, - EcalPFRecHitThresholds const& thresholds) const { + EcalPFRecHitThresholds const& thresholds, + const HcalPFCuts* hcalCuts) const { using ClusterTools = EcalClusterToolsT; reco::GsfElectron::ShowerShape showerShape; @@ -358,8 +362,8 @@ reco::GsfElectron::ShowerShape GsfElectronAlgo::calculateShowerShape(const reco: const float scale = full5x5 ? 
showerShape.e5x5 : theClus->energy(); for (uint id = 0; id < showerShape.hcalOverEcal.size(); ++id) { - showerShape.hcalOverEcal[id] = hcalHelperCone.hcalESum(*theClus, id + 1) / scale; - showerShape.hcalOverEcalBc[id] = hcalHelperBc.hcalESum(*theClus, id + 1) / scale; + showerShape.hcalOverEcal[id] = hcalHelperCone.hcalESum(*theClus, id + 1, hcalCuts) / scale; + showerShape.hcalOverEcalBc[id] = hcalHelperBc.hcalESum(*theClus, id + 1, hcalCuts) / scale; } showerShape.invalidHcal = !hcalHelperBc.hasActiveHcal(*theClus); showerShape.hcalTowersBehindClusters = hcalHelperBc.hcalTowersBehindClusters(*theClus); @@ -662,7 +666,8 @@ GsfElectronAlgo::EventData GsfElectronAlgo::beginEvent(edm::Event const& event, reco::GsfElectronCollection GsfElectronAlgo::completeElectrons(edm::Event const& event, edm::EventSetup const& eventSetup, - const GsfElectronAlgo::HeavyObjectCache* hoc) { + const GsfElectronAlgo::HeavyObjectCache* hoc, + const HcalPFCuts* hcalCuts) { reco::GsfElectronCollection electrons; auto const& magneticField = eventSetup.getData(magneticFieldToken_); @@ -725,7 +730,8 @@ reco::GsfElectronCollection GsfElectronAlgo::completeElectrons(edm::Event const& hoc, ctfTrackTable.value(), gsfTrackTable.value(), - thresholds); + thresholds, + hcalCuts); } // loop over tracks return electrons; @@ -859,7 +865,8 @@ void GsfElectronAlgo::createElectron(reco::GsfElectronCollection& electrons, const GsfElectronAlgo::HeavyObjectCache* hoc, egamma::conv::TrackTableView ctfTable, egamma::conv::TrackTableView gsfTable, - EcalPFRecHitThresholds const& thresholds) { + EcalPFRecHitThresholds const& thresholds, + const HcalPFCuts* hcalCuts) { // charge ID int eleCharge; GsfElectron::ChargeInfo eleChargeInfo; @@ -988,10 +995,22 @@ void GsfElectronAlgo::createElectron(reco::GsfElectronCollection& electrons, reco::GsfElectron::ShowerShape showerShape; reco::GsfElectron::ShowerShape full5x5_showerShape; if (!EcalTools::isHGCalDet((DetId::Detector)region)) { - showerShape = 
calculateShowerShape( - electronData.superClusterRef, hcalHelperCone_, hcalHelperBc_, eventData, topology, geometry, thresholds); - full5x5_showerShape = calculateShowerShape( - electronData.superClusterRef, hcalHelperCone_, hcalHelperBc_, eventData, topology, geometry, thresholds); + showerShape = calculateShowerShape(electronData.superClusterRef, + hcalHelperCone_, + hcalHelperBc_, + eventData, + topology, + geometry, + thresholds, + hcalCuts); + full5x5_showerShape = calculateShowerShape(electronData.superClusterRef, + hcalHelperCone_, + hcalHelperBc_, + eventData, + topology, + geometry, + thresholds, + hcalCuts); } //==================================================== @@ -1155,18 +1174,18 @@ void GsfElectronAlgo::createElectron(reco::GsfElectronCollection& electrons, if (!EcalTools::isHGCalDet((DetId::Detector)region)) { for (uint id = 0; id < dr03.hcalRecHitSumEt.size(); ++id) { - dr03.hcalRecHitSumEt[id] = eventData.hadIsolation03.getHcalEtSum(&ele, id + 1); - dr03.hcalRecHitSumEtBc[id] = eventData.hadIsolation03Bc.getHcalEtSumBc(&ele, id + 1); + dr03.hcalRecHitSumEt[id] = eventData.hadIsolation03.getHcalEtSum(&ele, id + 1, hcalCuts); + dr03.hcalRecHitSumEtBc[id] = eventData.hadIsolation03Bc.getHcalEtSumBc(&ele, id + 1, hcalCuts); - dr04.hcalRecHitSumEt[id] = eventData.hadIsolation04.getHcalEtSum(&ele, id + 1); - dr04.hcalRecHitSumEtBc[id] = eventData.hadIsolation04Bc.getHcalEtSumBc(&ele, id + 1); + dr04.hcalRecHitSumEt[id] = eventData.hadIsolation04.getHcalEtSum(&ele, id + 1, hcalCuts); + dr04.hcalRecHitSumEtBc[id] = eventData.hadIsolation04Bc.getHcalEtSumBc(&ele, id + 1, hcalCuts); } - dr03.ecalRecHitSumEt = eventData.ecalBarrelIsol03.getEtSum(&ele); - dr03.ecalRecHitSumEt += eventData.ecalEndcapIsol03.getEtSum(&ele); + dr03.ecalRecHitSumEt = eventData.ecalBarrelIsol03.getEtSum(&ele, thresholds); + dr03.ecalRecHitSumEt += eventData.ecalEndcapIsol03.getEtSum(&ele, thresholds); - dr04.ecalRecHitSumEt = eventData.ecalBarrelIsol04.getEtSum(&ele); - 
dr04.ecalRecHitSumEt += eventData.ecalEndcapIsol04.getEtSum(&ele); + dr04.ecalRecHitSumEt = eventData.ecalBarrelIsol04.getEtSum(&ele, thresholds); + dr04.ecalRecHitSumEt += eventData.ecalEndcapIsol04.getEtSum(&ele, thresholds); } dr03.pre7DepthHcal = false; diff --git a/RecoEgamma/EgammaElectronAlgos/src/TrajSeedMatcher.cc b/RecoEgamma/EgammaElectronAlgos/src/TrajSeedMatcher.cc index a761afdd8c6af..82f7082e0d2bf 100644 --- a/RecoEgamma/EgammaElectronAlgos/src/TrajSeedMatcher.cc +++ b/RecoEgamma/EgammaElectronAlgos/src/TrajSeedMatcher.cc @@ -341,7 +341,7 @@ int TrajSeedMatcher::getNrValidLayersAlongTraj(const DetId& hitId, const Traject bool TrajSeedMatcher::layerHasValidHits(const DetLayer& layer, const TrajectoryStateOnSurface& hitSurState, const Propagator& propToLayerFromState) const { - //FIXME: do not hardcode with werid magic numbers stolen from ancient tracking code + //FIXME: do not hardcode with weird magic numbers stolen from ancient tracking code //its taken from https://cmssdt.cern.ch/dxr/CMSSW/source/RecoTracker/TrackProducer/interface/TrackProducerBase.icc#165 //which inspires this code Chi2MeasurementEstimator estimator(30., -3.0, 0.5, 2.0, 0.5, 1.e12); // same as defauts.... 
@@ -353,7 +353,8 @@ bool TrajSeedMatcher::layerHasValidHits(const DetLayer& layer, else { DetId id = detWithState.front().first->geographicalId(); MeasurementDetWithData measDet = measTkEvt_.idToDet(id); - if (measDet.isActive()) + //Below, measDet.hasBadComponents handles the check that a Pixel module has or not errors like FED25 + if (measDet.isActive() && !measDet.hasBadComponents(detWithState.front().second)) return true; else return false; diff --git a/RecoEgamma/EgammaElectronProducers/plugins/ElectronSeedProducer.cc b/RecoEgamma/EgammaElectronProducers/plugins/ElectronSeedProducer.cc index 362b8a4a59b01..5fc5eb39adb2d 100644 --- a/RecoEgamma/EgammaElectronProducers/plugins/ElectronSeedProducer.cc +++ b/RecoEgamma/EgammaElectronProducers/plugins/ElectronSeedProducer.cc @@ -29,6 +29,9 @@ #include "FWCore/Framework/interface/Event.h" #include "RecoLocalCalo/HGCalRecAlgos/interface/ClusterTools.h" #include "RecoEcal/EgammaCoreTools/interface/EcalTools.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" class ElectronSeedProducer : public edm::stream::EDProducer<> { public: @@ -40,7 +43,8 @@ class ElectronSeedProducer : public edm::stream::EDProducer<> { private: reco::SuperClusterRefVector filterClusters(math::XYZPoint const& beamSpotPosition, - const edm::Handle& superClusters) const; + const edm::Handle& superClusters, + HcalPFCuts const* hcalCuts) const; edm::EDGetTokenT superClusters_[2]; std::vector> initialSeeds_; @@ -56,6 +60,9 @@ class ElectronSeedProducer : public edm::stream::EDProducer<> { bool allowHGCal_; std::unique_ptr hgcClusterTools_; + + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB_; }; using namespace reco; @@ -102,6 +109,12 @@ ElectronSeedProducer::ElectronSeedProducer(const edm::ParameterSet& conf) maxHOverEEndcaps_ = conf.getParameter("maxHOverEEndcaps"); } + //Retrieve HCAL PF thresholds - from config or from 
DB + cutsFromDB_ = conf.getParameter("usePFThresholdsFromDB"); + if (cutsFromDB_) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } + ElectronSeedGenerator::Tokens esg_tokens; esg_tokens.token_bs = beamSpotTag_; esg_tokens.token_vtx = mayConsume(conf.getParameter("vertices")); @@ -118,6 +131,11 @@ ElectronSeedProducer::ElectronSeedProducer(const edm::ParameterSet& conf) void ElectronSeedProducer::produce(edm::Event& e, const edm::EventSetup& iSetup) { LogDebug("ElectronSeedProducer") << "[ElectronSeedProducer::produce] entering "; + HcalPFCuts const* hcalCuts = nullptr; + if (cutsFromDB_) { + hcalCuts = &iSetup.getData(hcalCutsToken_); + } + std::vector initialSeedCollections; std::unique_ptr initialSeedCollectionPtr = nullptr; //created on the fly @@ -138,11 +156,11 @@ void ElectronSeedProducer::produce(edm::Event& e, const edm::EventSetup& iSetup) } auto seeds = std::make_unique(); - auto const& beamSportPosition = e.get(beamSpotTag_).position(); + auto const& beamSpotPosition = e.get(beamSpotTag_).position(); // loop over barrel + endcap for (unsigned int i = 0; i < 2; i++) { - auto clusterRefs = filterClusters(beamSportPosition, e.getHandle(superClusters_[i])); + auto clusterRefs = filterClusters(beamSpotPosition, e.getHandle(superClusters_[i]), hcalCuts); matcher_->run(e, clusterRefs, initialSeedCollections, *seeds); } @@ -165,7 +183,9 @@ void ElectronSeedProducer::produce(edm::Event& e, const edm::EventSetup& iSetup) //=============================== SuperClusterRefVector ElectronSeedProducer::filterClusters( - math::XYZPoint const& beamSpotPosition, const edm::Handle& superClusters) const { + math::XYZPoint const& beamSpotPosition, + const edm::Handle& superClusters, + HcalPFCuts const* hcalCuts) const { SuperClusterRefVector sclRefs; for (unsigned int i = 0; i < superClusters->size(); ++i) { @@ -174,7 +194,7 @@ SuperClusterRefVector ElectronSeedProducer::filterClusters( if (scl.energy() / cosh(sclEta) > SCEtCut_) { if (applyHOverECut_) { 
bool hoeVeto = false; - double had = hcalHelper_->hcalESum(scl, 0); + double had = hcalHelper_->hcalESum(scl, 0, hcalCuts); double scle = scl.energy(); int det_group = scl.seed()->hitsAndFractions()[0].first.det(); int detector = scl.seed()->hitsAndFractions()[0].first.subdetId(); @@ -222,6 +242,7 @@ void ElectronSeedProducer::fillDescriptions(edm::ConfigurationDescriptions& desc desc.add>("recHitEThresholdHB", {0., 0., 0., 0.}); desc.add>("recHitEThresholdHE", {0., 0., 0., 0., 0., 0., 0.}); desc.add("maxHcalRecHitSeverity", 999999); + desc.add("usePFThresholdsFromDB", false); // H/E equivalent for HGCal desc.add("allowHGCal", false); diff --git a/RecoEgamma/EgammaElectronProducers/plugins/GsfElectronProducer.cc b/RecoEgamma/EgammaElectronProducers/plugins/GsfElectronProducer.cc index 9434c40be3064..0651579540b9c 100644 --- a/RecoEgamma/EgammaElectronProducers/plugins/GsfElectronProducer.cc +++ b/RecoEgamma/EgammaElectronProducers/plugins/GsfElectronProducer.cc @@ -166,6 +166,9 @@ class GsfElectronProducer : public edm::stream::EDProducer tfSessions_; + + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB_; }; void GsfElectronProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { @@ -210,6 +213,7 @@ void GsfElectronProducer::fillDescriptions(edm::ConfigurationDescriptions& descr desc.add>("recHitEThresholdHE", {0., 0., 0., 0., 0., 0., 0.}); desc.add("maxHcalRecHitSeverity", 999999); desc.add("hcalRun2EffDepth", false); + desc.add("usePFThresholdsFromDB", false); // Isolation algos configuration desc.add("trkIsol03Cfg", EleTkIsolFromCands::pSetDescript()); @@ -411,6 +415,12 @@ GsfElectronProducer::GsfElectronProducer(const edm::ParameterSet& cfg, const Gsf egmPFCandidateCollection_ = consumes(cfg.getParameter("egmPFCandidatesTag")); } + //Retrieve HCAL PF thresholds - from config or from DB + cutsFromDB_ = cfg.getParameter("usePFThresholdsFromDB"); + if (cutsFromDB_) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } + 
inputCfg_.gsfElectronCores = consumes(cfg.getParameter("gsfElectronCoresTag")); inputCfg_.hbheRecHitsTag = consumes(cfg.getParameter("hbheRecHits")); inputCfg_.barrelRecHitCollection = consumes(cfg.getParameter("barrelRecHitCollectionTag")); @@ -727,6 +737,11 @@ bool GsfElectronProducer::isPreselected(GsfElectron const& ele) const { // ------------ method called to produce the data ------------ void GsfElectronProducer::produce(edm::Event& event, const edm::EventSetup& setup) { + HcalPFCuts const* hcalCuts = nullptr; + if (cutsFromDB_) { + hcalCuts = &setup.getData(hcalCutsToken_); + } + // check configuration if (!ecalSeedingParametersChecked_) { ecalSeedingParametersChecked_ = true; @@ -740,7 +755,7 @@ void GsfElectronProducer::produce(edm::Event& event, const edm::EventSetup& setu } } - auto electrons = algo_->completeElectrons(event, setup, globalCache()); + auto electrons = algo_->completeElectrons(event, setup, globalCache(), hcalCuts); if (resetMvaValuesUsingPFCandidates_) { const auto gsfMVAInputMap = matchWithPFCandidates(event.get(egmPFCandidateCollection_)); for (auto& el : electrons) { diff --git a/RecoEgamma/EgammaElectronProducers/plugins/LowPtGsfElectronSeedProducer.cc b/RecoEgamma/EgammaElectronProducers/plugins/LowPtGsfElectronSeedProducer.cc index 6492ecdd1b94b..76ee75fc6f02e 100644 --- a/RecoEgamma/EgammaElectronProducers/plugins/LowPtGsfElectronSeedProducer.cc +++ b/RecoEgamma/EgammaElectronProducers/plugins/LowPtGsfElectronSeedProducer.cc @@ -66,8 +66,6 @@ class LowPtGsfElectronSeedProducer final static void globalEndJob(lowptgsfeleseed::HeavyObjectCache const*) {} - void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - void produce(edm::Event&, const edm::EventSetup&) override; static void fillDescriptions(edm::ConfigurationDescriptions&); @@ -184,7 +182,7 @@ LowPtGsfElectronSeedProducer::LowPtGsfElectronSeedProducer(const edm::ParameterS trajectoryFitterToken_{esConsumes(conf.getParameter("Fitter"))}, 
trajectorySmootherToken_{esConsumes(conf.getParameter("Smoother"))}, builderToken_{esConsumes(conf.getParameter("TTRHBuilder"))}, - magToken_{esConsumes()}, + magToken_{esConsumes()}, ecalClusterToolsESGetTokens_{consumesCollector()}, passThrough_(conf.getParameter("PassThrough")), usePfTracks_(conf.getParameter("UsePfTracks")), @@ -202,15 +200,10 @@ LowPtGsfElectronSeedProducer::LowPtGsfElectronSeedProducer(const edm::ParameterS produces >(); // indexed by edm::Ref.index() } -////////////////////////////////////////////////////////////////////////////////////////// -// -void LowPtGsfElectronSeedProducer::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const& setup) { - field_ = setup.getHandle(magToken_); -} - ////////////////////////////////////////////////////////////////////////////////////////// // void LowPtGsfElectronSeedProducer::produce(edm::Event& event, const edm::EventSetup& setup) { + field_ = setup.getHandle(magToken_); // Products auto seeds = std::make_unique(); auto ecalPreIds = std::make_unique(); diff --git a/RecoEgamma/EgammaElectronProducers/python/ecalDrivenElectronSeeds_cfi.py b/RecoEgamma/EgammaElectronProducers/python/ecalDrivenElectronSeeds_cfi.py index c9ebb201e81ed..2cb4f1060ecbd 100644 --- a/RecoEgamma/EgammaElectronProducers/python/ecalDrivenElectronSeeds_cfi.py +++ b/RecoEgamma/EgammaElectronProducers/python/ecalDrivenElectronSeeds_cfi.py @@ -7,5 +7,6 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity ) diff --git a/RecoEgamma/EgammaElectronProducers/python/gsfElectronProducer_cfi.py b/RecoEgamma/EgammaElectronProducers/python/gsfElectronProducer_cfi.py index 5a70acfebe75c..937bc8d8e8c45 100644 --- a/RecoEgamma/EgammaElectronProducers/python/gsfElectronProducer_cfi.py +++ 
b/RecoEgamma/EgammaElectronProducers/python/gsfElectronProducer_cfi.py @@ -10,6 +10,7 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity, pfECALClusIsolCfg = cms.PSet( diff --git a/RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSeeds_cfi.py b/RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSeeds_cfi.py index 869f3ecdde3c5..f3203f795ba03 100644 --- a/RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSeeds_cfi.py +++ b/RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSeeds_cfi.py @@ -52,3 +52,6 @@ def thresholds( wp ) : from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA pp_on_AA.toModify(lowPtGsfElectronSeeds,MinPtThreshold = 5.0) + +from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive +egamma_lowPt_exclusive.toModify(lowPtGsfElectronSeeds, ModelThresholds = thresholds("VL"), MinPtThreshold = 0.05) diff --git a/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTGsfTrackVarProducer.cc b/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTGsfTrackVarProducer.cc index ce66e70a0daab..f309f2621b981 100644 --- a/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTGsfTrackVarProducer.cc +++ b/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTGsfTrackVarProducer.cc @@ -39,10 +39,29 @@ #include "TrackingTools/TrajectoryState/interface/TrajectoryStateOnSurface.h" class EgammaHLTGsfTrackVarProducer : public edm::global::EDProducer<> { +public: + struct GsfTrackExtrapolations { + GsfTrackExtrapolations() {} + void operator()(const reco::GsfTrack& trk, + const reco::SuperCluster& sc, + const MultiTrajectoryStateTransform& mtsTransform); + TrajectoryStateOnSurface innTSOS; + TrajectoryStateOnSurface outTSOS; + TrajectoryStateOnSurface sclTSOS; + + GlobalVector innMom, 
outMom; + GlobalPoint sclPos; + }; + public: explicit EgammaHLTGsfTrackVarProducer(const edm::ParameterSet&); void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + void fillAbsAbleVar(float& existVal, const float newVal) const { + if (std::abs(newVal) < std::abs(existVal)) { + existVal = produceAbsValues_ ? std::abs(newVal) : newVal; + } + } private: const edm::EDGetTokenT recoEcalCandToken_; @@ -57,6 +76,7 @@ class EgammaHLTGsfTrackVarProducer : public edm::global::EDProducer<> { const int lowerTrackNrToRemoveCut_; const bool useDefaultValuesForBarrel_; const bool useDefaultValuesForEndcap_; + const bool produceAbsValues_; const edm::EDPutTokenT dEtaMapPutToken_; const edm::EDPutTokenT dEtaSeedMapPutToken_; @@ -67,8 +87,15 @@ class EgammaHLTGsfTrackVarProducer : public edm::global::EDProducer<> { const edm::EDPutTokenT validHitsMapPutToken_; const edm::EDPutTokenT nLayerITMapPutToken_; const edm::EDPutTokenT chi2MapPutToken_; + const edm::EDPutTokenT fbremMapPutToken_; }; +namespace { + + float calRelDelta(float a, float b, float defaultVal = 0.f) { return a != 0.f ? 
(a - b) / a : defaultVal; } + +} // namespace + EgammaHLTGsfTrackVarProducer::EgammaHLTGsfTrackVarProducer(const edm::ParameterSet& config) : recoEcalCandToken_( consumes(config.getParameter("recoEcalCandidateProducer"))), @@ -81,6 +108,7 @@ EgammaHLTGsfTrackVarProducer::EgammaHLTGsfTrackVarProducer(const edm::ParameterS lowerTrackNrToRemoveCut_{config.getParameter("lowerTrackNrToRemoveCut")}, useDefaultValuesForBarrel_{config.getParameter("useDefaultValuesForBarrel")}, useDefaultValuesForEndcap_{config.getParameter("useDefaultValuesForEndcap")}, + produceAbsValues_{config.getParameter("produceAbsValues")}, dEtaMapPutToken_{produces("Deta").setBranchAlias("deta")}, dEtaSeedMapPutToken_{produces("DetaSeed").setBranchAlias("detaseed")}, dPhiMapPutToken_{produces("Dphi").setBranchAlias("dphi")}, @@ -90,7 +118,8 @@ EgammaHLTGsfTrackVarProducer::EgammaHLTGsfTrackVarProducer(const edm::ParameterS produces("MissingHits").setBranchAlias("missinghits")}, validHitsMapPutToken_{produces("ValidHits").setBranchAlias("validhits")}, nLayerITMapPutToken_{produces("NLayerIT").setBranchAlias("nlayerit")}, - chi2MapPutToken_{produces("Chi2").setBranchAlias("chi2")} {} + chi2MapPutToken_{produces("Chi2").setBranchAlias("chi2")}, + fbremMapPutToken_{produces("fbrem")} {} void EgammaHLTGsfTrackVarProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; @@ -101,6 +130,7 @@ void EgammaHLTGsfTrackVarProducer::fillDescriptions(edm::ConfigurationDescriptio desc.add(("lowerTrackNrToRemoveCut"), -1); desc.add(("useDefaultValuesForBarrel"), false); desc.add(("useDefaultValuesForEndcap"), false); + desc.add(("produceAbsValues"), false); descriptions.add("hltEgammaHLTGsfTrackVarProducer", desc); } @@ -124,6 +154,7 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co reco::RecoEcalCandidateIsolationMap validHitsMap(recoEcalCandHandle); reco::RecoEcalCandidateIsolationMap nLayerITMap(recoEcalCandHandle); 
reco::RecoEcalCandidateIsolationMap chi2Map(recoEcalCandHandle); + reco::RecoEcalCandidateIsolationMap fbremMap(recoEcalCandHandle); for (unsigned int iRecoEcalCand = 0; iRecoEcalCand < recoEcalCandHandle->size(); ++iRecoEcalCand) { reco::RecoEcalCandidateRef recoEcalCandRef(recoEcalCandHandle, iRecoEcalCand); @@ -156,6 +187,7 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co float dPhiInValue = 999999; float oneOverESuperMinusOneOverPValue = 999999; float oneOverESeedMinusOneOverPValue = 999999; + float fbrem = 999999; const int nrTracks = gsfTracks.size(); const bool rmCutsDueToNrTracks = nrTracks <= lowerTrackNrToRemoveCut_ || nrTracks >= upperTrackNrToRemoveCut_; @@ -164,6 +196,9 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co ? useDefaultValuesForBarrel_ && nrTracks >= 1 : useDefaultValuesForEndcap_ && nrTracks >= 1; + MultiTrajectoryStateTransform mtsTransform(&trackerGeometry, &magneticField); + GsfTrackExtrapolations gsfTrackExtrapolations; + if (rmCutsDueToNrTracks || useDefaultValues) { nLayerITValue = 100; dEtaInValue = 0; @@ -174,30 +209,23 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co chi2Value = 0; oneOverESuperMinusOneOverPValue = 0; oneOverESeedMinusOneOverPValue = 0; + fbrem = 0; } else { for (size_t trkNr = 0; trkNr < gsfTracks.size(); trkNr++) { GlobalPoint scPos(scRef->x(), scRef->y(), scRef->z()); - GlobalPoint trackExtrapToSC; - { - auto innTSOS = - MultiTrajectoryStateTransform::innerStateOnSurface(*gsfTracks[trkNr], trackerGeometry, &magneticField); - auto posTSOS = extrapolator.extrapolate(innTSOS, scPos); - multiTrajectoryStateMode::positionFromModeCartesian(posTSOS, trackExtrapToSC); - } + gsfTrackExtrapolations(*gsfTracks[trkNr], *scRef, mtsTransform); - EleRelPointPair scAtVtx(scRef->position(), trackExtrapToSC, beamSpotPosition); + EleRelPointPair scAtVtx(scRef->position(), gsfTrackExtrapolations.sclPos, beamSpotPosition); + + 
fbrem = calRelDelta(gsfTrackExtrapolations.innMom.mag(), gsfTrackExtrapolations.outMom.mag(), fbrem); float trkP = gsfTracks[trkNr]->p(); if (scRef->energy() != 0 && trkP != 0) { - if (std::abs(1 / scRef->energy() - 1 / trkP) < oneOverESuperMinusOneOverPValue) { - oneOverESuperMinusOneOverPValue = std::abs(1 / scRef->energy() - 1 / trkP); - } + fillAbsAbleVar(oneOverESuperMinusOneOverPValue, 1 / scRef->energy() - 1 / trkP); } if (scRef->seed().isNonnull() && scRef->seed()->energy() != 0 && trkP != 0) { - if (std::abs(1 / scRef->seed()->energy() - 1 / trkP) < oneOverESeedMinusOneOverPValue) { - oneOverESeedMinusOneOverPValue = std::abs(1 / scRef->seed()->energy() - 1 / trkP); - } + fillAbsAbleVar(oneOverESeedMinusOneOverPValue, 1 / scRef->seed()->energy() - 1 / trkP); } if (gsfTracks[trkNr]->missingInnerHits() < missingHitsValue) { @@ -218,19 +246,9 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co chi2Value = gsfTracks[trkNr]->normalizedChi2(); } - if (std::abs(scAtVtx.dEta()) < dEtaInValue) { - // we are allowing them to come from different tracks - dEtaInValue = std::abs(scAtVtx.dEta()); - } - - if (std::abs(scAtVtx.dEta()) < dEtaSeedInValue) { - dEtaSeedInValue = std::abs(scAtVtx.dEta() - scRef->position().eta() + scRef->seed()->position().eta()); - } - - if (std::abs(scAtVtx.dPhi()) < dPhiInValue) { - // we are allowing them to come from different tracks - dPhiInValue = std::abs(scAtVtx.dPhi()); - } + fillAbsAbleVar(dEtaInValue, scAtVtx.dEta()); + fillAbsAbleVar(dEtaSeedInValue, scAtVtx.dEta() - scRef->position().eta() + scRef->seed()->position().eta()); + fillAbsAbleVar(dPhiInValue, scAtVtx.dPhi()); } } @@ -243,6 +261,7 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co validHitsMap.insert(recoEcalCandRef, validHitsValue); nLayerITMap.insert(recoEcalCandRef, nLayerITValue); chi2Map.insert(recoEcalCandRef, chi2Value); + fbremMap.insert(recoEcalCandRef, fbrem); } iEvent.emplace(dEtaMapPutToken_, 
dEtaMap); @@ -254,6 +273,18 @@ void EgammaHLTGsfTrackVarProducer::produce(edm::StreamID, edm::Event& iEvent, co iEvent.emplace(validHitsMapPutToken_, validHitsMap); iEvent.emplace(nLayerITMapPutToken_, nLayerITMap); iEvent.emplace(chi2MapPutToken_, chi2Map); + iEvent.emplace(fbremMapPutToken_, fbremMap); +} + +void EgammaHLTGsfTrackVarProducer::GsfTrackExtrapolations::operator()( + const reco::GsfTrack& trk, const reco::SuperCluster& sc, const MultiTrajectoryStateTransform& mtsTransform) { + innTSOS = mtsTransform.innerStateOnSurface(trk); + outTSOS = mtsTransform.outerStateOnSurface(trk); + sclTSOS = mtsTransform.extrapolatedState(innTSOS, GlobalPoint(sc.x(), sc.y(), sc.z())); + + multiTrajectoryStateMode::momentumFromModeCartesian(innTSOS, innMom); + multiTrajectoryStateMode::positionFromModeCartesian(sclTSOS, sclPos); + multiTrajectoryStateMode::momentumFromModeCartesian(outTSOS, outMom); } #include "FWCore/Framework/interface/MakerMacros.h" diff --git a/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTHcalVarProducerFromRecHit.cc b/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTHcalVarProducerFromRecHit.cc index e950fde814006..e721892bd06df 100644 --- a/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTHcalVarProducerFromRecHit.cc +++ b/RecoEgamma/EgammaHLTProducers/plugins/EgammaHLTHcalVarProducerFromRecHit.cc @@ -32,6 +32,9 @@ A rho correction can be applied #include "RecoLocalCalo/HcalRecAlgos/interface/HcalSeverityLevelComputerRcd.h" #include "CondFormats/HcalObjects/interface/HcalChannelQuality.h" #include "CondFormats/DataRecord/interface/HcalChannelQualityRcd.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" class EgammaHLTHcalVarProducerFromRecHit : public edm::global::EDProducer<> { public: @@ -67,6 +70,10 @@ class EgammaHLTHcalVarProducerFromRecHit : public edm::global::EDProducer<> { const edm::ESGetToken 
hcalSevLvlComputerToken_; const edm::ESGetToken caloTowerConstituentsMapToken_; const edm::EDPutTokenT putToken_; + + //Get HCAL thresholds from GT + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB; }; EgammaHLTHcalVarProducerFromRecHit::EgammaHLTHcalVarProducerFromRecHit(const edm::ParameterSet &config) @@ -95,7 +102,9 @@ EgammaHLTHcalVarProducerFromRecHit::EgammaHLTHcalVarProducerFromRecHit(const edm hcalChannelQualityToken_{esConsumes(edm::ESInputTag("", "withTopo"))}, hcalSevLvlComputerToken_{esConsumes()}, caloTowerConstituentsMapToken_{esConsumes()}, - putToken_{produces()} { + putToken_{produces()}, + cutsFromDB( + config.getParameter("usePFThresholdsFromDB")) { //Retrieve HCAL PF thresholds - from config or from DB if (doRhoCorrection_) { if (absEtaLowEdges_.size() != effectiveAreas_.size()) { throw cms::Exception("IncompatibleVects") << "absEtaLowEdges and effectiveAreas should be of the same size. \n"; @@ -111,6 +120,10 @@ EgammaHLTHcalVarProducerFromRecHit::EgammaHLTHcalVarProducerFromRecHit(const edm } } } + + if (cutsFromDB) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } } void EgammaHLTHcalVarProducerFromRecHit::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { @@ -127,6 +140,7 @@ void EgammaHLTHcalVarProducerFromRecHit::fillDescriptions(edm::ConfigurationDesc desc.add >("etThresHB", {0, 0, 0, 0}); desc.add >("eThresHE", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); desc.add >("etThresHE", {0, 0, 0, 0, 0, 0, 0}); + desc.add("usePFThresholdsFromDB", true); desc.add("innerCone", 0); desc.add("outerCone", 0.14); desc.add("depth", 0); @@ -192,18 +206,22 @@ void EgammaHLTHcalVarProducerFromRecHit::produce(edm::StreamID, iSetup.getData(hcalChannelQualityToken_), iSetup.getData(hcalSevLvlComputerToken_), iSetup.getData(caloTowerConstituentsMapToken_)); + const HcalPFCuts *hcalCuts{nullptr}; + if (cutsFromDB) { + hcalCuts = &iSetup.getData(hcalCutsToken_); + } if (useSingleTower_) { if (doEtSum_) { //this is cone-based HCAL 
isolation with single tower based footprint removal - isol = thisHcalVar_.getHcalEtSumBc(recoEcalCandRef.get(), depth_); //depth=0 means all depths - } else { //this is single tower based H/E - isol = thisHcalVar_.getHcalESumBc(recoEcalCandRef.get(), depth_); //depth=0 means all depths + isol = thisHcalVar_.getHcalEtSumBc(recoEcalCandRef.get(), depth_, hcalCuts); //depth=0 means all depths + } else { //this is single tower based H/E + isol = thisHcalVar_.getHcalESumBc(recoEcalCandRef.get(), depth_, hcalCuts); //depth=0 means all depths } } else { //useSingleTower_=False means H/E is cone-based. if (doEtSum_) { //hcal iso - isol = thisHcalVar_.getHcalEtSum(recoEcalCandRef.get(), depth_); //depth=0 means all depths + isol = thisHcalVar_.getHcalEtSum(recoEcalCandRef.get(), depth_, hcalCuts); //depth=0 means all depths } else { // doEtSum_=False means sum up energy, this is for H/E - isol = thisHcalVar_.getHcalESum(recoEcalCandRef.get(), depth_); //depth=0 means all depths + isol = thisHcalVar_.getHcalESum(recoEcalCandRef.get(), depth_, hcalCuts); //depth=0 means all depths } } diff --git a/RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h b/RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h index 21b47f31fdb20..275cd11739352 100644 --- a/RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h +++ b/RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h @@ -25,6 +25,8 @@ #include "Geometry/CaloTopology/interface/HcalTopology.h" #include "RecoLocalCalo/HcalRecAlgos/interface/HcalSeverityLevelComputer.h" #include "RecoLocalCalo/HcalRecAlgos/interface/HcalSeverityLevelComputerRcd.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" #include "CondFormats/HcalObjects/interface/HcalChannelQuality.h" #include "CondFormats/DataRecord/interface/HcalChannelQualityRcd.h" @@ -76,62 +78,90 @@ class EgammaHcalIsolation { const HcalSeverityLevelComputer &hcalSevLvlComputer, 
const CaloTowerConstituentsMap &towerMap); - double getHcalESum(const reco::Candidate *c, int depth) const { - return getHcalESum(c->get().get(), depth); + double getHcalESum(const reco::Candidate *c, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalESum(c->get().get(), depth, hcalCuts); } - double getHcalEtSum(const reco::Candidate *c, int depth) const { - return getHcalEtSum(c->get().get(), depth); + double getHcalEtSum(const reco::Candidate *c, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalEtSum(c->get().get(), depth, hcalCuts); } - double getHcalESum(const reco::SuperCluster *sc, int depth) const { return getHcalESum(sc->position(), depth); } - double getHcalEtSum(const reco::SuperCluster *sc, int depth) const { return getHcalEtSum(sc->position(), depth); } - double getHcalESum(const math::XYZPoint &p, int depth) const { - return getHcalESum(GlobalPoint(p.x(), p.y(), p.z()), depth); + double getHcalESum(const reco::SuperCluster *sc, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalESum(sc->position(), depth, hcalCuts); } - double getHcalEtSum(const math::XYZPoint &p, int depth) const { - return getHcalEtSum(GlobalPoint(p.x(), p.y(), p.z()), depth); + double getHcalEtSum(const reco::SuperCluster *sc, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalEtSum(sc->position(), depth, hcalCuts); + } + double getHcalESum(const math::XYZPoint &p, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalESum(GlobalPoint(p.x(), p.y(), p.z()), depth, hcalCuts); + } + double getHcalEtSum(const math::XYZPoint &p, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalEtSum(GlobalPoint(p.x(), p.y(), p.z()), depth, hcalCuts); + } + double getHcalESum(const GlobalPoint &pclu, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalSum(pclu, depth, 0, 0, 0, &scaleToE, hcalCuts); + } + double getHcalEtSum(const GlobalPoint &pclu, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalSum(pclu, 
depth, 0, 0, 0, &scaleToEt, hcalCuts); } - double getHcalESum(const GlobalPoint &pclu, int depth) const { return getHcalSum(pclu, depth, 0, 0, 0, &scaleToE); } - double getHcalEtSum(const GlobalPoint &pclu, int depth) const { return getHcalSum(pclu, depth, 0, 0, 0, &scaleToEt); } - double getHcalESumBc(const reco::Candidate *c, int depth) const { - return getHcalESumBc(c->get().get(), depth); + double getHcalESumBc(const reco::Candidate *c, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalESumBc(c->get().get(), depth, hcalCuts); } - double getHcalEtSumBc(const reco::Candidate *c, int depth) const { - return getHcalEtSumBc(c->get().get(), depth); + double getHcalEtSumBc(const reco::Candidate *c, int depth, const HcalPFCuts *hcalCuts) const { + return getHcalEtSumBc(c->get().get(), depth, hcalCuts); } - double getHcalESumBc(const reco::SuperCluster *sc, int depth) const { + double getHcalESumBc(const reco::SuperCluster *sc, int depth, const HcalPFCuts *hcalCuts) const { const auto tower = egamma::towerOf(*(sc->seed()), towerMap_); if (extIncRule_ == InclusionRule::isBehindClusterSeed and intIncRule_ == InclusionRule::withinConeAroundCluster) - return getHcalESumBc(sc->position(), depth, tower.ieta(), tower.iphi(), -1); + return getHcalESumBc(sc->position(), depth, tower.ieta(), tower.iphi(), -1, hcalCuts); else if (extIncRule_ == InclusionRule::withinConeAroundCluster and intIncRule_ == InclusionRule::isBehindClusterSeed) - return getHcalESumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 1); + return getHcalESumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 1, hcalCuts); - return getHcalESumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 0); + return getHcalESumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 0, hcalCuts); } - double getHcalEtSumBc(const reco::SuperCluster *sc, int depth) const { + double getHcalEtSumBc(const reco::SuperCluster *sc, int depth, const HcalPFCuts *hcalCuts) const { const auto tower = 
egamma::towerOf(*(sc->seed()), towerMap_); if (extIncRule_ == InclusionRule::isBehindClusterSeed and intIncRule_ == InclusionRule::withinConeAroundCluster) - return getHcalEtSumBc(sc->position(), depth, tower.ieta(), tower.iphi(), -1); + return getHcalEtSumBc(sc->position(), depth, tower.ieta(), tower.iphi(), -1, hcalCuts); else if (extIncRule_ == InclusionRule::withinConeAroundCluster and intIncRule_ == InclusionRule::isBehindClusterSeed) - return getHcalEtSumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 1); + return getHcalEtSumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 1, hcalCuts); - return getHcalEtSumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 0); + return getHcalEtSumBc(sc->position(), depth, tower.ieta(), tower.iphi(), 0, hcalCuts); } - double getHcalESumBc(const math::XYZPoint &p, int depth, int ieta, int iphi, int include_or_exclude) const { - return getHcalESumBc(GlobalPoint(p.x(), p.y(), p.z()), depth, ieta, iphi, include_or_exclude); + double getHcalESumBc(const math::XYZPoint &p, + int depth, + int ieta, + int iphi, + int include_or_exclude, + const HcalPFCuts *hcalCuts) const { + return getHcalESumBc(GlobalPoint(p.x(), p.y(), p.z()), depth, ieta, iphi, include_or_exclude, hcalCuts); } - double getHcalEtSumBc(const math::XYZPoint &p, int depth, int ieta, int iphi, int include_or_exclude) const { - return getHcalEtSumBc(GlobalPoint(p.x(), p.y(), p.z()), depth, ieta, iphi, include_or_exclude); + double getHcalEtSumBc(const math::XYZPoint &p, + int depth, + int ieta, + int iphi, + int include_or_exclude, + const HcalPFCuts *hcalCuts) const { + return getHcalEtSumBc(GlobalPoint(p.x(), p.y(), p.z()), depth, ieta, iphi, include_or_exclude, hcalCuts); } - double getHcalESumBc(const GlobalPoint &pclu, int depth, int ieta, int iphi, int include_or_exclude) const { - return getHcalSum(pclu, depth, ieta, iphi, include_or_exclude, &scaleToE); + double getHcalESumBc(const GlobalPoint &pclu, + int depth, + int ieta, + int iphi, + int 
include_or_exclude, + const HcalPFCuts *hcalCuts) const { + return getHcalSum(pclu, depth, ieta, iphi, include_or_exclude, &scaleToE, hcalCuts); } - double getHcalEtSumBc(const GlobalPoint &pclu, int depth, int ieta, int iphi, int include_or_exclude) const { - return getHcalSum(pclu, depth, ieta, iphi, include_or_exclude, &scaleToEt); + double getHcalEtSumBc(const GlobalPoint &pclu, + int depth, + int ieta, + int iphi, + int include_or_exclude, + const HcalPFCuts *hcalCuts) const { + return getHcalSum(pclu, depth, ieta, iphi, include_or_exclude, &scaleToEt, hcalCuts); } private: @@ -142,14 +172,16 @@ class EgammaHcalIsolation { int ieta, int iphi, int include_or_exclude, - double (*scale)(const double &)) const; + double (*scale)(const double &), + const HcalPFCuts *hcalCuts) const; double getHcalSum(const GlobalPoint &pclu, int depth, int ieta, int iphi, int include_or_exclude, - double (*scale)(const double &)) const; + double (*scale)(const double &), + const HcalPFCuts *hcalCuts) const; InclusionRule extIncRule_; double extRadius_; diff --git a/RecoEgamma/EgammaIsolationAlgos/interface/EgammaRecHitIsolation.h b/RecoEgamma/EgammaIsolationAlgos/interface/EgammaRecHitIsolation.h index fc508e58ffdbd..56c79505c41fc 100644 --- a/RecoEgamma/EgammaIsolationAlgos/interface/EgammaRecHitIsolation.h +++ b/RecoEgamma/EgammaIsolationAlgos/interface/EgammaRecHitIsolation.h @@ -22,6 +22,8 @@ #include "RecoLocalCalo/EcalRecAlgos/interface/EcalSeverityLevelAlgo.h" #include "DataFormats/EcalRecHit/interface/EcalRecHitCollections.h" +#include "CondFormats/EcalObjects/interface/EcalPFRecHitThresholds.h" +#include "CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" class EgammaRecHitIsolation { public: @@ -36,11 +38,19 @@ class EgammaRecHitIsolation { const EcalSeverityLevelAlgo*, DetId::Detector detector); - double getEtSum(const reco::Candidate* emObject) const { return getSum_(emObject, true); } - double getEnergySum(const reco::Candidate* emObject) const { return 
getSum_(emObject, false); } + double getEtSum(const reco::Candidate* emObject, EcalPFRecHitThresholds const& thresholds) const { + return getSum_(emObject, true, &thresholds); + } + double getEnergySum(const reco::Candidate* emObject, EcalPFRecHitThresholds const& thresholds) const { + return getSum_(emObject, false, &thresholds); + } - double getEtSum(const reco::SuperCluster* emObject) const { return getSum_(emObject, true); } - double getEnergySum(const reco::SuperCluster* emObject) const { return getSum_(emObject, false); } + double getEtSum(const reco::SuperCluster* emObject, EcalPFRecHitThresholds const& thresholds) const { + return getSum_(emObject, true, &thresholds); + } + double getEnergySum(const reco::SuperCluster* emObject, EcalPFRecHitThresholds const& thresholds) const { + return getSum_(emObject, false, &thresholds); + } void setUseNumCrystals(bool b = true) { useNumCrystals_ = b; } void setVetoClustered(bool b = true) { vetoClustered_ = b; } @@ -61,8 +71,8 @@ class EgammaRecHitIsolation { ~EgammaRecHitIsolation(); private: - double getSum_(const reco::Candidate*, bool returnEt) const; - double getSum_(const reco::SuperCluster*, bool returnEt) const; + double getSum_(const reco::Candidate*, bool returnEt, const EcalPFRecHitThresholds* thresholds) const; + double getSum_(const reco::SuperCluster*, bool returnEt, const EcalPFRecHitThresholds* thresholds) const; double extRadius_; double intRadius_; diff --git a/RecoEgamma/EgammaIsolationAlgos/plugins/EgammaEcalRecHitIsolationProducer.cc b/RecoEgamma/EgammaIsolationAlgos/plugins/EgammaEcalRecHitIsolationProducer.cc deleted file mode 100644 index 0537893cef3f3..0000000000000 --- a/RecoEgamma/EgammaIsolationAlgos/plugins/EgammaEcalRecHitIsolationProducer.cc +++ /dev/null @@ -1,193 +0,0 @@ -//***************************************************************************** -// File: EgammaEcalRecHitIsolationProducer.cc -// ---------------------------------------------------------------------------- -// 
OrigAuth: Matthias Mozer, adapted from EgammaHcalIsolationProducer by S. Harper -// Institute: IIHE-VUB, RAL -//============================================================================= -//***************************************************************************** - -#include "DataFormats/Candidate/interface/Candidate.h" -#include "DataFormats/Candidate/interface/CandAssociation.h" -#include "DataFormats/Common/interface/Handle.h" -#include "DataFormats/DetId/interface/DetId.h" -#include "DataFormats/EgammaReco/interface/SuperCluster.h" -#include "DataFormats/EgammaReco/interface/SuperClusterFwd.h" -#include "DataFormats/RecoCandidate/interface/RecoCandidate.h" -#include "FWCore/Framework/interface/ConsumesCollector.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/global/EDProducer.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/Utilities/interface/ESGetToken.h" -#include "Geometry/CaloGeometry/interface/CaloGeometry.h" -#include "Geometry/Records/interface/CaloGeometryRecord.h" -#include "RecoEgamma/EgammaIsolationAlgos/interface/EgammaRecHitIsolation.h" -#include "RecoLocalCalo/EcalRecAlgos/interface/EcalSeverityLevelAlgo.h" -#include "RecoLocalCalo/EcalRecAlgos/interface/EcalSeverityLevelAlgoRcd.h" - -class EgammaEcalRecHitIsolationProducer : public edm::global::EDProducer<> { -public: - explicit EgammaEcalRecHitIsolationProducer(const edm::ParameterSet&); - - void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; - -private: - const edm::EDGetTokenT> emObjectProducer_; - const edm::EDGetTokenT ecalBarrelRecHitCollection_; - const edm::EDGetTokenT ecalEndcapRecHitCollection_; - - double egIsoPtMinBarrel_; //minimum Et noise cut - double egIsoEMinBarrel_; //minimum E noise cut - double egIsoPtMinEndcap_; //minimum Et noise cut - double egIsoEMinEndcap_; //minimum 
E noise cut - double egIsoConeSizeOut_; //outer cone size - double egIsoConeSizeInBarrel_; //inner cone size - double egIsoConeSizeInEndcap_; //inner cone size - double egIsoJurassicWidth_; // exclusion strip width for jurassic veto - - bool useIsolEt_; //switch for isolEt rather than isolE - bool tryBoth_; // use rechits from barrel + endcap - bool subtract_; // subtract SC energy (allows veto cone of zero size) - - bool useNumCrystals_; // veto on number of crystals - bool vetoClustered_; // veto all clusterd rechits - - edm::ESGetToken sevLvToken_; - edm::ESGetToken caloGeometrytoken_; -}; - -#include "FWCore/Framework/interface/MakerMacros.h" -DEFINE_FWK_MODULE(EgammaEcalRecHitIsolationProducer); - -EgammaEcalRecHitIsolationProducer::EgammaEcalRecHitIsolationProducer(const edm::ParameterSet& config) - //inputs - : emObjectProducer_{consumes(config.getParameter("emObjectProducer"))}, - ecalBarrelRecHitCollection_{consumes(config.getParameter("ecalBarrelRecHitCollection"))}, - ecalEndcapRecHitCollection_{consumes(config.getParameter("ecalEndcapRecHitCollection"))} { - //vetos - egIsoPtMinBarrel_ = config.getParameter("etMinBarrel"); - egIsoEMinBarrel_ = config.getParameter("eMinBarrel"); - egIsoPtMinEndcap_ = config.getParameter("etMinEndcap"); - egIsoEMinEndcap_ = config.getParameter("eMinEndcap"); - egIsoConeSizeInBarrel_ = config.getParameter("intRadiusBarrel"); - egIsoConeSizeInEndcap_ = config.getParameter("intRadiusEndcap"); - egIsoConeSizeOut_ = config.getParameter("extRadius"); - egIsoJurassicWidth_ = config.getParameter("jurassicWidth"); - - // options - useIsolEt_ = config.getParameter("useIsolEt"); - tryBoth_ = config.getParameter("tryBoth"); - subtract_ = config.getParameter("subtract"); - useNumCrystals_ = config.getParameter("useNumCrystals"); - vetoClustered_ = config.getParameter("vetoClustered"); - - //EventSetup Tokens - sevLvToken_ = esConsumes(); - caloGeometrytoken_ = esConsumes(); - - //register your products - produces>(); -} - -// 
------------ method called to produce the data ------------ -void EgammaEcalRecHitIsolationProducer::produce(edm::StreamID, - edm::Event& iEvent, - const edm::EventSetup& iSetup) const { - // Get the filtered objects - auto emObjectHandle = iEvent.getHandle(emObjectProducer_); - - // Next get Ecal hits barrel - auto ecalBarrelRecHitHandle = iEvent.getHandle(ecalBarrelRecHitCollection_); - - // Next get Ecal hits endcap - auto ecalEndcapRecHitHandle = iEvent.getHandle(ecalEndcapRecHitCollection_); - - edm::ESHandle sevlv = iSetup.getHandle(sevLvToken_); - const EcalSeverityLevelAlgo* sevLevel = sevlv.product(); - - //Get Calo Geometry - edm::ESHandle pG = iSetup.getHandle(caloGeometrytoken_); - const CaloGeometry* caloGeom = pG.product(); - - //reco::CandViewDoubleAssociations* isoMap = new reco::CandViewDoubleAssociations( reco::CandidateBaseRefProd( emObjectHandle ) ); - auto isoMap = std::make_unique>(); - edm::ValueMap::Filler filler(*isoMap); - std::vector retV(emObjectHandle->size(), 0); - - EgammaRecHitIsolation ecalBarrelIsol(egIsoConeSizeOut_, - egIsoConeSizeInBarrel_, - egIsoJurassicWidth_, - egIsoPtMinBarrel_, - egIsoEMinBarrel_, - caloGeom, - *ecalBarrelRecHitHandle, - sevLevel, - DetId::Ecal); - ecalBarrelIsol.setUseNumCrystals(useNumCrystals_); - ecalBarrelIsol.setVetoClustered(vetoClustered_); - - EgammaRecHitIsolation ecalEndcapIsol(egIsoConeSizeOut_, - egIsoConeSizeInEndcap_, - egIsoJurassicWidth_, - egIsoPtMinEndcap_, - egIsoEMinEndcap_, - caloGeom, - *ecalEndcapRecHitHandle, - sevLevel, - DetId::Ecal); - ecalEndcapIsol.setUseNumCrystals(useNumCrystals_); - ecalEndcapIsol.setVetoClustered(vetoClustered_); - - for (size_t i = 0; i < emObjectHandle->size(); ++i) { - //i need to know if its in the barrel/endcap so I get the supercluster handle to find out the detector eta - //this might not be the best way, are we guaranteed that eta<1.5 is barrel - //this can be safely replaced by another method which determines where the emobject is - //then we 
either get the isolation Et or isolation Energy depending on user selection - double isoValue = 0.; - - reco::SuperClusterRef superClus = emObjectHandle->at(i).get(); - - if (tryBoth_) { //barrel + endcap - if (useIsolEt_) - isoValue = - ecalBarrelIsol.getEtSum(&(emObjectHandle->at(i))) + ecalEndcapIsol.getEtSum(&(emObjectHandle->at(i))); - else - isoValue = ecalBarrelIsol.getEnergySum(&(emObjectHandle->at(i))) + - ecalEndcapIsol.getEnergySum(&(emObjectHandle->at(i))); - } else if (fabs(superClus->eta()) < 1.479) { //barrel - if (useIsolEt_) - isoValue = ecalBarrelIsol.getEtSum(&(emObjectHandle->at(i))); - else - isoValue = ecalBarrelIsol.getEnergySum(&(emObjectHandle->at(i))); - } else { //endcap - if (useIsolEt_) - isoValue = ecalEndcapIsol.getEtSum(&(emObjectHandle->at(i))); - else - isoValue = ecalEndcapIsol.getEnergySum(&(emObjectHandle->at(i))); - } - - //we subtract off the electron energy here as well - double subtractVal = 0; - - if (useIsolEt_) - subtractVal = superClus.get()->rawEnergy() * sin(2 * atan(exp(-superClus.get()->eta()))); - else - subtractVal = superClus.get()->rawEnergy(); - - if (subtract_) - isoValue -= subtractVal; - - retV[i] = isoValue; - //all done, isolation is now in the map - - } //end of loop over em objects - - filler.insert(emObjectHandle, retV.begin(), retV.end()); - filler.fill(); - - iEvent.put(std::move(isoMap)); -} - -//define this as a plug-in -//DEFINE_FWK_MODULE(EgammaRecHitIsolation,Producer); diff --git a/RecoEgamma/EgammaIsolationAlgos/python/egammaHBHERecHitThreshold_cff.py b/RecoEgamma/EgammaIsolationAlgos/python/egammaHBHERecHitThreshold_cff.py index c276700ed4760..ef3c78c6a1109 100644 --- a/RecoEgamma/EgammaIsolationAlgos/python/egammaHBHERecHitThreshold_cff.py +++ b/RecoEgamma/EgammaIsolationAlgos/python/egammaHBHERecHitThreshold_cff.py @@ -7,6 +7,7 @@ recHitEThresholdHB = _thresholdsHBphase1, recHitEThresholdHE = _thresholdsHEphase1, maxHcalRecHitSeverity = cms.int32(9), + usePFThresholdsFromDB = cms.bool(False) 
) egammaHBHERecHit_2023 = egammaHBHERecHit.clone( @@ -15,3 +16,9 @@ from Configuration.Eras.Modifier_run3_egamma_2023_cff import run3_egamma_2023 run3_egamma_2023.toReplaceWith(egammaHBHERecHit,egammaHBHERecHit_2023) + +from Configuration.Eras.Modifier_hcalPfCutsFromDB_cff import hcalPfCutsFromDB +hcalPfCutsFromDB.toModify(egammaHBHERecHit, + usePFThresholdsFromDB = True) + + diff --git a/RecoEgamma/EgammaIsolationAlgos/src/EgammaHcalIsolation.cc b/RecoEgamma/EgammaIsolationAlgos/src/EgammaHcalIsolation.cc index 63ebc6b7f5593..e11ed027580b6 100644 --- a/RecoEgamma/EgammaIsolationAlgos/src/EgammaHcalIsolation.cc +++ b/RecoEgamma/EgammaIsolationAlgos/src/EgammaHcalIsolation.cc @@ -126,10 +126,12 @@ double EgammaHcalIsolation::goodHitEnergy(float pcluEta, int ieta, int iphi, int include_or_exclude, - double (*scale)(const double &)) const { + double (*scale)(const double &), + const HcalPFCuts *hcalCuts) const { const HcalDetId hid(hit.detid()); const int hd = hid.depth(), he = hid.ieta(), hp = hid.iphi(); const int h1 = hd - 1; + double thresholdE = 0.; if (include_or_exclude == -1 and (he != ieta or hp != iphi)) return 0.; @@ -146,8 +148,16 @@ double EgammaHcalIsolation::goodHitEnergy(float pcluEta, if (!right_depth) return 0.; - const bool goodHBe = hid.subdet() == HcalBarrel and hit.energy() > eThresHB_[h1]; - const bool goodHEe = hid.subdet() == HcalEndcap and hit.energy() > eThresHE_[h1]; + bool goodHBe = hid.subdet() == HcalBarrel and hit.energy() > eThresHB_[h1]; + bool goodHEe = hid.subdet() == HcalEndcap and hit.energy() > eThresHE_[h1]; + + if (hcalCuts != nullptr) { + const HcalPFCut *cutValue = hcalCuts->getValues(hid.rawId()); + thresholdE = cutValue->noiseThreshold(); + goodHBe = hid.subdet() == HcalBarrel and hit.energy() > thresholdE; + goodHEe = hid.subdet() == HcalEndcap and hit.energy() > thresholdE; + } + if (!(goodHBe or goodHEe)) return 0.; @@ -182,12 +192,13 @@ double EgammaHcalIsolation::getHcalSum(const GlobalPoint &pclu, int ieta, int iphi, 
int include_or_exclude, - double (*scale)(const double &)) const { + double (*scale)(const double &), + const HcalPFCuts *hcalCuts) const { double sum = 0.; const float pcluEta = pclu.eta(); const float pcluPhi = pclu.phi(); for (const auto &hit : mhbhe_) - sum += goodHitEnergy(pcluEta, pcluPhi, hit, depth, ieta, iphi, include_or_exclude, scale); + sum += goodHitEnergy(pcluEta, pcluPhi, hit, depth, ieta, iphi, include_or_exclude, scale, hcalCuts); return sum; } diff --git a/RecoEgamma/EgammaIsolationAlgos/src/EgammaRecHitIsolation.cc b/RecoEgamma/EgammaIsolationAlgos/src/EgammaRecHitIsolation.cc index 0e706fd61b8d3..0eebf9ee20fbd 100644 --- a/RecoEgamma/EgammaIsolationAlgos/src/EgammaRecHitIsolation.cc +++ b/RecoEgamma/EgammaIsolationAlgos/src/EgammaRecHitIsolation.cc @@ -64,7 +64,9 @@ EgammaRecHitIsolation::EgammaRecHitIsolation(double extRadius, EgammaRecHitIsolation::~EgammaRecHitIsolation() {} -double EgammaRecHitIsolation::getSum_(const reco::Candidate* emObject, bool returnEt) const { +double EgammaRecHitIsolation::getSum_(const reco::Candidate* emObject, + bool returnEt, + const EcalPFRecHitThresholds* thresholds) const { double energySum = 0.; if (!caloHits_.empty()) { //Take the SC position @@ -96,6 +98,10 @@ double EgammaRecHitIsolation::getSum_(const reco::Candidate* emObject, bool retu float phiDiff = reco::deltaPhi(phi, phiclus); float energy = j->energy(); + float rhThres = (thresholds != nullptr) ? 
(*thresholds)[j->detid()] : 0.f; + if (energy <= rhThres) + continue; + if (useNumCrystals_) { if (fabs(etaclus) < 1.479) { // Barrel num crystals, crystal width = 0.0174 if (fabs(etaDiff) < 0.0174 * etaSlice_) @@ -174,7 +180,9 @@ double EgammaRecHitIsolation::getSum_(const reco::Candidate* emObject, bool retu return energySum; } -double EgammaRecHitIsolation::getSum_(const reco::SuperCluster* sc, bool returnEt) const { +double EgammaRecHitIsolation::getSum_(const reco::SuperCluster* sc, + bool returnEt, + const EcalPFRecHitThresholds* thresholds) const { double energySum = 0.; if (!caloHits_.empty()) { //Take the SC position @@ -205,6 +213,10 @@ double EgammaRecHitIsolation::getSum_(const reco::SuperCluster* sc, bool returnE double phiDiff = reco::deltaPhi(phi, phiclus); double energy = j->energy(); + float rhThres = (thresholds != nullptr) ? (*thresholds)[j->detid()] : 0.f; + if (energy <= rhThres) + continue; + if (useNumCrystals_) { if (fabs(etaclus) < 1.479) { // Barrel num crystals, crystal width = 0.0174 if (fabs(etaDiff) < 0.0174 * etaSlice_) diff --git a/RecoEgamma/EgammaPhotonProducers/python/conversionTrackCandidates_cfi.py b/RecoEgamma/EgammaPhotonProducers/python/conversionTrackCandidates_cfi.py index d58e07faea9c2..c09aa0b772086 100644 --- a/RecoEgamma/EgammaPhotonProducers/python/conversionTrackCandidates_cfi.py +++ b/RecoEgamma/EgammaPhotonProducers/python/conversionTrackCandidates_cfi.py @@ -6,5 +6,6 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity ) diff --git a/RecoEgamma/EgammaPhotonProducers/python/conversions_cfi.py b/RecoEgamma/EgammaPhotonProducers/python/conversions_cfi.py index c85d5b66bb5e8..774b23436e1c2 100644 --- a/RecoEgamma/EgammaPhotonProducers/python/conversions_cfi.py +++ 
b/RecoEgamma/EgammaPhotonProducers/python/conversions_cfi.py @@ -25,6 +25,7 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity, maxHOverE = cms.double(0.15), recoverOneTrackCase = cms.bool(True), diff --git a/RecoEgamma/EgammaPhotonProducers/python/gedPhotons_cfi.py b/RecoEgamma/EgammaPhotonProducers/python/gedPhotons_cfi.py index c2e3915508a93..978a08fb47c4b 100644 --- a/RecoEgamma/EgammaPhotonProducers/python/gedPhotons_cfi.py +++ b/RecoEgamma/EgammaPhotonProducers/python/gedPhotons_cfi.py @@ -63,6 +63,7 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity, hcalRun2EffDepth = cms.bool(False), posCalc_x0 = cms.double(0.89), diff --git a/RecoEgamma/EgammaPhotonProducers/python/photons_cfi.py b/RecoEgamma/EgammaPhotonProducers/python/photons_cfi.py index d772f921b9cbd..91c9f0f5b731e 100644 --- a/RecoEgamma/EgammaPhotonProducers/python/photons_cfi.py +++ b/RecoEgamma/EgammaPhotonProducers/python/photons_cfi.py @@ -54,6 +54,7 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity, hcalRun2EffDepth = cms.bool(False), posCalc_x0 = cms.double(0.89), @@ -172,6 +173,7 @@ hbheRecHits = egammaHBHERecHit.hbheRecHits, recHitEThresholdHB = egammaHBHERecHit.recHitEThresholdHB, recHitEThresholdHE = egammaHBHERecHit.recHitEThresholdHE, + usePFThresholdsFromDB = 
egammaHBHERecHit.usePFThresholdsFromDB, maxHcalRecHitSeverity = egammaHBHERecHit.maxHcalRecHitSeverity, hcalRun2EffDepth = cms.bool(False), posCalc_x0 = cms.double(0.89), diff --git a/RecoEgamma/EgammaPhotonProducers/src/ConversionTrackCandidateProducer.cc b/RecoEgamma/EgammaPhotonProducers/src/ConversionTrackCandidateProducer.cc index 617e56968f9d6..3822137823132 100644 --- a/RecoEgamma/EgammaPhotonProducers/src/ConversionTrackCandidateProducer.cc +++ b/RecoEgamma/EgammaPhotonProducers/src/ConversionTrackCandidateProducer.cc @@ -43,7 +43,8 @@ #include "RecoTracker/Record/interface/NavigationSchoolRecord.h" #include "TrackingTools/DetLayers/interface/NavigationSchool.h" #include "RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h" - +#include "CondFormats/EcalObjects/interface/EcalPFRecHitThresholds.h" +#include "CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" #include class ConversionTrackCandidateProducer : public edm::stream::EDProducer<> { @@ -77,6 +78,8 @@ class ConversionTrackCandidateProducer : public edm::stream::EDProducer<> { const edm::ESGetToken navToken_; const edm::ESGetToken theCaloGeomToken_; const edm::ESGetToken sevlvToken_; + const edm::ESGetToken ecalPFRechitThresholdsToken_; + const EcalPFRecHitThresholds* thresholds = nullptr; double hOverEConeSize_; double maxHOverE_; @@ -114,6 +117,10 @@ class ConversionTrackCandidateProducer : public edm::stream::EDProducer<> { std::unique_ptr hcalHelper_; + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB; + HcalPFCuts const* hcalCuts = nullptr; + void buildCollections(bool detector, const edm::Handle>& scHandle, const edm::Handle>& bcHandle, @@ -149,7 +156,7 @@ ConversionTrackCandidateProducer::ConversionTrackCandidateProducer(const edm::Pa navToken_(esConsumes(edm::ESInputTag("", "SimpleNavigationSchool"))), theCaloGeomToken_(esConsumes()), sevlvToken_(esConsumes()), - + ecalPFRechitThresholdsToken_{esConsumes()}, theTrajectoryBuilder_(createBaseCkfTrajectoryBuilder( 
config.getParameter("TrajectoryBuilderPSet"), consumesCollector())), outInSeedFinder_{config, consumesCollector()}, @@ -162,6 +169,10 @@ ConversionTrackCandidateProducer::ConversionTrackCandidateProducer(const edm::Pa OutInTrackSCAssociationCollection_ = config.getParameter("outInTrackCandidateSCAssociationCollection"); InOutTrackSCAssociationCollection_ = config.getParameter("inOutTrackCandidateSCAssociationCollection"); + cutsFromDB = config.getParameter("usePFThresholdsFromDB"); + if (cutsFromDB) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } hOverEConeSize_ = config.getParameter("hOverEConeSize"); maxHOverE_ = config.getParameter("maxHOverE"); minSCEt_ = config.getParameter("minSCEt"); @@ -224,6 +235,10 @@ void ConversionTrackCandidateProducer::beginRun(edm::Run const& r, edm::EventSet theTrajectoryBuilder_->setNavigationSchool(navigation); outInSeedFinder_.setNavigationSchool(navigation); inOutSeedFinder_.setNavigationSchool(navigation); + + if (cutsFromDB) { + hcalCuts = &theEventSetup.getData(hcalCutsToken_); + } } void ConversionTrackCandidateProducer::produce(edm::Event& theEvent, const edm::EventSetup& theEventSetup) { @@ -279,6 +294,8 @@ void ConversionTrackCandidateProducer::produce(edm::Event& theEvent, const edm:: validEndcapSCHandle = false; } + thresholds = &theEventSetup.getData(ecalPFRechitThresholdsToken_); + // get the geometry from the event setup: theCaloGeom_ = theEventSetup.getHandle(theCaloGeomToken_); @@ -327,7 +344,7 @@ void ConversionTrackCandidateProducer::produce(edm::Event& theEvent, const edm:: auto const refprodOutInTrackC = theEvent.put(std::move(outInTrackCandidate_p), OutInTrackCandidateCollection_); //std::cout << "ConversionTrackCandidateProducer refprodOutInTrackC size " << (*(refprodOutInTrackC.product())).size() << "\n"; // - //std::cout << "ConversionTrackCandidateProducer Putting in the event " << (*inOutTrackCandidate_p).size() << " In Out track Candidates " << "\n"; + //std::cout << 
"ConversionTrackCandidateProducer Putting in the event " << (*inOutTrackCandidate_p).size() << " In Out track Candidates " << "\n"; auto const refprodInOutTrackC = theEvent.put(std::move(inOutTrackCandidate_p), InOutTrackCandidateCollection_); //std::cout << "ConversionTrackCandidateProducer refprodInOutTrackC size " << (*(refprodInOutTrackC.product())).size() << "\n"; @@ -371,7 +388,7 @@ void ConversionTrackCandidateProducer::buildCollections(bool isBarrel, const reco::CaloCluster* pClus = &(*aClus); const reco::SuperCluster* sc = dynamic_cast(pClus); double scEt = sc->energy() / cosh(sc->eta()); - double HoE = hcalHelper.hcalESum(*sc, 0) / sc->energy(); + double HoE = hcalHelper.hcalESum(*sc, 0, hcalCuts) / sc->energy(); if (HoE >= maxHOverE_) continue; @@ -389,7 +406,7 @@ void ConversionTrackCandidateProducer::buildCollections(bool isBarrel, ecalIso.doSeverityChecks(&ecalRecHits, severitiesexclEE_); } - double ecalIsolation = ecalIso.getEtSum(sc); + double ecalIsolation = ecalIso.getEtSum(sc, *thresholds); if (ecalIsolation > ecalIsoCut_offset_ + ecalIsoCut_slope_ * scEt) continue; @@ -454,6 +471,7 @@ void ConversionTrackCandidateProducer::fillDescriptions(edm::ConfigurationDescri desc.add("hbheRecHits", {"hbhereco"}); desc.add>("recHitEThresholdHB", {0., 0., 0., 0.}); desc.add>("recHitEThresholdHE", {0., 0., 0., 0., 0., 0., 0.}); + desc.add("usePFThresholdsFromDB", false); desc.add("maxHcalRecHitSeverity", 999999); desc.add("minSCEt", 20.0); diff --git a/RecoEgamma/EgammaPhotonProducers/src/ConvertedPhotonProducer.cc b/RecoEgamma/EgammaPhotonProducers/src/ConvertedPhotonProducer.cc index 61e08eb3f3dc2..71d0581f0296d 100644 --- a/RecoEgamma/EgammaPhotonProducers/src/ConvertedPhotonProducer.cc +++ b/RecoEgamma/EgammaPhotonProducers/src/ConvertedPhotonProducer.cc @@ -38,6 +38,9 @@ #include "TrackingTools/TransientTrack/interface/TransientTrack.h" #include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" #include 
"RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" #include @@ -45,7 +48,6 @@ class ConvertedPhotonProducer : public edm::stream::EDProducer<> { public: ConvertedPhotonProducer(const edm::ParameterSet& ps); - void beginRun(edm::Run const&, const edm::EventSetup& es) final; void produce(edm::Event& evt, const edm::EventSetup& es) override; private: @@ -75,6 +77,10 @@ class ConvertedPhotonProducer : public edm::stream::EDProducer<> { edm::EDGetTokenT generalTrackProducer_; + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB_; + HcalPFCuts const* hcalCuts_ = nullptr; + // Register the product edm::EDPutTokenT convertedPhotonCollectionPutToken_; edm::EDPutTokenT cleanedConvertedPhotonCollectionPutToken_; @@ -139,9 +145,8 @@ ConvertedPhotonProducer::ConvertedPhotonProducer(const edm::ParameterSet& config scIslandEndcapProducer_{consumes(config.getParameter("scIslandEndcapProducer"))}, hbheRecHits_{consumes(config.getParameter("hbheRecHits"))}, caloGeomToken_{esConsumes()}, - mFToken_{esConsumes()}, - transientTrackToken_{esConsumes( - edm::ESInputTag("", "TransientTrackBuilder"))}, + mFToken_{esConsumes()}, + transientTrackToken_{esConsumes(edm::ESInputTag("", "TransientTrackBuilder"))}, vertexFinder_{config}, algoName_{config.getParameter("AlgorithmName")}, @@ -159,6 +164,11 @@ ConvertedPhotonProducer::ConvertedPhotonProducer(const edm::ParameterSet& config // instantiate the Track Pair Finder algorithm likelihoodCalc_.setWeightsFile(edm::FileInPath{likelihoodWeights_.c_str()}.fullPath().c_str()); + cutsFromDB_ = config.getParameter("usePFThresholdsFromDB"); + if (cutsFromDB_) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } + ElectronHcalHelper::Configuration cfgCone; cfgCone.hOverEConeSize = hOverEConeSize_; if (cfgCone.hOverEConeSize > 0) { @@ -176,14 +186,16 
@@ ConvertedPhotonProducer::ConvertedPhotonProducer(const edm::ParameterSet& config hcalHelper_ = std::make_unique(cfgCone, consumesCollector()); } -void ConvertedPhotonProducer::beginRun(edm::Run const& r, edm::EventSetup const& theEventSetup) { +void ConvertedPhotonProducer::produce(edm::Event& theEvent, const edm::EventSetup& theEventSetup) { magneticField_ = &theEventSetup.getData(mFToken_); // Transform Track into TransientTrack (needed by the Vertex fitter) transientTrackBuilder_ = &theEventSetup.getData(transientTrackToken_); -} -void ConvertedPhotonProducer::produce(edm::Event& theEvent, const edm::EventSetup& theEventSetup) { + if (cutsFromDB_) { + hcalCuts_ = &theEventSetup.getData(hcalCutsToken_); + } + // // create empty output collections // @@ -341,7 +353,7 @@ void ConvertedPhotonProducer::buildCollections( continue; const reco::CaloCluster* pClus = &(*aClus); auto const* sc = dynamic_cast(pClus); - double HoE = hcalHelper.hcalESum(*sc, 0) / sc->energy(); + double HoE = hcalHelper.hcalESum(*sc, 0, hcalCuts_) / sc->energy(); if (HoE >= maxHOverE_) continue; ///// diff --git a/RecoEgamma/EgammaPhotonProducers/src/GEDPhotonProducer.cc b/RecoEgamma/EgammaPhotonProducers/src/GEDPhotonProducer.cc index 3992e2b4cf8c6..20547ad868f5c 100644 --- a/RecoEgamma/EgammaPhotonProducers/src/GEDPhotonProducer.cc +++ b/RecoEgamma/EgammaPhotonProducers/src/GEDPhotonProducer.cc @@ -53,6 +53,9 @@ #include "RecoEgamma/EgammaIsolationAlgos/interface/HcalPFClusterIsolation.h" #include "CondFormats/GBRForest/interface/GBRForest.h" #include "CommonTools/MVAUtils/interface/GBRForestTools.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" class CacheData { public: @@ -94,6 +97,10 @@ class GEDPhotonProducer : public edm::stream::EDProducer hcalCutsToken_; + bool cutsFromDB_; + HcalPFCuts const* hcalCuts_ = nullptr; + class RecoStepInfo { public: enum 
FlagBits { kOOT = 0x1, kFinal = 0x2 }; @@ -280,6 +287,12 @@ GEDPhotonProducer::GEDPhotonProducer(const edm::ParameterSet& config, const Cach ecalPFRechitThresholdsToken_{esConsumes()}, hcalHelperCone_(nullptr), hcalHelperBc_(nullptr) { + //Retrieve HCAL PF thresholds - from config or from DB + cutsFromDB_ = config.getParameter("usePFThresholdsFromDB"); + if (cutsFromDB_) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } + if (recoStep_.isFinal()) { photonProducerT_ = consumes(photonProducer_); pfCandidates_ = consumes(config.getParameter("pfCandidates")); @@ -485,6 +498,10 @@ void GEDPhotonProducer::endStream() { void GEDPhotonProducer::produce(edm::Event& theEvent, const edm::EventSetup& eventSetup) { using namespace edm; + if (cutsFromDB_) { + hcalCuts_ = &eventSetup.getData(hcalCutsToken_); + } + auto outputPhotonCollection_p = std::make_unique(); edm::ValueMap pfEGCandToPhotonMap; @@ -799,7 +816,7 @@ void GEDPhotonProducer::fillPhotonCollection(edm::Event& evt, reco::Photon::FiducialFlags fiducialFlags; reco::Photon::IsolationVariables isolVarR03, isolVarR04; if (!EcalTools::isHGCalDet(thedet)) { - photonIsoCalculator_->calculate(&newCandidate, evt, es, fiducialFlags, isolVarR04, isolVarR03); + photonIsoCalculator_->calculate(&newCandidate, evt, es, fiducialFlags, isolVarR04, isolVarR03, hcalCuts_); } newCandidate.setFiducialVolumeFlags(fiducialFlags); newCandidate.setIsolationVariables(isolVarR04, isolVarR03); @@ -815,10 +832,10 @@ void GEDPhotonProducer::fillPhotonCollection(edm::Event& evt, showerShape.sigmaIetaIeta = sigmaIetaIeta; for (uint id = 0; id < showerShape.hcalOverEcal.size(); ++id) { showerShape.hcalOverEcal[id] = - (hcalHelperCone != nullptr) ? hcalHelperCone->hcalESum(*scRef, id + 1) / scRef->energy() : 0.f; + (hcalHelperCone != nullptr) ? hcalHelperCone->hcalESum(*scRef, id + 1, hcalCuts_) / scRef->energy() : 0.f; showerShape.hcalOverEcalBc[id] = - (hcalHelperBc != nullptr) ? 
hcalHelperBc->hcalESum(*scRef, id + 1) / scRef->energy() : 0.f; + (hcalHelperBc != nullptr) ? hcalHelperBc->hcalESum(*scRef, id + 1, hcalCuts_) / scRef->energy() : 0.f; } showerShape.invalidHcal = (hcalHelperBc != nullptr) ? !hcalHelperBc->hasActiveHcal(*scRef) : false; if (hcalHelperBc != nullptr) @@ -930,9 +947,9 @@ void GEDPhotonProducer::fillPhotonCollection(edm::Event& evt, full5x5_showerShape.effSigmaRR = sigmaRR; for (uint id = 0; id < full5x5_showerShape.hcalOverEcal.size(); ++id) { full5x5_showerShape.hcalOverEcal[id] = - (hcalHelperCone != nullptr) ? hcalHelperCone->hcalESum(*scRef, id + 1) / full5x5_e5x5 : 0.f; + (hcalHelperCone != nullptr) ? hcalHelperCone->hcalESum(*scRef, id + 1, hcalCuts_) / full5x5_e5x5 : 0.f; full5x5_showerShape.hcalOverEcalBc[id] = - (hcalHelperBc != nullptr) ? hcalHelperBc->hcalESum(*scRef, id + 1) / full5x5_e5x5 : 0.f; + (hcalHelperBc != nullptr) ? hcalHelperBc->hcalESum(*scRef, id + 1, hcalCuts_) / full5x5_e5x5 : 0.f; } full5x5_showerShape.pre7DepthHcal = false; newCandidate.full5x5_setShowerShapeVariables(full5x5_showerShape); diff --git a/RecoEgamma/EgammaPhotonProducers/src/PhotonProducer.cc b/RecoEgamma/EgammaPhotonProducers/src/PhotonProducer.cc index 8544ca0f260ca..7fed2882a787c 100644 --- a/RecoEgamma/EgammaPhotonProducers/src/PhotonProducer.cc +++ b/RecoEgamma/EgammaPhotonProducers/src/PhotonProducer.cc @@ -38,6 +38,9 @@ #include "RecoLocalCalo/EcalRecAlgos/interface/EcalSeverityLevelAlgo.h" #include "RecoLocalCalo/EcalRecAlgos/interface/EcalSeverityLevelAlgoRcd.h" #include "RecoEgamma/EgammaElectronAlgos/interface/ElectronHcalHelper.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" #include @@ -53,6 +56,7 @@ class PhotonProducer : public edm::stream::EDProducer<> { edm::EventSetup const& es, const edm::Handle& photonCoreHandle, const CaloTopology* topology, + const HcalPFCuts* hcalCuts, 
const EcalRecHitCollection* ecalBarrelHits, const EcalRecHitCollection* ecalEndcapHits, ElectronHcalHelper const& hcalHelperCone, @@ -110,6 +114,9 @@ class PhotonProducer : public edm::stream::EDProducer<> { std::unique_ptr hcalHelperCone_; std::unique_ptr hcalHelperBc_; bool hcalRun2EffDepth_; + + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB_; }; #include "FWCore/Framework/interface/MakerMacros.h" @@ -138,6 +145,12 @@ PhotonProducer::PhotonProducer(const edm::ParameterSet& config) edm::ParameterSet posCalcParameters = config.getParameter("posCalcParameters"); posCalculator_ = PositionCalc(posCalcParameters); + //Retrieve HCAL PF thresholds - from config or from DB + cutsFromDB_ = config.getParameter("usePFThresholdsFromDB"); + if (cutsFromDB_) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } + //AA //Flags and Severities to be excluded from photon calculations const std::vector flagnamesEB = @@ -241,6 +254,10 @@ PhotonProducer::PhotonProducer(const edm::ParameterSet& config) } void PhotonProducer::produce(edm::Event& theEvent, const edm::EventSetup& theEventSetup) { + HcalPFCuts const* hcalCuts = nullptr; + if (cutsFromDB_) { + hcalCuts = &theEventSetup.getData(hcalCutsToken_); + } using namespace edm; // nEvt_++; @@ -306,6 +323,7 @@ void PhotonProducer::produce(edm::Event& theEvent, const edm::EventSetup& theEve theEventSetup, photonCoreHandle, topology, + hcalCuts, &barrelRecHits, &endcapRecHits, *hcalHelperCone_, @@ -331,6 +349,7 @@ void PhotonProducer::fillPhotonCollection(edm::Event& evt, edm::EventSetup const& es, const edm::Handle& photonCoreHandle, const CaloTopology* topology, + const HcalPFCuts* hcalCuts, const EcalRecHitCollection* ecalBarrelHits, const EcalRecHitCollection* ecalEndcapHits, ElectronHcalHelper const& hcalHelperCone, @@ -435,7 +454,7 @@ void PhotonProducer::fillPhotonCollection(edm::Event& evt, // Calculate fiducial flags and isolation variable. 
Blocked are filled from the isolationCalculator reco::Photon::FiducialFlags fiducialFlags; reco::Photon::IsolationVariables isolVarR03, isolVarR04; - photonIsolationCalculator_.calculate(&newCandidate, evt, es, fiducialFlags, isolVarR04, isolVarR03); + photonIsolationCalculator_.calculate(&newCandidate, evt, es, fiducialFlags, isolVarR04, isolVarR03, hcalCuts); newCandidate.setFiducialVolumeFlags(fiducialFlags); newCandidate.setIsolationVariables(isolVarR04, isolVarR03); @@ -449,8 +468,8 @@ void PhotonProducer::fillPhotonCollection(edm::Event& evt, showerShape.sigmaEtaEta = sigmaEtaEta; showerShape.sigmaIetaIeta = sigmaIetaIeta; for (uint id = 0; id < showerShape.hcalOverEcal.size(); ++id) { - showerShape.hcalOverEcal[id] = hcalHelperCone.hcalESum(*scRef, id + 1) / scRef->energy(); - showerShape.hcalOverEcalBc[id] = hcalHelperBc.hcalESum(*scRef, id + 1) / scRef->energy(); + showerShape.hcalOverEcal[id] = hcalHelperCone.hcalESum(*scRef, id + 1, hcalCuts) / scRef->energy(); + showerShape.hcalOverEcalBc[id] = hcalHelperBc.hcalESum(*scRef, id + 1, hcalCuts) / scRef->energy(); } showerShape.hcalTowersBehindClusters = hcalHelperBc.hcalTowersBehindClusters(*scRef); showerShape.pre7DepthHcal = false; @@ -466,8 +485,8 @@ void PhotonProducer::fillPhotonCollection(edm::Event& evt, full5x5_showerShape.sigmaEtaEta = full5x5_sigmaEtaEta; full5x5_showerShape.sigmaIetaIeta = full5x5_sigmaIetaIeta; for (uint id = 0; id < full5x5_showerShape.hcalOverEcal.size(); ++id) { - full5x5_showerShape.hcalOverEcal[id] = hcalHelperCone.hcalESum(*scRef, id + 1) / full5x5_e5x5; - full5x5_showerShape.hcalOverEcalBc[id] = hcalHelperBc.hcalESum(*scRef, id + 1) / full5x5_e5x5; + full5x5_showerShape.hcalOverEcal[id] = hcalHelperCone.hcalESum(*scRef, id + 1, hcalCuts) / full5x5_e5x5; + full5x5_showerShape.hcalOverEcalBc[id] = hcalHelperBc.hcalESum(*scRef, id + 1, hcalCuts) / full5x5_e5x5; } full5x5_showerShape.hcalTowersBehindClusters = hcalHelperBc.hcalTowersBehindClusters(*scRef); 
full5x5_showerShape.pre7DepthHcal = false; diff --git a/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py b/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py index 92b3c3339d8b0..641f493656177 100644 --- a/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py +++ b/RecoEgamma/EgammaTools/python/egammaObjectModificationsInMiniAOD_cff.py @@ -11,6 +11,7 @@ import RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_iso_V1_cff as ele_RunIIIWinter22_iso_v1 import RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_noIso_V1_cff as ele_RunIIIWinter22_noIso_v1 +import RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff as ele_Winter22_HZZ_V1 #photon mva ids import RecoEgamma.PhotonIdentification.Identification.mvaPhotonID_Spring16_nonTrig_V1_cff as pho_spring16_nt_v1 @@ -49,7 +50,8 @@ def setup_mva(val_pset,cat_pset,prod_name,mva_name): ele_fall17_noIso_v2, ele_summer18UL_hzz, ele_RunIIIWinter22_iso_v1, - ele_RunIIIWinter22_noIso_v1 + ele_RunIIIWinter22_noIso_v1, + ele_Winter22_HZZ_V1 ]: setup_mva(egamma_modifications[0].electron_config, diff --git a/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py b/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py index 991e11b123d3a..dbc65a274d42a 100644 --- a/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py +++ b/RecoEgamma/ElectronIdentification/python/ElectronMVAValueMapProducer_cfi.py @@ -48,6 +48,10 @@ import mvaEleID_Summer18UL_ID_ISO_producer_config mvaConfigsForEleProducer.append( mvaEleID_Summer18UL_ID_ISO_producer_config ) +from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff \ + import mvaEleID_Winter22_HZZ_V1_producer_config +mvaConfigsForEleProducer.append( mvaEleID_Winter22_HZZ_V1_producer_config ) + electronMVAValueMapProducer = 
cms.EDProducer('ElectronMVAValueMapProducer', src = cms.InputTag('slimmedElectrons'), mvaConfigurations = mvaConfigsForEleProducer diff --git a/RecoEgamma/ElectronIdentification/python/FWLite.py b/RecoEgamma/ElectronIdentification/python/FWLite.py index 998c190783e40..d42d090967bfa 100644 --- a/RecoEgamma/ElectronIdentification/python/FWLite.py +++ b/RecoEgamma/ElectronIdentification/python/FWLite.py @@ -100,6 +100,8 @@ def passed(self, ele, mva, category, wp): import mvaSpring16WeightFiles_V1 as mvaSpring16GPWeightFiles_V1 from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring16_HZZ_V1_cff \ import mvaSpring16WeightFiles_V1 as mvaSpring16HZZWeightFiles_V1 +from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff \ + import mvaWeightFiles as mvaWinter22HZZWeightFiles_V1 from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Spring16_GeneralPurpose_V1_cff \ import workingPoints as mvaSpring16GP_V1_workingPoints @@ -113,6 +115,8 @@ def passed(self, ele, mva, category, wp): import workingPoints as RunIIIWinter22_iso_V1_workingPoints from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_noIso_V1_cff \ import workingPoints as RunIIIWinter22_noIso_V1_workingPoints +from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff \ + import workingPoints as Winter22_HZZ_V1_workingPoints # Dictionary with the relecant e/gmma MVAs @@ -129,6 +133,8 @@ def passed(self, ele, mva, category, wp): EleMVA_6CategoriesCuts, mvaSpring16HZZWeightFiles_V1, mvaVariablesFile), "Spring16GPV1" : ElectronMVAID("ElectronMVAEstimatorRun2","Spring16GeneralPurposeV1", EleMVA_3CategoriesCuts, mvaSpring16GPWeightFiles_V1, mvaVariablesFile), + "Winter22HZZV1" : ElectronMVAID("ElectronMVAEstimatorRun2","Winter22HZZV1", + EleMVA_6CategoriesCuts, mvaWinter22HZZWeightFiles_V1, mvaVariablesFileRun3), } working_points = { @@ -144,5 +150,7 @@ def passed(self, ele, mva, category, wp): 
mvaSpring16HZZ_V1_workingPoints, logistic_transform=True), "Spring16GPV1" : WorkingPoints("ElectronMVAEstimatorRun2","Spring16GeneralPurposeV1", mvaSpring16GP_V1_workingPoints, logistic_transform=True), + "Winter22HZZV1" : WorkingPoints("ElectronMVAEstimatorRun2","Winter22HZZV1", + Winter22_HZZ_V1_workingPoints), } diff --git a/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Winter22_HZZ_V1_cff.py b/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Winter22_HZZ_V1_cff.py new file mode 100644 index 0000000000000..30046434ceed4 --- /dev/null +++ b/RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Winter22_HZZ_V1_cff.py @@ -0,0 +1,49 @@ +import FWCore.ParameterSet.Config as cms +from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import * +from os import path + +mvaTag = "Winter22HZZV1" + +weightFileDir = "RecoEgamma/ElectronIdentification/data/MVAWeightFiles/Winter22HZZV1" + +mvaWeightFiles = cms.vstring( + path.join(weightFileDir, "EB1_5.weights.xml.gz"), # EB1_5 + path.join(weightFileDir, "EB2_5.weights.xml.gz"), # EB2_5 + path.join(weightFileDir, "EE_5.weights.xml.gz"), # EE_5 + path.join(weightFileDir, "EB1_10.weights.xml.gz"), # EB1_10 + path.join(weightFileDir, "EB2_10.weights.xml.gz"), # EB2_10 + path.join(weightFileDir, "EE_10.weights.xml.gz"), # EE_10 + ) + +categoryCuts = cms.vstring( + "pt < 10. && abs(superCluster.eta) < 0.800", + "pt < 10. && abs(superCluster.eta) >= 0.800 && abs(superCluster.eta) < 1.479", + "pt < 10. && abs(superCluster.eta) >= 1.479", + "pt >= 10. && abs(superCluster.eta) < 0.800", + "pt >= 10. && abs(superCluster.eta) >= 0.800 && abs(superCluster.eta) < 1.479", + "pt >= 10. 
&& abs(superCluster.eta) >= 1.479", +) + +mvaEleID_Winter22_HZZ_V1_container = EleMVARaw_WP( + idName = "mvaEleID-Winter22-HZZ-V1", mvaTag = mvaTag, + cutCategory0 = "1.633973689084034", # EB1_5 + cutCategory1 = "1.5499076306249353", # EB2_5 + cutCategory2 = "2.0629564440753247", # EE_5 + cutCategory3 = "0.3685228146685872", # EB1_10 + cutCategory4 = "0.2662407818935475", # EB2_10 + cutCategory5 = "-0.5444837363886459", # EE_10 + ) + + +mvaEleID_Winter22_HZZ_V1_producer_config = cms.PSet( + mvaName = cms.string(mvaClassName), + mvaTag = cms.string(mvaTag), + nCategories = cms.int32(6), + categoryCuts = categoryCuts, + weightFileNames = mvaWeightFiles, + variableDefinition = cms.string(mvaVariablesFileRun3) + ) + +mvaEleID_Winter22_HZZ_V1 = configureVIDMVAEleID( mvaEleID_Winter22_HZZ_V1_container ) + +mvaEleID_Winter22_HZZ_V1.isPOGApproved = cms.untracked.bool(True) diff --git a/RecoEgamma/ElectronIdentification/test/testElectronMVARun3_cfg.py b/RecoEgamma/ElectronIdentification/test/testElectronMVARun3_cfg.py index 21c837a167eaf..35a9a769e85c5 100644 --- a/RecoEgamma/ElectronIdentification/test/testElectronMVARun3_cfg.py +++ b/RecoEgamma/ElectronIdentification/test/testElectronMVARun3_cfg.py @@ -53,6 +53,7 @@ #'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_iso_V1_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_iso_V1_cff', 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_RunIIIWinter22_noIso_V1_cff', + 'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Winter22_HZZ_V1_cff', ] #add them to the VID producer @@ -71,6 +72,7 @@ "egmGsfElectronIDs:mvaEleID-RunIIIWinter22-iso-V1-wp90", "egmGsfElectronIDs:mvaEleID-RunIIIWinter22-noIso-V1-wp80", "egmGsfElectronIDs:mvaEleID-RunIIIWinter22-noIso-V1-wp90", + "egmGsfElectronIDs:mvaEleID-Winter22-HZZ-V1", #"egmGsfElectronIDs:mvaEleID-Fall17-noIso-V2-wp90", #"egmGsfElectronIDs:mvaEleID-Fall17-iso-V2-wpHZZ", # 
"egmGsfElectronIDs:mvaEleID-Fall17-iso-V2-wp80", @@ -104,7 +106,8 @@ "RunIIIWinter22isoV1wp80", "RunIIIWinter22isoV1wp90", "RunIIIWinter22noIsoV1wp80", - "RunIIIWinter22noIsoV1wp90", + "RunIIIWinter22noIsoV1wp90", + "Winter22isoV1wpHZZ", ), eleMVAValMaps = cms.vstring( #"electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring16GeneralPurposeV1Values", @@ -121,6 +124,8 @@ "electronMVAValueMapProducer:ElectronMVAEstimatorRun2RunIIIWinter22IsoV1RawValues", "electronMVAValueMapProducer:ElectronMVAEstimatorRun2RunIIIWinter22NoIsoV1Values", "electronMVAValueMapProducer:ElectronMVAEstimatorRun2RunIIIWinter22NoIsoV1RawValues", + "electronMVAValueMapProducer:ElectronMVAEstimatorRun2Winter22HZZV1Values", + "electronMVAValueMapProducer:ElectronMVAEstimatorRun2Winter22HZZV1RawValues", ), eleMVAValMapLabels = cms.vstring( #"Spring16GPV1Vals", @@ -136,6 +141,8 @@ "RunIIIWinter22NoIsoV1RawVals", "RunIIIWinter22IsoV1Vals", "RunIIIWinter22IsoV1RawVals", + "Winter22HZZV1Vals", + "Winter22HZZV1RawVals", ), eleMVACats = cms.vstring( #"electronMVAValueMapProducer:ElectronMVAEstimatorRun2Fall17NoIsoV1Categories", diff --git a/RecoEgamma/PhotonIdentification/interface/PhotonIsolationCalculator.h b/RecoEgamma/PhotonIdentification/interface/PhotonIsolationCalculator.h index edbebc27626e4..85204bd35ebb1 100644 --- a/RecoEgamma/PhotonIdentification/interface/PhotonIsolationCalculator.h +++ b/RecoEgamma/PhotonIdentification/interface/PhotonIsolationCalculator.h @@ -24,6 +24,8 @@ #include "Geometry/CaloTopology/interface/CaloTowerConstituentsMap.h" #include "RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h" +#include "CondFormats/EcalObjects/interface/EcalPFRecHitThresholds.h" +#include "CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" class EcalSeverityLevelAlgo; class EcalSeverityLevelAlgoRcd; @@ -46,7 +48,8 @@ class PhotonIsolationCalculator { const edm::EventSetup& es, reco::Photon::FiducialFlags& phofid, reco::Photon::IsolationVariables& phoisolR03, - 
reco::Photon::IsolationVariables& phoisolR04) const; + reco::Photon::IsolationVariables& phoisolR04, + const HcalPFCuts* hcalCuts) const; private: static void classify(const reco::Photon* photon, @@ -90,7 +93,8 @@ class PhotonIsolationCalculator { const HBHERecHitCollection& hbheRecHits, double RCone, double RConeInner, - int depth) const dso_internal; + int depth, + const HcalPFCuts* hcalCuts) const dso_internal; private: edm::EDGetToken barrelecalCollection_; @@ -103,6 +107,7 @@ class PhotonIsolationCalculator { edm::ESGetToken hcalSevLvlComputerToken_; edm::ESGetToken towerMapToken_; edm::ESGetToken ecalSevLvlToken_; + edm::ESGetToken ecalPFRechitThresholdsToken_; edm::EDGetToken trackInputTag_; edm::EDGetToken beamSpotProducerTag_; diff --git a/RecoEgamma/PhotonIdentification/src/PhotonIsolationCalculator.cc b/RecoEgamma/PhotonIdentification/src/PhotonIsolationCalculator.cc index 8a3442f9da7ae..096fdb2545c5f 100644 --- a/RecoEgamma/PhotonIdentification/src/PhotonIsolationCalculator.cc +++ b/RecoEgamma/PhotonIdentification/src/PhotonIsolationCalculator.cc @@ -49,8 +49,7 @@ void PhotonIsolationCalculator::setup(const edm::ParameterSet& conf, hcalSevLvlComputerToken_ = decltype(hcalSevLvlComputerToken_){iC.esConsumes()}; towerMapToken_ = decltype(towerMapToken_){iC.esConsumes()}; ecalSevLvlToken_ = iC.esConsumes(); - - // gsfRecoInputTag_ = conf.getParameter("GsfRecoCollection"); + ecalPFRechitThresholdsToken_ = iC.esConsumes(); modulePhiBoundary_ = conf.getParameter("modulePhiBoundary"); moduleEtaBoundary_ = conf.getParameter>("moduleEtaBoundary"); // @@ -147,7 +146,8 @@ void PhotonIsolationCalculator::calculate(const reco::Photon* pho, const edm::EventSetup& es, reco::Photon::FiducialFlags& phofid, reco::Photon::IsolationVariables& phoisolR1, - reco::Photon::IsolationVariables& phoisolR2) const { + reco::Photon::IsolationVariables& phoisolR2, + const HcalPFCuts* hcalCuts) const { //Get fiducial flags. 
This does not really belong here bool isEBPho = false; bool isEEPho = false; @@ -362,9 +362,10 @@ void PhotonIsolationCalculator::calculate(const reco::Photon* pho, &hcalQual = *hcalChannelQuality, &hcalSev = *hcalSevLvlComputer, &towerMap, - &hbheRecHits](double outer, double inner, int depth) { + &hbheRecHits, + hcalCuts](double outer, double inner, int depth) { return calculateHcalRecHitIso( - pho, caloGeometry, hcalTopo, hcalQual, hcalSev, towerMap, hbheRecHits, outer, inner, depth); + pho, caloGeometry, hcalTopo, hcalQual, hcalSev, towerMap, hbheRecHits, outer, inner, depth, hcalCuts); }; auto fbc = [this, @@ -374,9 +375,10 @@ void PhotonIsolationCalculator::calculate(const reco::Photon* pho, &hcalQual = *hcalChannelQuality, &hcalSev = *hcalSevLvlComputer, &towerMap, - &hbheRecHits](double outer, int depth) { + &hbheRecHits, + hcalCuts](double outer, int depth) { return calculateHcalRecHitIso( - pho, caloGeometry, hcalTopo, hcalQual, hcalSev, towerMap, hbheRecHits, outer, 0., depth); + pho, caloGeometry, hcalTopo, hcalQual, hcalSev, towerMap, hbheRecHits, outer, 0., depth, hcalCuts); }; for (size_t id = 0; id < phoisolR1.hcalRecHitSumEt.size(); ++id) { @@ -497,6 +499,8 @@ double PhotonIsolationCalculator::calculateEcalRecHitIso(const reco::Photon* pho iEvent.getByToken(barrelecalCollection_, ecalhitsCollEB); + auto const& thresholds = iSetup.getData(ecalPFRechitThresholdsToken_); + const EcalRecHitCollection* rechitsCollectionEE_ = ecalhitsCollEE.product(); const EcalRecHitCollection* rechitsCollectionEB_ = ecalhitsCollEB.product(); @@ -511,7 +515,7 @@ double PhotonIsolationCalculator::calculateEcalRecHitIso(const reco::Photon* pho phoIsoEB.setUseNumCrystals(useNumXtals); phoIsoEB.doSeverityChecks(ecalhitsCollEB.product(), severityExclEB_); phoIsoEB.doFlagChecks(flagsEB_); - double ecalIsolEB = phoIsoEB.getEtSum(photon); + double ecalIsolEB = phoIsoEB.getEtSum(photon, thresholds); EgammaRecHitIsolation phoIsoEE( RCone, RConeInner, etaSlice, etMin, eMin, 
geoHandle, *rechitsCollectionEE_, sevLevel, DetId::Ecal); @@ -521,7 +525,7 @@ double PhotonIsolationCalculator::calculateEcalRecHitIso(const reco::Photon* pho phoIsoEE.doSeverityChecks(ecalhitsCollEE.product(), severityExclEE_); phoIsoEE.doFlagChecks(flagsEE_); - double ecalIsolEE = phoIsoEE.getEtSum(photon); + double ecalIsolEE = phoIsoEE.getEtSum(photon, thresholds); // delete phoIso; double ecalIsol = ecalIsolEB + ecalIsolEE; @@ -538,7 +542,8 @@ double PhotonIsolationCalculator::calculateHcalRecHitIso(const reco::Photon* pho const HBHERecHitCollection& hbheRecHits, double RCone, double RConeInner, - int depth) const { + int depth, + const HcalPFCuts* hcalCuts) const { const EgammaHcalIsolation::arrayHB e04{{0., 0., 0., 0.}}; const EgammaHcalIsolation::arrayHE e07{{0., 0., 0., 0., 0., 0., 0.}}; @@ -560,7 +565,7 @@ double PhotonIsolationCalculator::calculateHcalRecHitIso(const reco::Photon* pho hcalSevLvlComputer, towerMap); - return hcaliso.getHcalEtSumBc(photon, depth); + return hcaliso.getHcalEtSumBc(photon, depth, hcalCuts); } else { auto hcaliso = EgammaHcalIsolation(EgammaHcalIsolation::InclusionRule::withinConeAroundCluster, RCone, @@ -579,6 +584,6 @@ double PhotonIsolationCalculator::calculateHcalRecHitIso(const reco::Photon* pho hcalSevLvlComputer, towerMap); - return hcaliso.getHcalEtSum(photon, depth); + return hcaliso.getHcalEtSum(photon, depth, hcalCuts); } } diff --git a/RecoHI/Configuration/python/Reconstruction_hiPF_cff.py b/RecoHI/Configuration/python/Reconstruction_hiPF_cff.py index 20c353ae16718..8dd03b2e2ad5c 100644 --- a/RecoHI/Configuration/python/Reconstruction_hiPF_cff.py +++ b/RecoHI/Configuration/python/Reconstruction_hiPF_cff.py @@ -48,10 +48,13 @@ cms.PSet( importerName = cms.string("SuperClusterImporter"), source_eb = cms.InputTag("particleFlowSuperClusterECAL:particleFlowSuperClusterECALBarrel"), source_ee = cms.InputTag("particleFlowSuperClusterECAL:particleFlowSuperClusterECALEndcapWithPreshower"), - source_towers = 
cms.InputTag("towerMaker"), maximumHoverE = cms.double(0.5), minSuperClusterPt = cms.double(10.0), minPTforBypass = cms.double(100.0), + hbheRecHitsTag = cms.InputTag("hbhereco"), + maxSeverityHB = cms.int32(9), + maxSeverityHE = cms.int32(9), + usePFThresholdsFromDB = cms.bool(True), superClustersArePF = cms.bool(True) ), # all secondary track importers cms.PSet( importerName = cms.string("GeneralTracksImporter"), diff --git a/RecoHI/HiCentralityAlgos/python/HiCentrality_cfi.py b/RecoHI/HiCentralityAlgos/python/HiCentrality_cfi.py index cadc6b45def02..c3e9d73fc0d5b 100644 --- a/RecoHI/HiCentralityAlgos/python/HiCentrality_cfi.py +++ b/RecoHI/HiCentralityAlgos/python/HiCentrality_cfi.py @@ -36,7 +36,8 @@ from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017 from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA -(pp_on_XeXe_2017 | pp_on_AA).toModify(hiCentrality, +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +(pp_on_XeXe_2017 | pp_on_AA | run3_upc).toModify(hiCentrality, producePixelTracks = True, srcPixelTracks = "hiConformalPixelTracks", srcTracks = "generalTracks", diff --git a/RecoHI/HiTracking/python/HILowPtConformalPixelTracks_cfi.py b/RecoHI/HiTracking/python/HILowPtConformalPixelTracks_cfi.py index 2d170b5b39e50..e5733107d3622 100644 --- a/RecoHI/HiTracking/python/HILowPtConformalPixelTracks_cfi.py +++ b/RecoHI/HiTracking/python/HILowPtConformalPixelTracks_cfi.py @@ -217,3 +217,7 @@ )) hiConformalPixelTracksSequencePhase1 = cms.Sequence(hiConformalPixelTracksTaskPhase1) + +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(hiConformalPixelTracksPhase1TrackingRegions.RegionPSet, ptMin = 0.05) +run3_upc.toModify(hiConformalPixelTracksPhase1Filter, ptMin = 0.05) diff --git a/RecoJets/Configuration/python/RecoJets_EventContent_cff.py b/RecoJets/Configuration/python/RecoJets_EventContent_cff.py index f38a8f7f8e585..1ee57683ddf1f 100644 --- 
a/RecoJets/Configuration/python/RecoJets_EventContent_cff.py +++ b/RecoJets/Configuration/python/RecoJets_EventContent_cff.py @@ -52,12 +52,13 @@ from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017 from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc #products from regular pp which does not fit the normal AOD for e in [pA_2016, peripheralPbPb, pp_on_XeXe_2017, pp_on_AA]: e.toModify( RecoJetsAOD.outputCommands, func=lambda outputCommands: outputCommands.extend(['keep *_towerMaker_*_*']) ) -for e in [pp_on_XeXe_2017, pp_on_AA]: +for e in [pp_on_XeXe_2017, pp_on_AA, run3_upc]: e.toModify( RecoJetsAOD.outputCommands, func=lambda outputCommands: outputCommands.extend(['keep recoCentrality*_hiCentrality_*_*', 'keep recoClusterCompatibility*_hiClusterCompatibility_*_*' diff --git a/RecoJets/JetProducers/plugins/FixedGridRhoProducerFastjetFromRecHit.cc b/RecoJets/JetProducers/plugins/FixedGridRhoProducerFastjetFromRecHit.cc index 1b99e960cfafc..0767ec0ee4460 100644 --- a/RecoJets/JetProducers/plugins/FixedGridRhoProducerFastjetFromRecHit.cc +++ b/RecoJets/JetProducers/plugins/FixedGridRhoProducerFastjetFromRecHit.cc @@ -27,6 +27,8 @@ So this recHit-based rho producer, FixedGridRhoProducerFastjetFromRecHit, can be #include "CondFormats/DataRecord/interface/EcalPFRecHitThresholdsRcd.h" #include "fastjet/tools/GridMedianBackgroundEstimator.hh" #include "RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" class FixedGridRhoProducerFastjetFromRecHit : public edm::stream::EDProducer<> { public: @@ -37,7 +39,7 @@ class FixedGridRhoProducerFastjetFromRecHit : public edm::stream::EDProducer<> { private: void produce(edm::Event &, const edm::EventSetup &) override; std::array 
getHitP4(const DetId &detId, const double hitE, const CaloGeometry &caloGeometry) const; - bool passedHcalNoiseCut(const HBHERecHit &hit) const; + bool passedHcalNoiseCut(const HBHERecHit &hit, const HcalPFCuts *) const; bool passedEcalNoiseCut(const EcalRecHit &hit, const EcalPFRecHitThresholds &thresholds) const; fastjet::GridMedianBackgroundEstimator bge_; @@ -55,6 +57,11 @@ class FixedGridRhoProducerFastjetFromRecHit : public edm::stream::EDProducer<> { const edm::ESGetToken ecalPFRecHitThresholdsToken_; const edm::ESGetToken caloGeometryToken_; + + // following are needed to grab HCal thresholds from GT + edm::ESGetToken hcalCutsToken_; + const bool cutsFromDB_; + HcalPFCuts const *paramPF_ = nullptr; }; FixedGridRhoProducerFastjetFromRecHit::FixedGridRhoProducerFastjetFromRecHit(const edm::ParameterSet &iConfig) @@ -67,11 +74,15 @@ FixedGridRhoProducerFastjetFromRecHit::FixedGridRhoProducerFastjetFromRecHit(con skipHCAL_(iConfig.getParameter("skipHCAL")), skipECAL_(iConfig.getParameter("skipECAL")), ecalPFRecHitThresholdsToken_{esConsumes()}, - caloGeometryToken_{esConsumes()} { + caloGeometryToken_{esConsumes()}, + cutsFromDB_(iConfig.getParameter("usePFThresholdsFromDB")) { if (skipHCAL_ && skipECAL_) { throw cms::Exception("FixedGridRhoProducerFastjetFromRecHit") << "skipHCAL and skipECAL both can't be True. 
Please make at least one of them False."; } + if (cutsFromDB_) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } produces(); } @@ -89,12 +100,17 @@ void FixedGridRhoProducerFastjetFromRecHit::fillDescriptions(edm::ConfigurationD desc.add >("eThresHE", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); desc.add("maxRapidity", 2.5); desc.add("gridSpacing", 0.55); + desc.add("usePFThresholdsFromDB", true); descriptions.addWithDefaultLabel(desc); } FixedGridRhoProducerFastjetFromRecHit::~FixedGridRhoProducerFastjetFromRecHit() = default; void FixedGridRhoProducerFastjetFromRecHit::produce(edm::Event &iEvent, const edm::EventSetup &iSetup) { + if (cutsFromDB_) { + paramPF_ = &iSetup.getData(hcalCutsToken_); + } + std::vector inputs; auto const &thresholds = iSetup.getData(ecalPFRecHitThresholdsToken_); auto const &caloGeometry = iSetup.getData(caloGeometryToken_); @@ -103,7 +119,7 @@ void FixedGridRhoProducerFastjetFromRecHit::produce(edm::Event &iEvent, const ed auto const &hbheRecHits = iEvent.get(hbheRecHitsTag_); inputs.reserve(inputs.size() + hbheRecHits.size()); for (const auto &hit : hbheRecHits) { - if (passedHcalNoiseCut(hit)) { + if (passedHcalNoiseCut(hit, paramPF_)) { const auto &hitp4 = getHitP4(hit.id(), hit.energy(), caloGeometry); inputs.emplace_back(fastjet::PseudoJet(hitp4[0], hitp4[1], hitp4[2], hitp4[3])); } @@ -147,11 +163,17 @@ std::array FixedGridRhoProducerFastjetFromRecHit::getHitP4(const DetI } //HCAL noise cleaning cuts. 
-bool FixedGridRhoProducerFastjetFromRecHit::passedHcalNoiseCut(const HBHERecHit &hit) const { - const auto thisDetId = hit.id(); - const auto thisDepth = thisDetId.depth(); - return (thisDetId.subdet() == HcalBarrel && hit.energy() > eThresHB_[thisDepth - 1]) || - (thisDetId.subdet() == HcalEndcap && hit.energy() > eThresHE_[thisDepth - 1]); +bool FixedGridRhoProducerFastjetFromRecHit::passedHcalNoiseCut(const HBHERecHit &hit, + const HcalPFCuts *hcalcuts) const { + if (hcalcuts != nullptr) { // using hcal cuts from DB + const HcalPFCut *item = hcalcuts->getValues(hit.id().rawId()); + return (hit.energy() > item->noiseThreshold()); + } else { // using hcal cuts from config file + const auto thisDetId = hit.id(); + const auto thisDepth = thisDetId.depth(); + return (thisDetId.subdet() == HcalBarrel && hit.energy() > eThresHB_[thisDepth - 1]) || + (thisDetId.subdet() == HcalEndcap && hit.energy() > eThresHE_[thisDepth - 1]); + } } //ECAL noise cleaning cuts using per-crystal PF-recHit thresholds. 
diff --git a/RecoLocalCalo/CaloTowersCreator/python/calotowermaker_cfi.py b/RecoLocalCalo/CaloTowersCreator/python/calotowermaker_cfi.py index f54754ab89e03..e5413cf39701f 100644 --- a/RecoLocalCalo/CaloTowersCreator/python/calotowermaker_cfi.py +++ b/RecoLocalCalo/CaloTowersCreator/python/calotowermaker_cfi.py @@ -146,7 +146,10 @@ AllowMissingInputs = cms.bool(False), # specify hcal upgrade phase - 0, 1, 2 - HcalPhase = cms.int32(0) + HcalPhase = cms.int32(0), + +# Read HBHE thresholds from Global Tag + usePFThresholdsFromDB = cms.bool(False) ) @@ -175,3 +178,8 @@ HBThreshold2 = 0.2, HBThreshold = 0.3, ) + +#--- Use DB conditions for HBHE thresholds for Run3 and phase2 +from Configuration.Eras.Modifier_hcalPfCutsFromDB_cff import hcalPfCutsFromDB +hcalPfCutsFromDB.toModify( calotowermaker, + usePFThresholdsFromDB = True) diff --git a/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.cc b/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.cc index 6dd974d999d64..00c8566a55afd 100644 --- a/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.cc +++ b/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.cc @@ -353,6 +353,8 @@ CaloTowersCreationAlgo::CaloTowersCreationAlgo(double EBthreshold, // nalgo=N; } +void CaloTowersCreationAlgo::setThresFromDB(const HcalPFCuts* cuts) { hcalCuts = cuts; } + void CaloTowersCreationAlgo::setGeometry(const CaloTowerTopology* cttopo, const CaloTowerConstituentsMap* ctmap, const HcalTopology* htopo, @@ -1271,7 +1273,12 @@ void CaloTowersCreationAlgo::getThresholdAndWeight(const DetId& detId, double& t int depth = hcalDetId.depth(); if (subdet == HcalBarrel) { - threshold = (depth == 1) ? theHBthreshold1 : (depth == 2) ? theHBthreshold2 : theHBthreshold; + if (hcalCuts == nullptr) { // this means cutsFromDB is false + threshold = (depth == 1) ? theHBthreshold1 : (depth == 2) ? theHBthreshold2 : theHBthreshold; + } else { // hcalCuts is not nullptr, i.e. 
cutsFromDB is true + const HcalPFCut* item = hcalCuts->getValues(hcalDetId.rawId()); + threshold = item->noiseThreshold(); + } weight = theHBweight; if (weight <= 0.) { ROOT::Math::Interpolator my(theHBGrid, theHBWeights, ROOT::Math::Interpolation::kAKIMA); @@ -1282,14 +1289,24 @@ void CaloTowersCreationAlgo::getThresholdAndWeight(const DetId& detId, double& t else if (subdet == HcalEndcap) { // check if it's single or double tower if (hcalDetId.ietaAbs() < theHcalTopology->firstHEDoublePhiRing()) { - threshold = (depth == 1) ? theHESthreshold1 : theHESthreshold; + if (hcalCuts == nullptr) { // this means cutsFromDB is false + threshold = (depth == 1) ? theHESthreshold1 : theHESthreshold; + } else { // hcalCuts is not nullptr, i.e. cutsFromDB is true + const HcalPFCut* item = hcalCuts->getValues(hcalDetId.rawId()); + threshold = item->noiseThreshold(); + } weight = theHESweight; if (weight <= 0.) { ROOT::Math::Interpolator my(theHESGrid, theHESWeights, ROOT::Math::Interpolation::kAKIMA); weight = my.Eval(theHESEScale); } } else { - threshold = (depth == 1) ? theHEDthreshold1 : theHEDthreshold; + if (hcalCuts == nullptr) { // this means cutsFromDB is false + threshold = (depth == 1) ? theHEDthreshold1 : theHEDthreshold; + } else { // hcalCuts is not nullptr, i.e. cutsFromDB is true + const HcalPFCut* item = hcalCuts->getValues(hcalDetId.rawId()); + threshold = item->noiseThreshold(); + } weight = theHEDweight; if (weight <= 0.) 
{ ROOT::Math::Interpolator my(theHEDGrid, theHEDWeights, ROOT::Math::Interpolation::kAKIMA); diff --git a/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.h b/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.h index b56ea6586dd80..99d4bc75d5f64 100644 --- a/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.h +++ b/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreationAlgo.h @@ -26,6 +26,10 @@ #include #include + +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" + class CaloTowerTopology; class HcalTopology; class CaloGeometry; @@ -158,6 +162,7 @@ class CaloTowersCreationAlgo { const HcalTopology* htopo, const CaloGeometry* geo); + void setThresFromDB(const HcalPFCuts* cuts); // pass the containers of channels status from the event record (stored in DB) // these are called in CaloTowersCreator void setHcalChStatusFromDB(const HcalChannelQuality* s) { theHcalChStatus = s; } @@ -317,6 +322,7 @@ class CaloTowersCreationAlgo { double theHOEScale; double theHF1EScale; double theHF2EScale; + const HcalPFCuts* hcalCuts; const CaloTowerTopology* theTowerTopology; const HcalTopology* theHcalTopology; const CaloGeometry* theGeometry; diff --git a/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreator.cc b/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreator.cc index 25a7e9c48a207..4503026b5612f 100644 --- a/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreator.cc +++ b/RecoLocalCalo/CaloTowersCreator/src/CaloTowersCreator.cc @@ -6,7 +6,6 @@ // Now we allow for the creation of towers from // rejected hists as well: requested by the MET group // for studies of the effect of noise clean up. 
- #include "CaloTowersCreationAlgo.h" #include "EScales.h" @@ -25,12 +24,15 @@ #include "Geometry/Records/interface/CaloGeometryRecord.h" #include "Geometry/Records/interface/IdealGeometryRecord.h" #include "RecoLocalCalo/EcalRecAlgos/interface/EcalSeverityLevelAlgoRcd.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" class CaloTowersCreator : public edm::stream::EDProducer<> { public: explicit CaloTowersCreator(const edm::ParameterSet& ps); ~CaloTowersCreator() override {} void produce(edm::Event& e, const edm::EventSetup& c) override; + void beginRun(edm::Run const&, edm::EventSetup const&) override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); double EBEScale, EEEScale, HBEScale, HESEScale; double HEDEScale, HOEScale, HF1EScale, HF2EScale; @@ -84,6 +86,10 @@ class CaloTowersCreator : public edm::stream::EDProducer<> { edm::ESWatcher caloTowerConstituentsWatcher_; edm::ESWatcher ecalSevLevelWatcher_; EScales eScales_; + + edm::ESGetToken hcalCutsToken_; + bool cutsFromDB; + HcalPFCuts const* paramPF = nullptr; }; #include "FWCore/Framework/interface/MakerMacros.h" @@ -165,7 +171,8 @@ CaloTowersCreator::CaloTowersCreator(const edm::ParameterSet& conf) conf.getParameter("HcalAcceptSeverityLevelForRejectedHit")), useRejectedRecoveredHcalHits_(conf.getParameter("UseRejectedRecoveredHcalHits")), - useRejectedRecoveredEcalHits_(conf.getParameter("UseRejectedRecoveredEcalHits")) + useRejectedRecoveredEcalHits_(conf.getParameter("UseRejectedRecoveredEcalHits")), + cutsFromDB(conf.getParameter("usePFThresholdsFromDB")) { algo_.setMissingHcalRescaleFactorForEcal(conf.getParameter("missingHcalRescaleFactorForEcal")); @@ -183,6 +190,9 @@ CaloTowersCreator::CaloTowersCreator(const edm::ParameterSet& conf) tok_hcalSevComputer_ = esConsumes(); tok_ecalSevAlgo_ = esConsumes(); + if (cutsFromDB) { + hcalCutsToken_ = esConsumes(edm::ESInputTag("", "withTopo")); + } const unsigned 
nLabels = ecalLabels_.size(); for (unsigned i = 0; i != nLabels; i++) toks_ecal_.push_back(consumes(ecalLabels_[i])); @@ -217,6 +227,13 @@ CaloTowersCreator::CaloTowersCreator(const edm::ParameterSet& conf) #endif } +void CaloTowersCreator::beginRun(const edm::Run& run, const edm::EventSetup& es) { + if (cutsFromDB) { + paramPF = &es.getData(hcalCutsToken_); + } + algo_.setThresFromDB(paramPF); +} + void CaloTowersCreator::produce(edm::Event& e, const edm::EventSetup& c) { // get the necessary event setup objects... edm::ESHandle pG = c.getHandle(tok_geom_); @@ -452,6 +469,7 @@ void CaloTowersCreator::fillDescriptions(edm::ConfigurationDescriptions& descrip desc.add("HcalAcceptSeverityLevelForRejectedHit", 9999); desc.add >("EcalSeveritiesToBeUsedInBadTowers", {}); desc.add("HcalPhase", 0); + desc.add("usePFThresholdsFromDB", true); descriptions.addDefault(desc); } diff --git a/RecoLocalCalo/EcalRecProducers/BuildFile.xml b/RecoLocalCalo/EcalRecProducers/BuildFile.xml index b77b79e9c1180..4852e0b98d1f4 100644 --- a/RecoLocalCalo/EcalRecProducers/BuildFile.xml +++ b/RecoLocalCalo/EcalRecProducers/BuildFile.xml @@ -1,5 +1,6 @@ + diff --git a/RecoLocalCalo/EcalRecProducers/plugins/EigenMatrixTypes_gpu.h b/RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h similarity index 87% rename from RecoLocalCalo/EcalRecProducers/plugins/EigenMatrixTypes_gpu.h rename to RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h index bbf9cb0dbb5c9..dab46c4868ab3 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/EigenMatrixTypes_gpu.h +++ b/RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h @@ -1,11 +1,10 @@ -#ifndef RecoLocalCalo_EcalRecProducers_plugins_EigenMatrixTypes_gpu_h -#define RecoLocalCalo_EcalRecProducers_plugins_EigenMatrixTypes_gpu_h +#ifndef RecoLocalCalo_EcalRecProducers_EigenMatrixTypes_gpu_h +#define RecoLocalCalo_EcalRecProducers_EigenMatrixTypes_gpu_h #include - #include -#include 
"CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h" +#include "DataFormats/EcalRecHit/interface/RecoTypes.h" namespace ecal { namespace multifit { @@ -46,4 +45,4 @@ namespace ecal { } // namespace multifit } // namespace ecal -#endif // RecoLocalCalo_EcalRecProducers_plugins_EigenMatrixTypes_gpu_h +#endif // RecoLocalCalo_EcalRecProducers_EigenMatrixTypes_gpu_h diff --git a/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationCommonKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationCommonKernels.h index 1797fb6d2ec88..20495ebf49be5 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationCommonKernels.h +++ b/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationCommonKernels.h @@ -1,8 +1,8 @@ #ifndef RecoLocalCalo_EcalRecProducers_plugins_AmplitudeComputationCommonKernels_h #define RecoLocalCalo_EcalRecProducers_plugins_AmplitudeComputationCommonKernels_h +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" #include "DeclsForKernels.h" -#include "EigenMatrixTypes_gpu.h" class EcalPulseShape; // this flag setting is applied to all of the cases diff --git a/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationKernels.h index 72ccf3b11a987..762de114c4a6a 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationKernels.h +++ b/RecoLocalCalo/EcalRecProducers/plugins/AmplitudeComputationKernels.h @@ -1,8 +1,8 @@ #ifndef RecoLocalCalo_EcalRecProducers_plugins_AmplitudeComputationKernels_h #define RecoLocalCalo_EcalRecProducers_plugins_AmplitudeComputationKernels_h +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" #include "DeclsForKernels.h" -#include "EigenMatrixTypes_gpu.h" class EcalPulseShape; class EcalPulseCovariance; diff --git a/RecoLocalCalo/EcalRecProducers/plugins/BuildFile.xml b/RecoLocalCalo/EcalRecProducers/plugins/BuildFile.xml index 83b7e5f912c76..40ad5ade53326 100644 
--- a/RecoLocalCalo/EcalRecProducers/plugins/BuildFile.xml +++ b/RecoLocalCalo/EcalRecProducers/plugins/BuildFile.xml @@ -1,4 +1,3 @@ - @@ -9,8 +8,6 @@ - - @@ -22,5 +19,16 @@ + + + + + + + + + + + diff --git a/RecoLocalCalo/EcalRecProducers/plugins/DeclsForKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/DeclsForKernels.h index cbd28df94eb42..68bbc3400f23c 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/DeclsForKernels.h +++ b/RecoLocalCalo/EcalRecProducers/plugins/DeclsForKernels.h @@ -9,7 +9,6 @@ #include "CUDADataFormats/EcalDigi/interface/DigisCollection.h" #include "CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h" #include "CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h" -#include "CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h" #include "CondFormats/EcalObjects/interface/EcalChannelStatus.h" #include "CondFormats/EcalObjects/interface/EcalChannelStatusCode.h" #include "CondFormats/EcalObjects/interface/EcalGainRatios.h" @@ -32,9 +31,9 @@ #include "CondFormats/EcalObjects/interface/EcalTimeCalibConstantsGPU.h" #include "CondFormats/EcalObjects/interface/EcalTimeOffsetConstant.h" #include "CondFormats/EcalObjects/interface/EcalWeightSet.h" +#include "DataFormats/EcalRecHit/interface/RecoTypes.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" - -#include "EigenMatrixTypes_gpu.h" +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" struct EcalPulseShape; class EcalSampleMask; diff --git a/RecoLocalCalo/EcalRecProducers/plugins/EcalCPURecHitProducer.cc b/RecoLocalCalo/EcalRecProducers/plugins/EcalCPURecHitProducer.cc index 3de6b62898925..286f4cd2f413c 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/EcalCPURecHitProducer.cc +++ b/RecoLocalCalo/EcalRecProducers/plugins/EcalCPURecHitProducer.cc @@ -91,7 +91,7 @@ void EcalCPURecHitProducer::acquire(edm::Event const& event, cudaMemcpyDeviceToHost, ctx.stream())); // - // ./CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h:using 
StorageScalarType = float; + // ./DataFormats/EcalRecHit/interface/RecoTypes.h:using StorageScalarType = float; // cudaCheck(cudaMemcpyAsync(recHitsEB_.energy.data(), diff --git a/RecoLocalCalo/EcalRecProducers/plugins/EcalDetIdToBeRecoveredProducer.cc b/RecoLocalCalo/EcalRecProducers/plugins/EcalDetIdToBeRecoveredProducer.cc index 5bc751776abd4..765bc264bb38f 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/EcalDetIdToBeRecoveredProducer.cc +++ b/RecoLocalCalo/EcalRecProducers/plugins/EcalDetIdToBeRecoveredProducer.cc @@ -38,7 +38,6 @@ class EcalDetIdToBeRecoveredProducer : public edm::stream::EDProducer<> { public: explicit EcalDetIdToBeRecoveredProducer(const edm::ParameterSet& ps); void produce(edm::Event& evt, const edm::EventSetup& es) final; - void beginRun(edm::Run const& run, const edm::EventSetup& es) final; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: @@ -82,9 +81,9 @@ class EcalDetIdToBeRecoveredProducer : public edm::stream::EDProducer<> { }; EcalDetIdToBeRecoveredProducer::EcalDetIdToBeRecoveredProducer(const edm::ParameterSet& ps) { - ecalMappingToken_ = esConsumes(); - channelStatusToken_ = esConsumes(); - ttMapToken_ = esConsumes(); + ecalMappingToken_ = esConsumes(); + channelStatusToken_ = esConsumes(); + ttMapToken_ = esConsumes(); // SRP collections ebSrFlagToken_ = consumes(ps.getParameter("ebSrFlagCollection")); eeSrFlagToken_ = consumes(ps.getParameter("eeSrFlagCollection")); @@ -119,16 +118,6 @@ EcalDetIdToBeRecoveredProducer::EcalDetIdToBeRecoveredProducer(const edm::Parame produces>(scDetIdCollection_); } -void EcalDetIdToBeRecoveredProducer::beginRun(edm::Run const& run, const edm::EventSetup& es) { - edm::ESHandle pEcalMapping = es.getHandle(ecalMappingToken_); - ecalMapping_ = pEcalMapping.product(); - - edm::ESHandle pChStatus = es.getHandle(channelStatusToken_); - chStatus_ = pChStatus.product(); - - ttMap_ = es.getHandle(ttMapToken_); -} - // fuction return true if "coll" have "item" 
template bool include(const CollT& coll, const ItemT& item) { @@ -137,6 +126,10 @@ bool include(const CollT& coll, const ItemT& item) { } void EcalDetIdToBeRecoveredProducer::produce(edm::Event& ev, const edm::EventSetup& es) { + ecalMapping_ = &es.getData(ecalMappingToken_); + chStatus_ = &es.getData(channelStatusToken_); + ttMap_ = es.getHandle(ttMapToken_); + std::vector> ebDetIdColls; std::vector> eeDetIdColls; std::vector> ttColls; diff --git a/RecoLocalCalo/EcalRecProducers/plugins/EcalRecHitProducerGPU.cc b/RecoLocalCalo/EcalRecProducers/plugins/EcalRecHitProducerGPU.cc index 9edf3ad0087b1..86dbacbf69e3e 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/EcalRecHitProducerGPU.cc +++ b/RecoLocalCalo/EcalRecProducers/plugins/EcalRecHitProducerGPU.cc @@ -1,6 +1,5 @@ #include "CUDADataFormats/EcalRecHitSoA/interface/EcalRecHit.h" #include "CUDADataFormats/EcalRecHitSoA/interface/EcalUncalibratedRecHit.h" -#include "CUDADataFormats/EcalRecHitSoA/interface/RecoTypes.h" #include "CommonTools/Utils/interface/StringToEnumValue.h" #include "CondFormats/DataRecord/interface/EcalADCToGeVConstantRcd.h" #include "CondFormats/DataRecord/interface/EcalChannelStatusRcd.h" @@ -18,6 +17,7 @@ #include "CondFormats/EcalObjects/interface/EcalRechitADCToGeVConstantGPU.h" #include "CondFormats/EcalObjects/interface/EcalRechitChannelStatusGPU.h" #include "DataFormats/EcalRecHit/interface/EcalRecHit.h" +#include "DataFormats/EcalRecHit/interface/RecoTypes.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/Framework/interface/MakerMacros.h" diff --git a/RecoLocalCalo/EcalRecProducers/plugins/EcalUncalibRecHitSoAToLegacy.cc b/RecoLocalCalo/EcalRecProducers/plugins/EcalUncalibRecHitSoAToLegacy.cc new file mode 100644 index 0000000000000..32ebbf669186f --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/EcalUncalibRecHitSoAToLegacy.cc @@ -0,0 +1,105 @@ +#include 
"DataFormats/EcalDigi/interface/EcalDigiCollections.h" +#include "DataFormats/EcalRecHit/interface/EcalRecHitCollections.h" +#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/EmptyGroupDescription.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/EDPutToken.h" +#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHitHostCollection.h" + +class EcalUncalibRecHitSoAToLegacy : public edm::stream::EDProducer<> { +public: + explicit EcalUncalibRecHitSoAToLegacy(edm::ParameterSet const &ps); + ~EcalUncalibRecHitSoAToLegacy() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions &); + +private: + using InputProduct = EcalUncalibratedRecHitHostCollection; + void produce(edm::Event &, edm::EventSetup const &) override; + +private: + const bool isPhase2_; + const edm::EDGetTokenT uncalibRecHitsPortableEB_; + const edm::EDGetTokenT uncalibRecHitsPortableEE_; + const edm::EDPutTokenT uncalibRecHitsCPUEBToken_; + const edm::EDPutTokenT uncalibRecHitsCPUEEToken_; +}; + +void EcalUncalibRecHitSoAToLegacy::fillDescriptions(edm::ConfigurationDescriptions &confDesc) { + edm::ParameterSetDescription desc; + + desc.add("uncalibRecHitsPortableEB", + edm::InputTag("ecalMultiFitUncalibRecHitPortable", "EcalUncalibRecHitsEB")); + desc.add("recHitsLabelCPUEB", "EcalUncalibRecHitsEB"); + desc.ifValue(edm::ParameterDescription("isPhase2", false, true), + false >> (edm::ParameterDescription( + "uncalibRecHitsPortableEE", + edm::InputTag("ecalMultiFitUncalibRecHitPortable", "EcalUncalibRecHitsEE"), + true) and + edm::ParameterDescription("recHitsLabelCPUEE", "EcalUncalibRecHitsEE", true)) or + 
true >> edm::EmptyGroupDescription()); + confDesc.add("ecalUncalibRecHitSoAToLegacy", desc); +} + +EcalUncalibRecHitSoAToLegacy::EcalUncalibRecHitSoAToLegacy(edm::ParameterSet const &ps) + : isPhase2_{ps.getParameter("isPhase2")}, + uncalibRecHitsPortableEB_{consumes(ps.getParameter("uncalibRecHitsPortableEB"))}, + uncalibRecHitsPortableEE_{ + isPhase2_ ? edm::EDGetTokenT{} + : consumes(ps.getParameter("uncalibRecHitsPortableEE"))}, + uncalibRecHitsCPUEBToken_{ + produces(ps.getParameter("recHitsLabelCPUEB"))}, + uncalibRecHitsCPUEEToken_{ + isPhase2_ ? edm::EDPutTokenT{} + : produces(ps.getParameter("recHitsLabelCPUEE"))} {} + +void EcalUncalibRecHitSoAToLegacy::produce(edm::Event &event, edm::EventSetup const &setup) { + auto const &uncalRecHitsEBColl = event.get(uncalibRecHitsPortableEB_); + auto const &uncalRecHitsEBCollView = uncalRecHitsEBColl.const_view(); + auto recHitsCPUEB = std::make_unique(); + recHitsCPUEB->reserve(uncalRecHitsEBCollView.size()); + + for (uint32_t i = 0; i < uncalRecHitsEBCollView.size(); ++i) { + recHitsCPUEB->emplace_back(DetId{uncalRecHitsEBCollView.id()[i]}, + uncalRecHitsEBCollView.amplitude()[i], + uncalRecHitsEBCollView.pedestal()[i], + uncalRecHitsEBCollView.jitter()[i], + uncalRecHitsEBCollView.chi2()[i], + uncalRecHitsEBCollView.flags()[i]); + if (isPhase2_) { + (*recHitsCPUEB)[i].setAmplitudeError(uncalRecHitsEBCollView.amplitudeError()[i]); + } + (*recHitsCPUEB)[i].setJitterError(uncalRecHitsEBCollView.jitterError()[i]); + for (uint32_t sample = 0; sample < EcalDataFrame::MAXSAMPLES; ++sample) { + (*recHitsCPUEB)[i].setOutOfTimeAmplitude(sample, uncalRecHitsEBCollView.outOfTimeAmplitudes()[i][sample]); + } + } + event.put(uncalibRecHitsCPUEBToken_, std::move(recHitsCPUEB)); + + if (!isPhase2_) { + auto const &uncalRecHitsEEColl = event.get(uncalibRecHitsPortableEE_); + auto const &uncalRecHitsEECollView = uncalRecHitsEEColl.const_view(); + auto recHitsCPUEE = std::make_unique(); + 
recHitsCPUEE->reserve(uncalRecHitsEECollView.size()); + + for (uint32_t i = 0; i < uncalRecHitsEECollView.size(); ++i) { + recHitsCPUEE->emplace_back(DetId{uncalRecHitsEECollView.id()[i]}, + uncalRecHitsEECollView.amplitude()[i], + uncalRecHitsEECollView.pedestal()[i], + uncalRecHitsEECollView.jitter()[i], + uncalRecHitsEECollView.chi2()[i], + uncalRecHitsEECollView.flags()[i]); + (*recHitsCPUEE)[i].setJitterError(uncalRecHitsEECollView.jitterError()[i]); + for (uint32_t sample = 0; sample < EcalDataFrame::MAXSAMPLES; ++sample) { + (*recHitsCPUEE)[i].setOutOfTimeAmplitude(sample, uncalRecHitsEECollView.outOfTimeAmplitudes()[i][sample]); + } + } + event.put(uncalibRecHitsCPUEEToken_, std::move(recHitsCPUEE)); + } +} + +DEFINE_FWK_MODULE(EcalUncalibRecHitSoAToLegacy); diff --git a/RecoLocalCalo/EcalRecProducers/plugins/TimeComputationKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/TimeComputationKernels.h index dea6bad26fa0d..30cf742d44d10 100644 --- a/RecoLocalCalo/EcalRecProducers/plugins/TimeComputationKernels.h +++ b/RecoLocalCalo/EcalRecProducers/plugins/TimeComputationKernels.h @@ -8,9 +8,9 @@ #include "DataFormats/Math/interface/approx_exp.h" #include "DataFormats/Math/interface/approx_log.h" +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" #include "DeclsForKernels.h" -#include "EigenMatrixTypes_gpu.h" //#define DEBUG diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationCommonKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationCommonKernels.h new file mode 100644 index 0000000000000..e590ce0d8b795 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationCommonKernels.h @@ -0,0 +1,488 @@ +#ifndef RecoLocalCalo_EcalRecProducers_plugins_alpaka_AmplitudeComputationCommonKernels_h +#define RecoLocalCalo_EcalRecProducers_plugins_alpaka_AmplitudeComputationCommonKernels_h + +#include +#include +#include + +#include 
"CondFormats/EcalObjects/interface/alpaka/EcalMultifitConditionsDevice.h" +#include "DataFormats/EcalDigi/interface/alpaka/EcalDigiDeviceCollection.h" +#include "DataFormats/EcalRecHit/interface/alpaka/EcalUncalibratedRecHitDeviceCollection.h" +#include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" +#include "DataFormats/EcalDigi/interface/EcalDataFrame.h" +#include "DataFormats/EcalDigi/interface/EcalMGPASample.h" +#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h" +#include "FWCore/Utilities/interface/CMSUnrollLoop.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" + +#include "DeclsForKernels.h" +#include "KernelHelpers.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + /// + /// assume kernel launch configuration is + /// (MAXSAMPLES * nchannels, blocks) + /// TODO: is there a point to split this kernel further to separate reductions + /// + class Kernel_prep_1d_and_initialize { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + EcalUncalibratedRecHitDeviceCollection::View uncalibRecHitsEB, + EcalUncalibratedRecHitDeviceCollection::View uncalibRecHitsEE, + EcalMultifitConditionsDevice::ConstView conditionsDev, + ::ecal::multifit::SampleVector* amplitudes, + ::ecal::multifit::SampleGainVector* gainsNoise, + bool* hasSwitchToGain6, + bool* hasSwitchToGain1, + bool* isSaturated, + char* acState, + ::ecal::multifit::BXVectorType* bxs, + bool const gainSwitchUseMaxSampleEB, + bool const gainSwitchUseMaxSampleEE) const { + constexpr bool dynamicPedestal = false; //---- default to false, ok + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + constexpr int sample_max = 5; + constexpr int full_pulse_max = 9; + auto const offsetForHashes = 
conditionsDev.offsetEE(); + + auto const nchannelsEB = digisDevEB.size(); + auto const nchannelsEE = digisDevEE.size(); + auto const nchannels = nchannelsEB + nchannelsEE; + auto const totalElements = nchannels * nsamples; + + auto const elemsPerBlock = alpaka::getWorkDiv(acc)[0u]; + + char* shared_mem = alpaka::getDynSharedMem(acc); + auto* shr_hasSwitchToGain6 = reinterpret_cast(shared_mem); + auto* shr_hasSwitchToGain1 = shr_hasSwitchToGain6 + elemsPerBlock; + auto* shr_hasSwitchToGain0 = shr_hasSwitchToGain1 + elemsPerBlock; + auto* shr_isSaturated = shr_hasSwitchToGain0 + elemsPerBlock; + auto* shr_hasSwitchToGain0_tmp = shr_isSaturated + elemsPerBlock; + auto* shr_counts = reinterpret_cast(shr_hasSwitchToGain0_tmp) + elemsPerBlock; + + for (auto block : cms::alpakatools::blocks_with_stride(acc, totalElements)) { + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + // set the output collection size scalars + if (idx.global == 0) { + uncalibRecHitsEB.size() = nchannelsEB; + uncalibRecHitsEE.size() = nchannelsEE; + } + + auto const ch = idx.global / nsamples; + // for accessing input arrays + int const inputTx = ch >= nchannelsEB ? idx.global - nchannelsEB * nsamples : idx.global; + // eb is first and then ee + auto const* digis_in = ch >= nchannelsEB ? 
digisDevEE.data()->data() : digisDevEB.data()->data(); + auto const gainId = ecalMGPA::gainId(digis_in[inputTx]); + + // store into shared mem for initialization + shr_hasSwitchToGain6[idx.local] = gainId == EcalMgpaBitwiseGain6; + shr_hasSwitchToGain1[idx.local] = gainId == EcalMgpaBitwiseGain1; + shr_hasSwitchToGain0_tmp[idx.local] = gainId == EcalMgpaBitwiseGain0; + shr_hasSwitchToGain0[idx.local] = shr_hasSwitchToGain0_tmp[idx.local]; + shr_counts[idx.local] = 0; + } + + alpaka::syncBlockThreads(acc); + + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const sample = idx.local % nsamples; + + // non-divergent branch (except for the last 4 threads) + if (idx.local <= elemsPerBlock - 5) { + CMS_UNROLL_LOOP + for (int i = 0; i < 5; ++i) + shr_counts[idx.local] += shr_hasSwitchToGain0[idx.local + i]; + } + shr_isSaturated[idx.local] = shr_counts[idx.local] == 5; + + // + // unrolled reductions + // + if (sample < 5) { + shr_hasSwitchToGain6[idx.local] = shr_hasSwitchToGain6[idx.local] || shr_hasSwitchToGain6[idx.local + 5]; + shr_hasSwitchToGain1[idx.local] = shr_hasSwitchToGain1[idx.local] || shr_hasSwitchToGain1[idx.local + 5]; + + // duplication of hasSwitchToGain0 in order not to + // introduce another syncthreads + shr_hasSwitchToGain0_tmp[idx.local] = + shr_hasSwitchToGain0_tmp[idx.local] || shr_hasSwitchToGain0_tmp[idx.local + 5]; + } + } + + alpaka::syncBlockThreads(acc); + + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const sample = idx.local % nsamples; + + if (sample < 2) { + // note, both threads per channel take value [3] twice to avoid another if + shr_hasSwitchToGain6[idx.local] = shr_hasSwitchToGain6[idx.local] || shr_hasSwitchToGain6[idx.local + 2] || + shr_hasSwitchToGain6[idx.local + 3]; + shr_hasSwitchToGain1[idx.local] = shr_hasSwitchToGain1[idx.local] || shr_hasSwitchToGain1[idx.local + 2] || + shr_hasSwitchToGain1[idx.local + 3]; + + 
shr_hasSwitchToGain0_tmp[idx.local] = shr_hasSwitchToGain0_tmp[idx.local] || + shr_hasSwitchToGain0_tmp[idx.local + 2] || + shr_hasSwitchToGain0_tmp[idx.local + 3]; + + // sample < 2 -> first 2 threads of each channel will be used here + // => 0 -> will compare 3 and 4 and put into 0 + // => 1 -> will compare 4 and 5 and put into 1 + shr_isSaturated[idx.local] = shr_isSaturated[idx.local + 3] || shr_isSaturated[idx.local + 4]; + } + } + + alpaka::syncBlockThreads(acc); + + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ch = idx.global / nsamples; + auto const sample = idx.local % nsamples; + + if (sample == 0) { + shr_hasSwitchToGain6[idx.local] = shr_hasSwitchToGain6[idx.local] || shr_hasSwitchToGain6[idx.local + 1]; + shr_hasSwitchToGain1[idx.local] = shr_hasSwitchToGain1[idx.local] || shr_hasSwitchToGain1[idx.local + 1]; + shr_hasSwitchToGain0_tmp[idx.local] = + shr_hasSwitchToGain0_tmp[idx.local] || shr_hasSwitchToGain0_tmp[idx.local + 1]; + + hasSwitchToGain6[ch] = shr_hasSwitchToGain6[idx.local]; + hasSwitchToGain1[ch] = shr_hasSwitchToGain1[idx.local]; + + shr_isSaturated[idx.local + 3] = shr_isSaturated[idx.local] || shr_isSaturated[idx.local + 1]; + isSaturated[ch] = shr_isSaturated[idx.local + 3]; + } + } + + // TODO: w/o this sync, there is a race + // if (idx.local == sample_max) below uses max sample thread, not for 0 sample + // check if we can remove it + alpaka::syncBlockThreads(acc); + + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ch = idx.global / nsamples; + auto const sample = idx.local % nsamples; + + // for accessing input arrays + int const inputCh = ch >= nchannelsEB ? ch - nchannelsEB : ch; + int const inputTx = ch >= nchannelsEB ? idx.global - nchannelsEB * nsamples : idx.global; + + auto const* dids = ch >= nchannelsEB ? 
digisDevEE.id() : digisDevEB.id(); + auto const did = DetId{dids[inputCh]}; + auto const isBarrel = did.subdetId() == EcalBarrel; + // TODO offset for ee, 0 for eb + auto const hashedId = isBarrel ? reconstruction::hashedIndexEB(did.rawId()) + : offsetForHashes + reconstruction::hashedIndexEE(did.rawId()); + + // eb is first and then ee + auto const* digis_in = ch >= nchannelsEB ? digisDevEE.data()->data() : digisDevEB.data()->data(); + + auto* amplitudesForMinimization = reinterpret_cast<::ecal::multifit::SampleVector*>( + ch >= nchannelsEB ? uncalibRecHitsEE.outOfTimeAmplitudes()->data() + : uncalibRecHitsEB.outOfTimeAmplitudes()->data()); + auto* energies = ch >= nchannelsEB ? uncalibRecHitsEE.amplitude() : uncalibRecHitsEB.amplitude(); + auto* chi2 = ch >= nchannelsEB ? uncalibRecHitsEE.chi2() : uncalibRecHitsEB.chi2(); + auto* g_pedestal = ch >= nchannelsEB ? uncalibRecHitsEE.pedestal() : uncalibRecHitsEB.pedestal(); + auto* dids_out = ch >= nchannelsEB ? uncalibRecHitsEE.id() : uncalibRecHitsEB.id(); + auto* flags = ch >= nchannelsEB ? 
uncalibRecHitsEE.flags() : uncalibRecHitsEB.flags(); + + auto const adc = ecalMGPA::adc(digis_in[inputTx]); + auto const gainId = ecalMGPA::gainId(digis_in[inputTx]); + ::ecal::multifit::SampleVector::Scalar amplitude = 0.; + ::ecal::multifit::SampleVector::Scalar pedestal = 0.; + ::ecal::multifit::SampleVector::Scalar gainratio = 0.; + + // TODO: divergent branch + if (gainId == 0 || gainId == 3) { + pedestal = conditionsDev.pedestals_mean_x1()[hashedId]; + gainratio = conditionsDev.gain6Over1()[hashedId] * conditionsDev.gain12Over6()[hashedId]; + gainsNoise[ch](sample) = 2; + } else if (gainId == 1) { + pedestal = conditionsDev.pedestals_mean_x12()[hashedId]; + gainratio = 1.; + gainsNoise[ch](sample) = 0; + } else if (gainId == 2) { + pedestal = conditionsDev.pedestals_mean_x6()[hashedId]; + gainratio = conditionsDev.gain12Over6()[hashedId]; + gainsNoise[ch](sample) = 1; + } + + // TODO: compile time constant -> branch should be non-divergent + if (dynamicPedestal) + amplitude = static_cast<::ecal::multifit::SampleVector::Scalar>(adc) * gainratio; + else + amplitude = (static_cast<::ecal::multifit::SampleVector::Scalar>(adc) - pedestal) * gainratio; + amplitudes[ch][sample] = amplitude; + +#ifdef ECAL_RECO_ALPAKA_DEBUG + printf("%d %d %d %d %f %f %f\n", idx.global, ch, sample, adc, amplitude, pedestal, gainratio); + if (adc == 0) + printf("adc is zero\n"); +#endif + + // + // initialization + // + amplitudesForMinimization[inputCh](sample) = 0; + bxs[ch](sample) = sample - 5; + + // select the thread for the max sample + //---> hardcoded above to be 5th sample, ok + if (sample == sample_max) { + // + // initialization + // + acState[ch] = static_cast(MinimizationState::NotFinished); + energies[inputCh] = 0; + chi2[inputCh] = 0; + g_pedestal[inputCh] = 0; + uint32_t flag = 0; + dids_out[inputCh] = did.rawId(); + + // start of this channel in shared mem + auto const chStart = idx.local - sample_max; + // thread for the max sample in shared mem + auto const 
threadMax = idx.local; + auto const gainSwitchUseMaxSample = isBarrel ? gainSwitchUseMaxSampleEB : gainSwitchUseMaxSampleEE; + + // this flag setting is applied to all of the cases + if (shr_hasSwitchToGain6[chStart]) + flag |= 0x1 << EcalUncalibratedRecHit::kHasSwitchToGain6; + if (shr_hasSwitchToGain1[chStart]) + flag |= 0x1 << EcalUncalibratedRecHit::kHasSwitchToGain1; + + // this corresponds to cpu branching on lastSampleBeforeSaturation + // likely false + // check only for the idx.local corresponding to sample==0 + if (sample == 0 && shr_hasSwitchToGain0_tmp[idx.local]) { + // assign for the case some sample having gainId == 0 + //energies[inputCh] = amplitudes[ch][sample_max]; + energies[inputCh] = amplitude; + + // check if samples before sample_max have true + bool saturated_before_max = false; + CMS_UNROLL_LOOP + for (char ii = 0; ii < 5; ++ii) + saturated_before_max = saturated_before_max || shr_hasSwitchToGain0[chStart + ii]; + + // if saturation is in the max sample and not in the first 5 + if (!saturated_before_max && shr_hasSwitchToGain0[threadMax]) + energies[inputCh] = 49140; // 4095 * 12 (maximum ADC range * MultiGainPreAmplifier (MGPA) gain) + // This is the actual maximum range that is set when we saturate. + //---- AM FIXME : no pedestal subtraction??? + //It should be "(4095. 
- pedestal) * gainratio" + + // set state flag to terminate further processing of this channel + acState[ch] = static_cast(MinimizationState::Precomputed); + flag |= 0x1 << EcalUncalibratedRecHit::kSaturated; + flags[inputCh] = flag; + continue; + } + + // according to cpu version + // auto max_amplitude = amplitudes[ch][sample_max]; + auto const max_amplitude = amplitude; + // pulse shape template value + auto shape_value = conditionsDev.pulseShapes()[hashedId][full_pulse_max - 7]; + // note, no syncing as the same thread will be accessing here + bool hasGainSwitch = + shr_hasSwitchToGain6[chStart] || shr_hasSwitchToGain1[chStart] || shr_isSaturated[chStart + 3]; + + // pedestal is final unconditionally + g_pedestal[inputCh] = pedestal; + if (hasGainSwitch && gainSwitchUseMaxSample) { + // thread for sample=0 will access the right guys + energies[inputCh] = max_amplitude / shape_value; + acState[ch] = static_cast(MinimizationState::Precomputed); + flags[inputCh] = flag; + continue; + } + + // will be used in the future for setting state + auto const rmsForChecking = conditionsDev.pedestals_rms_x12()[hashedId]; + + // this happens cause sometimes rms_x12 is 0... 
+ // needs to be checkec why this is the case + // general case here is that noisecov is a Zero matrix + if (rmsForChecking == 0) { + acState[ch] = static_cast(MinimizationState::Precomputed); + flags[inputCh] = flag; + continue; + } + + // for the case when no shortcuts were taken + flags[inputCh] = flag; + } + } + } + } + }; + + /// + /// assume kernel launch configuration is + /// ([MAXSAMPLES, MAXSAMPLES], nchannels) + /// + class Kernel_prep_2d { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + EcalMultifitConditionsDevice::ConstView conditionsDev, + ::ecal::multifit::SampleGainVector const* gainsNoise, + ::ecal::multifit::SampleMatrix* noisecov, + ::ecal::multifit::PulseMatrixType* pulse_matrix, + bool const* hasSwitchToGain6, + bool const* hasSwitchToGain1, + bool const* isSaturated) const { + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + auto const offsetForHashes = conditionsDev.offsetEE(); + auto const nchannelsEB = digisDevEB.size(); + constexpr float addPedestalUncertainty = 0.f; + constexpr bool dynamicPedestal = false; + constexpr bool simplifiedNoiseModelForGainSwitch = true; //---- default is true + + // pulse matrix + auto const* pulse_shapes = reinterpret_cast(conditionsDev.pulseShapes()->data()); + + auto const blockDimX = alpaka::getWorkDiv(acc)[1u]; + auto const elemsPerBlockX = alpaka::getWorkDiv(acc)[1u]; + auto const elemsPerBlockY = alpaka::getWorkDiv(acc)[0u]; + Vec2D const size_2d = {elemsPerBlockY, blockDimX * elemsPerBlockX}; // {y, x} coordinates + + for (auto ndindex : cms::alpakatools::elements_with_stride_nd(acc, size_2d)) { + auto const ch = ndindex[1] / nsamples; + auto const tx = ndindex[1] % nsamples; + auto const ty = ndindex[0]; + + // to access input arrays (ids and digis only) + int const inputCh = ch >= nchannelsEB ? ch - nchannelsEB : ch; + auto const* dids = ch >= nchannelsEB ? 
digisDevEE.id() : digisDevEB.id(); + + auto const did = DetId{dids[inputCh]}; + auto const isBarrel = did.subdetId() == EcalBarrel; + auto const hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId()) + : offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId()); + auto const* G12SamplesCorrelation = isBarrel ? conditionsDev.sampleCorrelation_EB_G12().data() + : conditionsDev.sampleCorrelation_EE_G12().data(); + auto const* G6SamplesCorrelation = + isBarrel ? conditionsDev.sampleCorrelation_EB_G6().data() : conditionsDev.sampleCorrelation_EE_G6().data(); + auto const* G1SamplesCorrelation = + isBarrel ? conditionsDev.sampleCorrelation_EB_G1().data() : conditionsDev.sampleCorrelation_EE_G1().data(); + auto const hasGainSwitch = hasSwitchToGain6[ch] || hasSwitchToGain1[ch] || isSaturated[ch]; + + auto const vidx = std::abs(static_cast(ty) - static_cast(tx)); + + // non-divergent branch for all threads per block + if (hasGainSwitch) { + // TODO: did not include simplified noise model + float noise_value = 0; + + // non-divergent branch - all threads per block + // TODO: all of these constants indicate that + // that these parts could be splitted into completely different + // kernels and run one of them only depending on the config + if (simplifiedNoiseModelForGainSwitch) { + constexpr int isample_max = 5; // according to cpu defs + auto const gainidx = gainsNoise[ch][isample_max]; + + // non-divergent branches + if (gainidx == 0) { + auto const rms_x12 = conditionsDev.pedestals_rms_x12()[hashedId]; + noise_value = rms_x12 * rms_x12 * G12SamplesCorrelation[vidx]; + } else if (gainidx == 1) { + auto const gain12Over6 = conditionsDev.gain12Over6()[hashedId]; + auto const rms_x6 = conditionsDev.pedestals_rms_x6()[hashedId]; + noise_value = gain12Over6 * gain12Over6 * rms_x6 * rms_x6 * G6SamplesCorrelation[vidx]; + } else if (gainidx == 2) { + auto const gain12Over6 = conditionsDev.gain12Over6()[hashedId]; + auto const gain6Over1 = 
conditionsDev.gain6Over1()[hashedId]; + auto const gain12Over1 = gain12Over6 * gain6Over1; + auto const rms_x1 = conditionsDev.pedestals_rms_x1()[hashedId]; + noise_value = gain12Over1 * gain12Over1 * rms_x1 * rms_x1 * G1SamplesCorrelation[vidx]; + } + if (!dynamicPedestal && addPedestalUncertainty > 0.f) + noise_value += addPedestalUncertainty * addPedestalUncertainty; + } else { + int gainidx = 0; + char mask = gainidx; + int pedestal = gainsNoise[ch][ty] == mask ? 1 : 0; + // NB: gainratio is 1, that is why it does not appear in the formula + auto const rms_x12 = conditionsDev.pedestals_rms_x12()[hashedId]; + noise_value += rms_x12 * rms_x12 * pedestal * G12SamplesCorrelation[vidx]; + // non-divergent branch + if (!dynamicPedestal && addPedestalUncertainty > 0.f) { + noise_value += addPedestalUncertainty * addPedestalUncertainty * pedestal; // gainratio is 1 + } + + // + gainidx = 1; + mask = gainidx; + pedestal = gainsNoise[ch][ty] == mask ? 1 : 0; + auto const gain12Over6 = conditionsDev.gain12Over6()[hashedId]; + auto const rms_x6 = conditionsDev.pedestals_rms_x6()[hashedId]; + noise_value += gain12Over6 * gain12Over6 * rms_x6 * rms_x6 * pedestal * G6SamplesCorrelation[vidx]; + // non-divergent branch + if (!dynamicPedestal && addPedestalUncertainty > 0.f) { + noise_value += gain12Over6 * gain12Over6 * addPedestalUncertainty * addPedestalUncertainty * pedestal; + } + + // + gainidx = 2; + mask = gainidx; + pedestal = gainsNoise[ch][ty] == mask ? 
1 : 0; + auto const gain6Over1 = conditionsDev.gain6Over1()[hashedId]; + auto const gain12Over1 = gain12Over6 * gain6Over1; + auto const rms_x1 = conditionsDev.pedestals_rms_x1()[hashedId]; + noise_value += gain12Over1 * gain12Over1 * rms_x1 * rms_x1 * pedestal * G1SamplesCorrelation[vidx]; + // non-divergent branch + if (!dynamicPedestal && addPedestalUncertainty > 0.f) { + noise_value += gain12Over1 * gain12Over1 * addPedestalUncertainty * addPedestalUncertainty * pedestal; + } + } + + noisecov[ch](ty, tx) = noise_value; + } else { + auto const rms = conditionsDev.pedestals_rms_x12()[hashedId]; + float noise_value = rms * rms * G12SamplesCorrelation[vidx]; + if (!dynamicPedestal && addPedestalUncertainty > 0.f) { + //---- add fully correlated component to noise covariance to inflate pedestal uncertainty + noise_value += addPedestalUncertainty * addPedestalUncertainty; + } + noisecov[ch](ty, tx) = noise_value; + } + + auto const posToAccess = 9 - static_cast(tx) + static_cast(ty); // see cpu for reference + float const value = posToAccess >= 7 ? pulse_shapes[hashedId].pdfval[posToAccess - 7] : 0; + pulse_matrix[ch](ty, tx) = value; + } + } + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit + +namespace alpaka::trait { + using namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit; + + //! The trait for getting the size of the block shared dynamic memory for Kernel_prep_1d_and_initialize. + template + struct BlockSharedMemDynSizeBytes { + //! \return The size of the shared memory allocated for a block. + template + ALPAKA_FN_HOST_ACC static auto getBlockSharedMemDynSizeBytes(Kernel_prep_1d_and_initialize const&, + TVec const& threadsPerBlock, + TVec const& elemsPerThread, + TArgs const&...) 
-> std::size_t { + // return the amount of dynamic shared memory needed + std::size_t bytes = threadsPerBlock[0u] * elemsPerThread[0u] * (5 * sizeof(bool) + sizeof(char)); + return bytes; + } + }; +} // namespace alpaka::trait + +#endif // RecoLocalCalo_EcalRecProducers_plugins_AmplitudeComputationCommonKernels_h diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationKernels.dev.cc b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationKernels.dev.cc new file mode 100644 index 0000000000000..fcf9e5de16f40 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationKernels.dev.cc @@ -0,0 +1,316 @@ +#include +#include +#include + +#include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h" +#include "DataFormats/CaloRecHit/interface/MultifitComputations.h" +#include "FWCore/Utilities/interface/CMSUnrollLoop.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +#include "AmplitudeComputationKernels.h" +#include "KernelHelpers.h" +#include "EcalUncalibRecHitMultiFitAlgoPortable.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + using namespace ::ecal::multifit; + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void update_covariance(EcalPulseCovariance const& pulse_covariance, + MatrixType& inverse_cov, + SampleVector const& amplitudes) { + constexpr auto nsamples = SampleVector::RowsAtCompileTime; + constexpr auto npulses = BXVectorType::RowsAtCompileTime; + + CMS_UNROLL_LOOP + for (unsigned int ipulse = 0; ipulse < npulses; ++ipulse) { + auto const amplitude = amplitudes.coeff(ipulse); + if (amplitude == 0) + continue; + + // FIXME: ipulse - 5 -> ipulse - firstOffset + int bx = ipulse - 5; + int first_sample_t = std::max(0, bx + 3); + int offset = -3 - bx; + + auto const value_sq = amplitude * amplitude; + + for (int col = first_sample_t; col < nsamples; ++col) { + for (int row = col; row < nsamples; ++row) { + inverse_cov(row, col) += value_sq * 
pulse_covariance.covval[row + offset][col + offset]; + } + } + } + } + + /// + /// launch ctx parameters are (nchannels / block, blocks) + /// TODO: trivial impl for now, there must be a way to improve + /// + /// Conventions: + /// - amplitudes -> solution vector, what we are fitting for + /// - samples -> raw detector responses + /// - passive constraint - satisfied constraint + /// - active constraint - unsatisfied (yet) constraint + /// + class Kernel_minimize { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + InputProduct::ConstView const& digisDevEB, + InputProduct::ConstView const& digisDevEE, + OutputProduct::View uncalibRecHitsEB, + OutputProduct::View uncalibRecHitsEE, + EcalMultifitConditionsDevice::ConstView conditionsDev, + ::ecal::multifit::SampleMatrix const* noisecov, + ::ecal::multifit::PulseMatrixType const* pulse_matrix, + ::ecal::multifit::BXVectorType* bxs, + ::ecal::multifit::SampleVector const* samples, + bool* hasSwitchToGain6, + bool* hasSwitchToGain1, + bool* isSaturated, + char* acState, + int max_iterations) const { + // FIXME: ecal has 10 samples and 10 pulses.... 
+ // but this needs to be properly treated and renamed everywhere + constexpr auto NSAMPLES = SampleMatrix::RowsAtCompileTime; + constexpr auto NPULSES = SampleMatrix::ColsAtCompileTime; + static_assert(NSAMPLES == NPULSES); + + using DataType = SampleVector::Scalar; + + auto const elemsPerBlock(alpaka::getWorkDiv(acc)[0u]); + + auto const nchannelsEB = digisDevEB.size(); + auto const nchannels = nchannelsEB + digisDevEE.size(); + auto const offsetForHashes = conditionsDev.offsetEE(); + + auto const* pulse_covariance = reinterpret_cast(conditionsDev.pulseCovariance()); + + // shared memory + DataType* shrmem = alpaka::getDynSharedMem(acc); + + // channel + for (auto idx : cms::alpakatools::elements_with_stride(acc, nchannels)) { + if (static_cast(acState[idx]) == MinimizationState::Precomputed) + continue; + + auto const elemIdx = idx % elemsPerBlock; + + // shared memory pointers + DataType* shrMatrixLForFnnlsStorage = shrmem + calo::multifit::MapSymM::total * elemIdx; + DataType* shrAtAStorage = + shrmem + calo::multifit::MapSymM::total * (elemIdx + elemsPerBlock); + + auto* amplitudes = + reinterpret_cast(idx >= nchannelsEB ? uncalibRecHitsEE.outOfTimeAmplitudes()->data() + : uncalibRecHitsEB.outOfTimeAmplitudes()->data()); + auto* energies = idx >= nchannelsEB ? uncalibRecHitsEE.amplitude() : uncalibRecHitsEB.amplitude(); + auto* chi2s = idx >= nchannelsEB ? uncalibRecHitsEE.chi2() : uncalibRecHitsEB.chi2(); + + // get the hash + int const inputCh = idx >= nchannelsEB ? idx - nchannelsEB : idx; + auto const* dids = idx >= nchannelsEB ? digisDevEE.id() : digisDevEB.id(); + auto const did = DetId{dids[inputCh]}; + auto const isBarrel = did.subdetId() == EcalBarrel; + auto const hashedId = isBarrel ? 
ecal::reconstruction::hashedIndexEB(did.rawId()) + : offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId()); + + // inits + int npassive = 0; + + calo::multifit::ColumnVector pulseOffsets; + CMS_UNROLL_LOOP + for (int i = 0; i < NPULSES; ++i) + pulseOffsets(i) = i; + + calo::multifit::ColumnVector resultAmplitudes; + CMS_UNROLL_LOOP + for (int counter = 0; counter < NPULSES; ++counter) + resultAmplitudes(counter) = 0; + + // inits + //SampleDecompLLT covariance_decomposition; + //SampleMatrix inverse_cov; + // SampleVector::Scalar chi2 = 0, chi2_now = 0; + float chi2 = 0, chi2_now = 0; + + // loop for up to max_iterations + for (int iter = 0; iter < max_iterations; ++iter) { + //inverse_cov = noisecov[idx]; + //DataType covMatrixStorage[MapSymM::total]; + DataType* covMatrixStorage = shrMatrixLForFnnlsStorage; + calo::multifit::MapSymM covMatrix{covMatrixStorage}; + int counter = 0; + CMS_UNROLL_LOOP + for (int col = 0; col < NSAMPLES; ++col) { + CMS_UNROLL_LOOP + for (int row = col; row < NSAMPLES; ++row) { + covMatrixStorage[counter++] = noisecov[idx].coeffRef(row, col); + } + } + update_covariance(pulse_covariance[hashedId], covMatrix, resultAmplitudes); + + // compute actual covariance decomposition + //covariance_decomposition.compute(inverse_cov); + //auto const& matrixL = covariance_decomposition.matrixL(); + DataType matrixLStorage[calo::multifit::MapSymM::total]; + calo::multifit::MapSymM matrixL{matrixLStorage}; + calo::multifit::compute_decomposition_unrolled(matrixL, covMatrix); + + // L * A = P + calo::multifit::ColMajorMatrix A; + calo::multifit::solve_forward_subst_matrix(A, pulse_matrix[idx], matrixL); + + // L b = s + float reg_b[NSAMPLES]; + calo::multifit::solve_forward_subst_vector(reg_b, samples[idx], matrixL); + + // FIXME: shared mem + //DataType AtAStorage[MapSymM::total]; + calo::multifit::MapSymM AtA{shrAtAStorage}; + //SampleMatrix AtA; + SampleVector Atb; + CMS_UNROLL_LOOP + for (int icol = 0; icol < NPULSES; ++icol) { + 
float reg_ai[NSAMPLES]; + + // load column icol + CMS_UNROLL_LOOP + for (int counter = 0; counter < NSAMPLES; ++counter) + reg_ai[counter] = A(counter, icol); + + // compute diagoanl + float sum = 0.f; + CMS_UNROLL_LOOP + for (int counter = 0; counter < NSAMPLES; ++counter) + sum += reg_ai[counter] * reg_ai[counter]; + + // store + AtA(icol, icol) = sum; + + // go thru the other columns + CMS_UNROLL_LOOP + for (int j = icol + 1; j < NPULSES; ++j) { + // load column j + float reg_aj[NSAMPLES]; + CMS_UNROLL_LOOP + for (int counter = 0; counter < NSAMPLES; ++counter) + reg_aj[counter] = A(counter, j); + + // accum + float sum = 0.f; + CMS_UNROLL_LOOP + for (int counter = 0; counter < NSAMPLES; ++counter) + sum += reg_aj[counter] * reg_ai[counter]; + + // store + //AtA(icol, j) = sum; + AtA(j, icol) = sum; + } + + // Atb accum + float sum_atb = 0.f; + CMS_UNROLL_LOOP + for (int counter = 0; counter < NSAMPLES; ++counter) + sum_atb += reg_ai[counter] * reg_b[counter]; + + // store atb + Atb(icol) = sum_atb; + } + + // FIXME: shared mem + //DataType matrixLForFnnlsStorage[MapSymM::total]; + calo::multifit::MapSymM matrixLForFnnls{shrMatrixLForFnnlsStorage}; + + calo::multifit::fnnls(AtA, + Atb, + //amplitudes[idx], + resultAmplitudes, + npassive, + pulseOffsets, + matrixLForFnnls, + 1e-11, + 500, + 16, + 2); + + calo::multifit::calculateChiSq(matrixL, pulse_matrix[idx], resultAmplitudes, samples[idx], chi2_now); + + auto const deltachi2 = chi2_now - chi2; + chi2 = chi2_now; + + if (std::abs(deltachi2) < 1e-3) + break; + } + + // store to global output values + // FIXME: amplitudes are used in global directly + chi2s[inputCh] = chi2; + energies[inputCh] = resultAmplitudes(5); + + CMS_UNROLL_LOOP + for (int counter = 0; counter < NPULSES; ++counter) + amplitudes[inputCh](counter) = resultAmplitudes(counter); + } + } + }; + + void minimization_procedure(Queue& queue, + InputProduct const& digisDevEB, + InputProduct const& digisDevEE, + OutputProduct& uncalibRecHitsDevEB, + 
OutputProduct& uncalibRecHitsDevEE, + EventDataForScratchDevice& scratch, + EcalMultifitConditionsDevice const& conditionsDev, + ConfigurationParameters const& configParams, + uint32_t const totalChannels) { + using DataType = SampleVector::Scalar; + // TODO: configure from python + auto threads_min = configParams.kernelMinimizeThreads[0]; + auto blocks_min = cms::alpakatools::divide_up_by(totalChannels, threads_min); + + auto workDivMinimize = cms::alpakatools::make_workdiv(blocks_min, threads_min); + alpaka::exec(queue, + workDivMinimize, + Kernel_minimize{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + uncalibRecHitsDevEB.view(), + uncalibRecHitsDevEE.view(), + conditionsDev.const_view(), + reinterpret_cast<::ecal::multifit::SampleMatrix*>(scratch.noisecovDevBuf.data()), + reinterpret_cast<::ecal::multifit::PulseMatrixType*>(scratch.pulse_matrixDevBuf.data()), + reinterpret_cast<::ecal::multifit::BXVectorType*>(scratch.activeBXsDevBuf.data()), + reinterpret_cast<::ecal::multifit::SampleVector*>(scratch.samplesDevBuf.data()), + scratch.hasSwitchToGain6DevBuf.data(), + scratch.hasSwitchToGain1DevBuf.data(), + scratch.isSaturatedDevBuf.data(), + scratch.acStateDevBuf.data(), + 50); // maximum number of fit iterations + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit + +namespace alpaka::trait { + using namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit; + + //! The trait for getting the size of the block shared dynamic memory for Kernel_minimize. + template + struct BlockSharedMemDynSizeBytes { + //! \return The size of the shared memory allocated for a block. + template + ALPAKA_FN_HOST_ACC static auto getBlockSharedMemDynSizeBytes(Kernel_minimize const&, + TVec const& threadsPerBlock, + TVec const& elemsPerThread, + TArgs const&...) 
-> std::size_t { + using ScalarType = ecal::multifit::SampleVector::Scalar; + + // return the amount of dynamic shared memory needed + std::size_t bytes = 2 * threadsPerBlock[0u] * elemsPerThread[0u] * + calo::multifit::MapSymM::total * + sizeof(ScalarType); + return bytes; + } + }; +} // namespace alpaka::trait diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationKernels.h new file mode 100644 index 0000000000000..fa8700301bc81 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/AmplitudeComputationKernels.h @@ -0,0 +1,28 @@ +#ifndef RecoLocalCalo_EcalRecProducers_plugins_alpaka_AmplitudeComputationKernels_h +#define RecoLocalCalo_EcalRecProducers_plugins_alpaka_AmplitudeComputationKernels_h + +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitConditionsDevice.h" +#include "DataFormats/EcalDigi/interface/alpaka/EcalDigiDeviceCollection.h" +#include "DataFormats/EcalRecHit/interface/alpaka/EcalUncalibratedRecHitDeviceCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "DeclsForKernels.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + using InputProduct = EcalDigiDeviceCollection; + using OutputProduct = EcalUncalibratedRecHitDeviceCollection; + + void minimization_procedure(Queue& queue, + InputProduct const& digisDevEB, + InputProduct const& digisDevEE, + OutputProduct& uncalibRecHitsDevEB, + OutputProduct& uncalibRecHitsDevEE, + EventDataForScratchDevice& scratch, + EcalMultifitConditionsDevice const& conditionsDev, + ConfigurationParameters const& configParams, + uint32_t const totalChannels); + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit + +#endif // RecoLocalCalo_EcalRecProducers_plugins_AmplitudeComputationKernels_h diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/DeclsForKernels.h 
b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/DeclsForKernels.h new file mode 100644 index 0000000000000..6f96b26d253d1 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/DeclsForKernels.h @@ -0,0 +1,130 @@ +#ifndef RecoLocalCalo_EcalRecProducers_plugins_alpaka_DeclsForKernels_h +#define RecoLocalCalo_EcalRecProducers_plugins_alpaka_DeclsForKernels_h + +#include + +#include "CondFormats/EcalObjects/interface/EcalTimeOffsetConstant.h" +#include "DataFormats/EcalDigi/interface/EcalDataFrame.h" +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" + +class EcalSampleMask; + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + enum class TimeComputationState : char { NotFinished = 0, Finished = 1 }; + enum class MinimizationState : char { + NotFinished = 0, + Finished = 1, + Precomputed = 2, + }; + + // parameters have a fixed type + // Can we go by with single precision + struct ConfigurationParameters { + using type = double; + + type timeFitLimitsFirstEB, timeFitLimitsFirstEE; + type timeFitLimitsSecondEB, timeFitLimitsSecondEE; + + type timeConstantTermEB, timeConstantTermEE; + + type timeNconstEB, timeNconstEE; + + type amplitudeThreshEE, amplitudeThreshEB; + + type outOfTimeThreshG12pEB, outOfTimeThreshG12mEB; + type outOfTimeThreshG12pEE, outOfTimeThreshG12mEE; + type outOfTimeThreshG61pEE, outOfTimeThreshG61mEE; + type outOfTimeThreshG61pEB, outOfTimeThreshG61mEB; + + std::array kernelMinimizeThreads; + + bool shouldRunTimingComputation; + }; + + template + constexpr uint32_t getLength() { + return EigenM::RowsAtCompileTime * EigenM::ColsAtCompileTime; + } + + struct EventDataForScratchDevice { + using SVT = ::ecal::multifit::SampleVector::Scalar; + using SGVT = ::ecal::multifit::SampleGainVector::Scalar; + using SMT = ::ecal::multifit::SampleMatrix::Scalar; + using PMT = ::ecal::multifit::PulseMatrixType::Scalar; + using BXVT = ::ecal::multifit::BXVectorType::Scalar; + + static constexpr auto svlength = 
getLength<::ecal::multifit::SampleVector>(); + static constexpr auto sgvlength = getLength<::ecal::multifit::SampleGainVector>(); + static constexpr auto smlength = getLength<::ecal::multifit::SampleMatrix>(); + static constexpr auto pmlength = getLength<::ecal::multifit::PulseMatrixType>(); + static constexpr auto bxvlength = getLength<::ecal::multifit::BXVectorType>(); + + // delete the default constructor because alpaka buffers do not have a default constructor + EventDataForScratchDevice() = delete; + + explicit EventDataForScratchDevice(ConfigurationParameters const& configParameters, uint32_t size, Queue& queue) + : samplesDevBuf{cms::alpakatools::make_device_buffer(queue, size * svlength)}, + gainsNoiseDevBuf{cms::alpakatools::make_device_buffer(queue, size * sgvlength)}, + noisecovDevBuf{cms::alpakatools::make_device_buffer(queue, size * smlength)}, + pulse_matrixDevBuf{cms::alpakatools::make_device_buffer(queue, size * pmlength)}, + activeBXsDevBuf{cms::alpakatools::make_device_buffer(queue, size * bxvlength)}, + acStateDevBuf{cms::alpakatools::make_device_buffer(queue, size)}, + hasSwitchToGain6DevBuf{cms::alpakatools::make_device_buffer(queue, size)}, + hasSwitchToGain1DevBuf{cms::alpakatools::make_device_buffer(queue, size)}, + isSaturatedDevBuf{cms::alpakatools::make_device_buffer(queue, size)} { + if (configParameters.shouldRunTimingComputation) { + sample_valuesDevBuf = cms::alpakatools::make_device_buffer(queue, size * svlength); + sample_value_errorsDevBuf = cms::alpakatools::make_device_buffer(queue, size * svlength); + useless_sample_valuesDevBuf = + cms::alpakatools::make_device_buffer(queue, size * EcalDataFrame::MAXSAMPLES); + chi2sNullHypotDevBuf = cms::alpakatools::make_device_buffer(queue, size); + sum0sNullHypotDevBuf = cms::alpakatools::make_device_buffer(queue, size); + sumAAsNullHypotDevBuf = cms::alpakatools::make_device_buffer(queue, size); + pedestal_numsDevBuf = cms::alpakatools::make_device_buffer(queue, size); + + 
tMaxAlphaBetasDevBuf = cms::alpakatools::make_device_buffer(queue, size); + tMaxErrorAlphaBetasDevBuf = cms::alpakatools::make_device_buffer(queue, size); + accTimeMaxDevBuf = cms::alpakatools::make_device_buffer(queue, size); + accTimeWgtDevBuf = cms::alpakatools::make_device_buffer(queue, size); + ampMaxAlphaBetaDevBuf = cms::alpakatools::make_device_buffer(queue, size); + ampMaxErrorDevBuf = cms::alpakatools::make_device_buffer(queue, size); + timeMaxDevBuf = cms::alpakatools::make_device_buffer(queue, size); + timeErrorDevBuf = cms::alpakatools::make_device_buffer(queue, size); + tcStateDevBuf = cms::alpakatools::make_device_buffer(queue, size); + } + }; + + cms::alpakatools::device_buffer samplesDevBuf; + cms::alpakatools::device_buffer gainsNoiseDevBuf; + + cms::alpakatools::device_buffer noisecovDevBuf; + cms::alpakatools::device_buffer pulse_matrixDevBuf; + cms::alpakatools::device_buffer activeBXsDevBuf; + cms::alpakatools::device_buffer acStateDevBuf; + + cms::alpakatools::device_buffer hasSwitchToGain6DevBuf; + cms::alpakatools::device_buffer hasSwitchToGain1DevBuf; + cms::alpakatools::device_buffer isSaturatedDevBuf; + + std::optional> sample_valuesDevBuf; + std::optional> sample_value_errorsDevBuf; + std::optional> useless_sample_valuesDevBuf; + std::optional> chi2sNullHypotDevBuf; + std::optional> sum0sNullHypotDevBuf; + std::optional> sumAAsNullHypotDevBuf; + std::optional> pedestal_numsDevBuf; + std::optional> tMaxAlphaBetasDevBuf; + std::optional> tMaxErrorAlphaBetasDevBuf; + std::optional> accTimeMaxDevBuf; + std::optional> accTimeWgtDevBuf; + std::optional> ampMaxAlphaBetaDevBuf; + std::optional> ampMaxErrorDevBuf; + std::optional> timeMaxDevBuf; + std::optional> timeErrorDevBuf; + std::optional> tcStateDevBuf; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit + +#endif // RecoLocalCalo_EcalRecProducers_plugins_alpaka_DeclsForKernels_h diff --git 
a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalMultifitConditionsHostESProducer.cc b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalMultifitConditionsHostESProducer.cc new file mode 100644 index 0000000000000..6db1ff58b2740 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalMultifitConditionsHostESProducer.cc @@ -0,0 +1,213 @@ +#include "FWCore/Framework/interface/ESTransientHandle.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include +#include +#include "CondFormats/DataRecord/interface/EcalGainRatiosRcd.h" +#include "CondFormats/DataRecord/interface/EcalPedestalsRcd.h" +#include "CondFormats/DataRecord/interface/EcalPulseCovariancesRcd.h" +#include "CondFormats/DataRecord/interface/EcalPulseShapesRcd.h" +#include "CondFormats/DataRecord/interface/EcalSampleMaskRcd.h" +#include "CondFormats/DataRecord/interface/EcalSamplesCorrelationRcd.h" +#include "CondFormats/DataRecord/interface/EcalTimeBiasCorrectionsRcd.h" +#include "CondFormats/DataRecord/interface/EcalTimeCalibConstantsRcd.h" +#include "CondFormats/DataRecord/interface/EcalTimeOffsetConstantRcd.h" +#include "CondFormats/EcalObjects/interface/EcalGainRatios.h" +#include "CondFormats/EcalObjects/interface/EcalPedestals.h" +#include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h" +#include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" +#include "CondFormats/EcalObjects/interface/EcalSamplesCorrelation.h" +#include "CondFormats/EcalObjects/interface/EcalSampleMask.h" +#include "CondFormats/EcalObjects/interface/EcalTimeBiasCorrections.h" +#include "CondFormats/EcalObjects/interface/EcalTimeCalibConstants.h" +#include "CondFormats/EcalObjects/interface/EcalTimeOffsetConstant.h" + +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitConditionsDevice.h" +#include "CondFormats/EcalObjects/interface/EcalMultifitConditionsSoA.h" +#include "CondFormats/DataRecord/interface/EcalMultifitConditionsRcd.h" + +#include 
"DataFormats/EcalDigi/interface/EcalConstants.h" +#include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" + +#include "DataFormats/EcalDetId/interface/EcalElectronicsId.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + class EcalMultifitConditionsHostESProducer : public ESProducer { + public: + EcalMultifitConditionsHostESProducer(edm::ParameterSet const& iConfig) : ESProducer(iConfig) { + auto cc = setWhatProduced(this); + pedestalsToken_ = cc.consumes(); + gainRatiosToken_ = cc.consumes(); + pulseShapesToken_ = cc.consumes(); + pulseCovariancesToken_ = cc.consumes(); + samplesCorrelationToken_ = cc.consumes(); + timeBiasCorrectionsToken_ = cc.consumes(); + timeCalibConstantsToken_ = cc.consumes(); + sampleMaskToken_ = cc.consumes(); + timeOffsetConstantToken_ = cc.consumes(); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + descriptions.addWithDefaultLabel(desc); + } + + std::unique_ptr produce(EcalMultifitConditionsRcd const& iRecord) { + auto const& pedestalsData = iRecord.get(pedestalsToken_); + auto const& gainRatiosData = iRecord.get(gainRatiosToken_); + auto const& pulseShapesData = iRecord.get(pulseShapesToken_); + auto const& pulseCovariancesData = iRecord.get(pulseCovariancesToken_); + auto const& samplesCorrelationData = iRecord.get(samplesCorrelationToken_); + auto const& timeBiasCorrectionsData = iRecord.get(timeBiasCorrectionsToken_); + auto const& timeCalibConstantsData = iRecord.get(timeCalibConstantsToken_); + auto const& sampleMaskData = 
iRecord.get(sampleMaskToken_); + auto const& timeOffsetConstantData = iRecord.get(timeOffsetConstantToken_); + + size_t numberOfXtals = pedestalsData.size(); + + auto product = std::make_unique(numberOfXtals, cms::alpakatools::host()); + auto view = product->view(); + + // Filling pedestals + const auto barrelSize = pedestalsData.barrelItems().size(); + const auto endcapSize = pedestalsData.endcapItems().size(); + + auto const& pedestalsEB = pedestalsData.barrelItems(); + auto const& pedestalsEE = pedestalsData.endcapItems(); + auto const& gainRatiosEB = gainRatiosData.barrelItems(); + auto const& gainRatiosEE = gainRatiosData.endcapItems(); + auto const& pulseShapesEB = pulseShapesData.barrelItems(); + auto const& pulseShapesEE = pulseShapesData.endcapItems(); + auto const& pulseCovariancesEB = pulseCovariancesData.barrelItems(); + auto const& pulseCovariancesEE = pulseCovariancesData.endcapItems(); + auto const& timeCalibConstantsEB = timeCalibConstantsData.barrelItems(); + auto const& timeCalibConstantsEE = timeCalibConstantsData.endcapItems(); + + for (unsigned int i = 0; i < barrelSize; i++) { + auto vi = view[i]; + + vi.pedestals_mean_x12() = pedestalsEB[i].mean_x12; + vi.pedestals_rms_x12() = pedestalsEB[i].rms_x12; + vi.pedestals_mean_x6() = pedestalsEB[i].mean_x6; + vi.pedestals_rms_x6() = pedestalsEB[i].rms_x6; + vi.pedestals_mean_x1() = pedestalsEB[i].mean_x1; + vi.pedestals_rms_x1() = pedestalsEB[i].rms_x1; + + vi.gain12Over6() = gainRatiosEB[i].gain12Over6(); + vi.gain6Over1() = gainRatiosEB[i].gain6Over1(); + + vi.timeCalibConstants() = timeCalibConstantsEB[i]; + + std::memcpy(vi.pulseShapes().data(), pulseShapesEB[i].pdfval, sizeof(float) * EcalPulseShape::TEMPLATESAMPLES); + for (unsigned int j = 0; j < EcalPulseShape::TEMPLATESAMPLES; j++) { + for (unsigned int k = 0; k < EcalPulseShape::TEMPLATESAMPLES; k++) { + vi.pulseCovariance()(j, k) = pulseCovariancesEB[i].val(j, k); + } + } + } // end Barrel loop + for (unsigned int i = 0; i < endcapSize; 
i++) { + auto vi = view[barrelSize + i]; + + vi.pedestals_mean_x12() = pedestalsEE[i].mean_x12; + vi.pedestals_rms_x12() = pedestalsEE[i].rms_x12; + vi.pedestals_mean_x6() = pedestalsEE[i].mean_x6; + vi.pedestals_rms_x6() = pedestalsEE[i].rms_x6; + vi.pedestals_mean_x1() = pedestalsEE[i].mean_x1; + vi.pedestals_rms_x1() = pedestalsEE[i].rms_x1; + + vi.gain12Over6() = gainRatiosEE[i].gain12Over6(); + vi.gain6Over1() = gainRatiosEE[i].gain6Over1(); + + vi.timeCalibConstants() = timeCalibConstantsEE[i]; + + std::memcpy(vi.pulseShapes().data(), pulseShapesEE[i].pdfval, sizeof(float) * EcalPulseShape::TEMPLATESAMPLES); + + for (unsigned int j = 0; j < EcalPulseShape::TEMPLATESAMPLES; j++) { + for (unsigned int k = 0; k < EcalPulseShape::TEMPLATESAMPLES; k++) { + vi.pulseCovariance()(j, k) = pulseCovariancesEE[i].val(j, k); + } + } + } // end Endcap loop + + // === Scalar data (not by xtal) + //TimeBiasCorrection + // Assert that there are not more parameters than the EcalMultiFitConditionsSoA expects + assert(timeBiasCorrectionsData.EBTimeCorrAmplitudeBins.size() <= kMaxTimeBiasCorrectionBinsEB); + assert(timeBiasCorrectionsData.EBTimeCorrShiftBins.size() <= kMaxTimeBiasCorrectionBinsEB); + std::memcpy(view.timeBiasCorrections_amplitude_EB().data(), + timeBiasCorrectionsData.EBTimeCorrAmplitudeBins.data(), + sizeof(float) * kMaxTimeBiasCorrectionBinsEB); + std::memcpy(view.timeBiasCorrections_shift_EB().data(), + timeBiasCorrectionsData.EBTimeCorrShiftBins.data(), + sizeof(float) * kMaxTimeBiasCorrectionBinsEB); + + // Assert that there are not more parameters than the EcalMultiFitConditionsSoA expects + assert(timeBiasCorrectionsData.EETimeCorrAmplitudeBins.size() <= kMaxTimeBiasCorrectionBinsEE); + assert(timeBiasCorrectionsData.EETimeCorrShiftBins.size() <= kMaxTimeBiasCorrectionBinsEE); + std::memcpy(view.timeBiasCorrections_amplitude_EE().data(), + timeBiasCorrectionsData.EETimeCorrAmplitudeBins.data(), + sizeof(float) * kMaxTimeBiasCorrectionBinsEE); + 
std::memcpy(view.timeBiasCorrections_shift_EE().data(), + timeBiasCorrectionsData.EETimeCorrShiftBins.data(), + sizeof(float) * kMaxTimeBiasCorrectionBinsEE); + + view.timeBiasCorrectionSizeEB() = + std::min(timeBiasCorrectionsData.EBTimeCorrAmplitudeBins.size(), kMaxTimeBiasCorrectionBinsEB); + view.timeBiasCorrectionSizeEE() = + std::min(timeBiasCorrectionsData.EETimeCorrAmplitudeBins.size(), kMaxTimeBiasCorrectionBinsEE); + + // SampleCorrelation + std::memcpy(view.sampleCorrelation_EB_G12().data(), + samplesCorrelationData.EBG12SamplesCorrelation.data(), + sizeof(double) * ecalPh1::sampleSize); + std::memcpy(view.sampleCorrelation_EB_G6().data(), + samplesCorrelationData.EBG6SamplesCorrelation.data(), + sizeof(double) * ecalPh1::sampleSize); + std::memcpy(view.sampleCorrelation_EB_G1().data(), + samplesCorrelationData.EBG1SamplesCorrelation.data(), + sizeof(double) * ecalPh1::sampleSize); + + std::memcpy(view.sampleCorrelation_EE_G12().data(), + samplesCorrelationData.EEG12SamplesCorrelation.data(), + sizeof(double) * ecalPh1::sampleSize); + std::memcpy(view.sampleCorrelation_EE_G6().data(), + samplesCorrelationData.EBG6SamplesCorrelation.data(), + sizeof(double) * ecalPh1::sampleSize); + std::memcpy(view.sampleCorrelation_EE_G1().data(), + samplesCorrelationData.EEG1SamplesCorrelation.data(), + sizeof(double) * ecalPh1::sampleSize); + + // Sample masks + view.sampleMask_EB() = sampleMaskData.getEcalSampleMaskRecordEB(); + view.sampleMask_EE() = sampleMaskData.getEcalSampleMaskRecordEE(); + + // Time offsets + view.timeOffset_EB() = timeOffsetConstantData.getEBValue(); + view.timeOffset_EE() = timeOffsetConstantData.getEEValue(); + + // number of barrel items as offset for hashed ID access to EE items of columns + view.offsetEE() = barrelSize; + + return product; + } + + private: + edm::ESGetToken pedestalsToken_; + edm::ESGetToken gainRatiosToken_; + edm::ESGetToken pulseShapesToken_; + edm::ESGetToken pulseCovariancesToken_; + edm::ESGetToken 
samplesCorrelationToken_; + edm::ESGetToken timeBiasCorrectionsToken_; + edm::ESGetToken timeCalibConstantsToken_; + edm::ESGetToken sampleMaskToken_; + edm::ESGetToken timeOffsetConstantToken_; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(EcalMultifitConditionsHostESProducer); diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalMultifitParametersHostESProducer.cc b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalMultifitParametersHostESProducer.cc new file mode 100644 index 0000000000000..809dacdabc43e --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalMultifitParametersHostESProducer.cc @@ -0,0 +1,99 @@ +#include + +#include "FWCore/ParameterSet/interface/ParameterSet.h" + +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitParametersDevice.h" +#include "CondFormats/EcalObjects/interface/EcalMultifitParametersSoA.h" +#include "CondFormats/DataRecord/interface/EcalMultifitParametersRcd.h" + +#include "DataFormats/EcalDigi/interface/EcalConstants.h" + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + class EcalMultifitParametersHostESProducer : public ESProducer { + public: + EcalMultifitParametersHostESProducer(edm::ParameterSet const&); + ~EcalMultifitParametersHostESProducer() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions&); + std::unique_ptr produce(EcalMultifitParametersRcd const&); + + private: + std::vector ebTimeFitParameters_; + std::vector eeTimeFitParameters_; + std::vector ebAmplitudeFitParameters_; + std::vector 
eeAmplitudeFitParameters_; + }; + + EcalMultifitParametersHostESProducer::EcalMultifitParametersHostESProducer(edm::ParameterSet const& iConfig) + : ESProducer(iConfig) { + setWhatProduced(this); + + auto const ebTimeFitParamsFromPSet = iConfig.getParameter>("EBtimeFitParameters"); + auto const eeTimeFitParamsFromPSet = iConfig.getParameter>("EEtimeFitParameters"); + // Assert that there are as many parameters as the EcalMultiFitParametersSoA expects + assert(ebTimeFitParamsFromPSet.size() == kNTimeFitParams); + assert(eeTimeFitParamsFromPSet.size() == kNTimeFitParams); + ebTimeFitParameters_.assign(ebTimeFitParamsFromPSet.begin(), ebTimeFitParamsFromPSet.end()); + eeTimeFitParameters_.assign(eeTimeFitParamsFromPSet.begin(), eeTimeFitParamsFromPSet.end()); + + auto const ebAmplFitParamsFromPSet = iConfig.getParameter>("EBamplitudeFitParameters"); + auto const eeAmplFitParamsFromPSet = iConfig.getParameter>("EEamplitudeFitParameters"); + // Assert that there are as many parameters as the EcalMultiFitParametersSoA expects + assert(ebAmplFitParamsFromPSet.size() == kNAmplitudeFitParams); + assert(eeAmplFitParamsFromPSet.size() == kNAmplitudeFitParams); + ebAmplitudeFitParameters_.assign(ebAmplFitParamsFromPSet.begin(), ebAmplFitParamsFromPSet.end()); + eeAmplitudeFitParameters_.assign(eeAmplFitParamsFromPSet.begin(), eeAmplFitParamsFromPSet.end()); + } + + void EcalMultifitParametersHostESProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add>("EBtimeFitParameters", + {-2.015452e+00, + 3.130702e+00, + -1.234730e+01, + 4.188921e+01, + -8.283944e+01, + 9.101147e+01, + -5.035761e+01, + 1.105621e+01}); + desc.add>("EEtimeFitParameters", + {-2.390548e+00, + 3.553628e+00, + -1.762341e+01, + 6.767538e+01, + -1.332130e+02, + 1.407432e+02, + -7.541106e+01, + 1.620277e+01}); + desc.add>("EBamplitudeFitParameters", {1.138, 1.652}); + desc.add>("EEamplitudeFitParameters", {1.890, 1.400}); + 
descriptions.addWithDefaultLabel(desc); + } + + std::unique_ptr EcalMultifitParametersHostESProducer::produce( + EcalMultifitParametersRcd const& iRecord) { + size_t const sizeone = 1; + auto product = std::make_unique(sizeone, cms::alpakatools::host()); + auto view = product->view(); + + std::memcpy(view.timeFitParamsEB().data(), ebTimeFitParameters_.data(), sizeof(float) * kNTimeFitParams); + std::memcpy(view.timeFitParamsEE().data(), eeTimeFitParameters_.data(), sizeof(float) * kNTimeFitParams); + + std::memcpy( + view.amplitudeFitParamsEB().data(), ebAmplitudeFitParameters_.data(), sizeof(float) * kNAmplitudeFitParams); + std::memcpy( + view.amplitudeFitParamsEE().data(), eeAmplitudeFitParameters_.data(), sizeof(float) * kNAmplitudeFitParams); + + return product; + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(EcalMultifitParametersHostESProducer); diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitMultiFitAlgoPortable.dev.cc b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitMultiFitAlgoPortable.dev.cc new file mode 100644 index 0000000000000..e2f56ae901903 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitMultiFitAlgoPortable.dev.cc @@ -0,0 +1,234 @@ +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +#include "AmplitudeComputationCommonKernels.h" +#include "AmplitudeComputationKernels.h" +#include "EcalUncalibRecHitMultiFitAlgoPortable.h" +#include "TimeComputationKernels.h" + +//#define DEBUG +//#define ECAL_RECO_ALPAKA_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + using namespace cms::alpakatools; + + void launchKernels(Queue& queue, + InputProduct const& digisDevEB, + InputProduct const& digisDevEE, + OutputProduct& uncalibRecHitsDevEB, + OutputProduct& uncalibRecHitsDevEE, + EcalMultifitConditionsDevice const& conditionsDev, + EcalMultifitParametersDevice const& 
paramsDev, + ConfigurationParameters const& configParams) { + using digis_type = std::vector; + using dids_type = std::vector; + // according to the cpu setup //----> hardcoded + bool constexpr gainSwitchUseMaxSampleEB = true; + // according to the cpu setup //----> hardcoded + bool constexpr gainSwitchUseMaxSampleEE = false; + auto constexpr kMaxSamples = EcalDataFrame::MAXSAMPLES; + + auto const ebSize = static_cast(uncalibRecHitsDevEB.const_view().metadata().size()); + auto const totalChannels = ebSize + static_cast(uncalibRecHitsDevEE.const_view().metadata().size()); + + EventDataForScratchDevice scratch(configParams, totalChannels, queue); + + // + // 1d preparation kernel + // + uint32_t constexpr nchannels_per_block = 32; + auto constexpr threads_1d = kMaxSamples * nchannels_per_block; + auto const blocks_1d = cms::alpakatools::divide_up_by(totalChannels * kMaxSamples, threads_1d); + auto workDivPrep1D = cms::alpakatools::make_workdiv(blocks_1d, threads_1d); + // Since the ::ecal::multifit::X objects are non-dynamic Eigen::Matrix types the returned pointers from the buffers + // and the ::ecal::multifit::X* both point to the data. 
+ alpaka::exec(queue, + workDivPrep1D, + Kernel_prep_1d_and_initialize{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + uncalibRecHitsDevEB.view(), + uncalibRecHitsDevEE.view(), + conditionsDev.const_view(), + reinterpret_cast<::ecal::multifit::SampleVector*>(scratch.samplesDevBuf.data()), + reinterpret_cast<::ecal::multifit::SampleGainVector*>(scratch.gainsNoiseDevBuf.data()), + scratch.hasSwitchToGain6DevBuf.data(), + scratch.hasSwitchToGain1DevBuf.data(), + scratch.isSaturatedDevBuf.data(), + scratch.acStateDevBuf.data(), + reinterpret_cast<::ecal::multifit::BXVectorType*>(scratch.activeBXsDevBuf.data()), + gainSwitchUseMaxSampleEB, + gainSwitchUseMaxSampleEE); + + // + // 2d preparation kernel + // + Vec2D const blocks_2d{1u, totalChannels}; // {y, x} coordiantes + Vec2D const threads_2d{kMaxSamples, kMaxSamples}; + auto workDivPrep2D = cms::alpakatools::make_workdiv(blocks_2d, threads_2d); + alpaka::exec(queue, + workDivPrep2D, + Kernel_prep_2d{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + conditionsDev.const_view(), + reinterpret_cast<::ecal::multifit::SampleGainVector*>(scratch.gainsNoiseDevBuf.data()), + reinterpret_cast<::ecal::multifit::SampleMatrix*>(scratch.noisecovDevBuf.data()), + reinterpret_cast<::ecal::multifit::PulseMatrixType*>(scratch.pulse_matrixDevBuf.data()), + scratch.hasSwitchToGain6DevBuf.data(), + scratch.hasSwitchToGain1DevBuf.data(), + scratch.isSaturatedDevBuf.data()); + + // run minimization kernels + minimization_procedure(queue, + digisDevEB, + digisDevEE, + uncalibRecHitsDevEB, + uncalibRecHitsDevEE, + scratch, + conditionsDev, + configParams, + totalChannels); + + if (configParams.shouldRunTimingComputation) { + // + // TODO: this guy can run concurrently with other kernels, + // there is no dependence on the order of execution + // + auto const blocks_time_init = blocks_1d; + auto const threads_time_init = threads_1d; + auto workDivTimeCompInit1D = cms::alpakatools::make_workdiv(blocks_time_init, 
threads_time_init); + alpaka::exec(queue, + workDivTimeCompInit1D, + Kernel_time_computation_init{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + conditionsDev.const_view(), + scratch.sample_valuesDevBuf.value().data(), + scratch.sample_value_errorsDevBuf.value().data(), + scratch.ampMaxErrorDevBuf.value().data(), + scratch.useless_sample_valuesDevBuf.value().data(), + scratch.pedestal_numsDevBuf.value().data()); + + // + // TODO: small kernel only for EB. It needs to be checked if + /// fusing such small kernels is beneficial in here + // + // we are running only over EB digis + // therefore we need to create threads/blocks only for that + auto const threadsFixMGPA = threads_1d; + auto const blocksFixMGPA = cms::alpakatools::divide_up_by(kMaxSamples * ebSize, threadsFixMGPA); + auto workDivTimeFixMGPAslew1D = cms::alpakatools::make_workdiv(blocksFixMGPA, threadsFixMGPA); + alpaka::exec(queue, + workDivTimeFixMGPAslew1D, + Kernel_time_compute_fixMGPAslew{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + conditionsDev.const_view(), + scratch.sample_valuesDevBuf.value().data(), + scratch.sample_value_errorsDevBuf.value().data(), + scratch.useless_sample_valuesDevBuf.value().data()); + + auto const threads_nullhypot = threads_1d; + auto const blocks_nullhypot = blocks_1d; + auto workDivTimeNullhypot1D = cms::alpakatools::make_workdiv(blocks_nullhypot, threads_nullhypot); + alpaka::exec(queue, + workDivTimeNullhypot1D, + Kernel_time_compute_nullhypot{}, + scratch.sample_valuesDevBuf.value().data(), + scratch.sample_value_errorsDevBuf.value().data(), + scratch.useless_sample_valuesDevBuf.value().data(), + scratch.chi2sNullHypotDevBuf.value().data(), + scratch.sum0sNullHypotDevBuf.value().data(), + scratch.sumAAsNullHypotDevBuf.value().data(), + totalChannels); + + constexpr uint32_t nchannels_per_block_makeratio = kMaxSamples; + constexpr auto nthreads_per_channel = + nchannels_per_block_makeratio * (nchannels_per_block_makeratio - 1) / 2; // 
n(n-1)/2 + constexpr auto threads_makeratio = nthreads_per_channel * nchannels_per_block_makeratio; + auto const blocks_makeratio = + cms::alpakatools::divide_up_by(nthreads_per_channel * totalChannels, threads_makeratio); + auto workDivTimeMakeRatio1D = cms::alpakatools::make_workdiv(blocks_makeratio, threads_makeratio); + alpaka::exec(queue, + workDivTimeMakeRatio1D, + Kernel_time_compute_makeratio{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + scratch.sample_valuesDevBuf.value().data(), + scratch.sample_value_errorsDevBuf.value().data(), + scratch.useless_sample_valuesDevBuf.value().data(), + scratch.pedestal_numsDevBuf.value().data(), + scratch.sumAAsNullHypotDevBuf.value().data(), + scratch.sum0sNullHypotDevBuf.value().data(), + scratch.tMaxAlphaBetasDevBuf.value().data(), + scratch.tMaxErrorAlphaBetasDevBuf.value().data(), + scratch.accTimeMaxDevBuf.value().data(), + scratch.accTimeWgtDevBuf.value().data(), + scratch.tcStateDevBuf.value().data(), + paramsDev.const_view(), + configParams.timeFitLimitsFirstEB, + configParams.timeFitLimitsFirstEE, + configParams.timeFitLimitsSecondEB, + configParams.timeFitLimitsSecondEE); + + auto const threads_findamplchi2 = threads_1d; + auto const blocks_findamplchi2 = blocks_1d; + auto workDivTimeFindAmplChi21D = cms::alpakatools::make_workdiv(blocks_findamplchi2, threads_findamplchi2); + alpaka::exec(queue, + workDivTimeFindAmplChi21D, + Kernel_time_compute_findamplchi2_and_finish{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + scratch.sample_valuesDevBuf.value().data(), + scratch.sample_value_errorsDevBuf.value().data(), + scratch.useless_sample_valuesDevBuf.value().data(), + scratch.tMaxAlphaBetasDevBuf.value().data(), + scratch.tMaxErrorAlphaBetasDevBuf.value().data(), + scratch.accTimeMaxDevBuf.value().data(), + scratch.accTimeWgtDevBuf.value().data(), + scratch.sumAAsNullHypotDevBuf.value().data(), + scratch.sum0sNullHypotDevBuf.value().data(), + scratch.chi2sNullHypotDevBuf.value().data(), + 
scratch.tcStateDevBuf.value().data(), + scratch.ampMaxAlphaBetaDevBuf.value().data(), + scratch.ampMaxErrorDevBuf.value().data(), + scratch.timeMaxDevBuf.value().data(), + scratch.timeErrorDevBuf.value().data(), + paramsDev.const_view()); + + auto const threads_timecorr = 32; + auto const blocks_timecorr = cms::alpakatools::divide_up_by(totalChannels, threads_timecorr); + auto workDivCorrFinal1D = cms::alpakatools::make_workdiv(blocks_timecorr, threads_timecorr); + alpaka::exec(queue, + workDivCorrFinal1D, + Kernel_time_correction_and_finalize{}, + digisDevEB.const_view(), + digisDevEE.const_view(), + uncalibRecHitsDevEB.view(), + uncalibRecHitsDevEE.view(), + conditionsDev.const_view(), + scratch.timeMaxDevBuf.value().data(), + scratch.timeErrorDevBuf.value().data(), + configParams.timeConstantTermEB, + configParams.timeConstantTermEE, + configParams.timeNconstEB, + configParams.timeNconstEE, + configParams.amplitudeThreshEB, + configParams.amplitudeThreshEE, + configParams.outOfTimeThreshG12pEB, + configParams.outOfTimeThreshG12pEE, + configParams.outOfTimeThreshG12mEB, + configParams.outOfTimeThreshG12mEE, + configParams.outOfTimeThreshG61pEB, + configParams.outOfTimeThreshG61pEE, + configParams.outOfTimeThreshG61mEB, + configParams.outOfTimeThreshG61mEE); + } + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitMultiFitAlgoPortable.h b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitMultiFitAlgoPortable.h new file mode 100644 index 0000000000000..c63b3f8181315 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitMultiFitAlgoPortable.h @@ -0,0 +1,30 @@ +#ifndef RecoLocalCalo_EcalRecProducers_plugins_alpaka_EcalUncalibRecHitMultiFitAlgoPortable_h +#define RecoLocalCalo_EcalRecProducers_plugins_alpaka_EcalUncalibRecHitMultiFitAlgoPortable_h + +#include + +#include 
"CondFormats/EcalObjects/interface/alpaka/EcalMultifitConditionsDevice.h" +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitParametersDevice.h" +#include "DataFormats/EcalDigi/interface/alpaka/EcalDigiDeviceCollection.h" +#include "DataFormats/EcalRecHit/interface/alpaka/EcalUncalibratedRecHitDeviceCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "DeclsForKernels.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + using InputProduct = EcalDigiDeviceCollection; + using OutputProduct = EcalUncalibratedRecHitDeviceCollection; + + void launchKernels(Queue& queue, + InputProduct const& digisDevEB, + InputProduct const& digisDevEE, + OutputProduct& uncalibRecHitsDevEB, + OutputProduct& uncalibRecHitsDevEE, + EcalMultifitConditionsDevice const& conditionsDev, + EcalMultifitParametersDevice const& paramsDev, + ConfigurationParameters const& configParams); + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit + +#endif // RecoLocalCalo_EcalRecProducers_plugins_alpaka_EcalUncalibRecHitMultiFitAlgoPortable_h diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitProducerPortable.cc b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitProducerPortable.cc new file mode 100644 index 0000000000000..d0f06f3caf186 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/EcalUncalibRecHitProducerPortable.cc @@ -0,0 +1,222 @@ +#include "CondFormats/DataRecord/interface/EcalMultifitConditionsRcd.h" +#include "CondFormats/DataRecord/interface/EcalMultifitParametersRcd.h" +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitConditionsDevice.h" +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitParametersDevice.h" +#include "DataFormats/EcalDigi/interface/alpaka/EcalDigiDeviceCollection.h" +#include "DataFormats/EcalRecHit/interface/alpaka/EcalUncalibratedRecHitDeviceCollection.h" 
+#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EventSetup.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/stream/SynchronizingEDProducer.h" + +#include "DeclsForKernels.h" +#include "EcalUncalibRecHitMultiFitAlgoPortable.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + class EcalUncalibRecHitProducerPortable : public stream::SynchronizingEDProducer<> { + public: + explicit EcalUncalibRecHitProducerPortable(edm::ParameterSet const& ps); + ~EcalUncalibRecHitProducerPortable() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions&); + + void acquire(device::Event const&, device::EventSetup const&) override; + void produce(device::Event&, device::EventSetup const&) override; + + private: + using InputProduct = EcalDigiDeviceCollection; + const device::EDGetToken digisTokenEB_; + const device::EDGetToken digisTokenEE_; + using OutputProduct = EcalUncalibratedRecHitDeviceCollection; + const device::EDPutToken uncalibRecHitsTokenEB_; + const device::EDPutToken uncalibRecHitsTokenEE_; + + // conditions tokens + const device::ESGetToken multifitConditionsToken_; + const device::ESGetToken multifitParametersToken_; + + // configuration parameters + ecal::multifit::ConfigurationParameters configParameters_; + + cms::alpakatools::host_buffer ebDigisSizeHostBuf_; + cms::alpakatools::host_buffer eeDigisSizeHostBuf_; + }; + + void EcalUncalibRecHitProducerPortable::fillDescriptions(edm::ConfigurationDescriptions& confDesc) { + edm::ParameterSetDescription desc; + + desc.add("digisLabelEB", edm::InputTag("ecalRawToDigiPortable", 
"ebDigis")); + desc.add("digisLabelEE", edm::InputTag("ecalRawToDigiPortable", "eeDigis")); + + desc.add("recHitsLabelEB", "EcalUncalibRecHitsEB"); + desc.add("recHitsLabelEE", "EcalUncalibRecHitsEE"); + + desc.add("EBtimeFitLimits_Lower", 0.2); + desc.add("EBtimeFitLimits_Upper", 1.4); + desc.add("EEtimeFitLimits_Lower", 0.2); + desc.add("EEtimeFitLimits_Upper", 1.4); + desc.add("EBtimeConstantTerm", .6); + desc.add("EEtimeConstantTerm", 1.0); + desc.add("EBtimeNconst", 28.5); + desc.add("EEtimeNconst", 31.8); + desc.add("outOfTimeThresholdGain12pEB", 5); + desc.add("outOfTimeThresholdGain12mEB", 5); + desc.add("outOfTimeThresholdGain61pEB", 5); + desc.add("outOfTimeThresholdGain61mEB", 5); + desc.add("outOfTimeThresholdGain12pEE", 1000); + desc.add("outOfTimeThresholdGain12mEE", 1000); + desc.add("outOfTimeThresholdGain61pEE", 1000); + desc.add("outOfTimeThresholdGain61mEE", 1000); + desc.add("amplitudeThresholdEB", 10); + desc.add("amplitudeThresholdEE", 10); + desc.addUntracked>("kernelMinimizeThreads", {32, 1, 1}); + desc.add("shouldRunTimingComputation", true); + confDesc.addWithDefaultLabel(desc); + } + + EcalUncalibRecHitProducerPortable::EcalUncalibRecHitProducerPortable(const edm::ParameterSet& ps) + : digisTokenEB_{consumes(ps.getParameter("digisLabelEB"))}, + digisTokenEE_{consumes(ps.getParameter("digisLabelEE"))}, + uncalibRecHitsTokenEB_{produces(ps.getParameter("recHitsLabelEB"))}, + uncalibRecHitsTokenEE_{produces(ps.getParameter("recHitsLabelEE"))}, + multifitConditionsToken_{esConsumes()}, + multifitParametersToken_{esConsumes()}, + ebDigisSizeHostBuf_{cms::alpakatools::make_host_buffer()}, + eeDigisSizeHostBuf_{cms::alpakatools::make_host_buffer()} { + std::pair EBtimeFitLimits, EEtimeFitLimits; + EBtimeFitLimits.first = ps.getParameter("EBtimeFitLimits_Lower"); + EBtimeFitLimits.second = ps.getParameter("EBtimeFitLimits_Upper"); + EEtimeFitLimits.first = ps.getParameter("EEtimeFitLimits_Lower"); + EEtimeFitLimits.second = 
ps.getParameter("EEtimeFitLimits_Upper"); + + auto EBtimeConstantTerm = ps.getParameter("EBtimeConstantTerm"); + auto EEtimeConstantTerm = ps.getParameter("EEtimeConstantTerm"); + auto EBtimeNconst = ps.getParameter("EBtimeNconst"); + auto EEtimeNconst = ps.getParameter("EEtimeNconst"); + + auto outOfTimeThreshG12pEB = ps.getParameter("outOfTimeThresholdGain12pEB"); + auto outOfTimeThreshG12mEB = ps.getParameter("outOfTimeThresholdGain12mEB"); + auto outOfTimeThreshG61pEB = ps.getParameter("outOfTimeThresholdGain61pEB"); + auto outOfTimeThreshG61mEB = ps.getParameter("outOfTimeThresholdGain61mEB"); + auto outOfTimeThreshG12pEE = ps.getParameter("outOfTimeThresholdGain12pEE"); + auto outOfTimeThreshG12mEE = ps.getParameter("outOfTimeThresholdGain12mEE"); + auto outOfTimeThreshG61pEE = ps.getParameter("outOfTimeThresholdGain61pEE"); + auto outOfTimeThreshG61mEE = ps.getParameter("outOfTimeThresholdGain61mEE"); + auto amplitudeThreshEB = ps.getParameter("amplitudeThresholdEB"); + auto amplitudeThreshEE = ps.getParameter("amplitudeThresholdEE"); + + // switch to run timing computation kernels + configParameters_.shouldRunTimingComputation = ps.getParameter("shouldRunTimingComputation"); + + // minimize kernel launch conf + auto threadsMinimize = ps.getUntrackedParameter>("kernelMinimizeThreads"); + configParameters_.kernelMinimizeThreads[0] = threadsMinimize[0]; + configParameters_.kernelMinimizeThreads[1] = threadsMinimize[1]; + configParameters_.kernelMinimizeThreads[2] = threadsMinimize[2]; + + // + // configuration and physics parameters: done once + // assume there is a single device + // use sync copying + // + + // time fit parameters and limits + configParameters_.timeFitLimitsFirstEB = EBtimeFitLimits.first; + configParameters_.timeFitLimitsSecondEB = EBtimeFitLimits.second; + configParameters_.timeFitLimitsFirstEE = EEtimeFitLimits.first; + configParameters_.timeFitLimitsSecondEE = EEtimeFitLimits.second; + + // time constant terms + 
configParameters_.timeConstantTermEB = EBtimeConstantTerm; + configParameters_.timeConstantTermEE = EEtimeConstantTerm; + + // time N const + configParameters_.timeNconstEB = EBtimeNconst; + configParameters_.timeNconstEE = EEtimeNconst; + + // amplitude threshold for time flags + configParameters_.amplitudeThreshEB = amplitudeThreshEB; + configParameters_.amplitudeThreshEE = amplitudeThreshEE; + + // out of time thresholds gain-dependent + configParameters_.outOfTimeThreshG12pEB = outOfTimeThreshG12pEB; + configParameters_.outOfTimeThreshG12pEE = outOfTimeThreshG12pEE; + configParameters_.outOfTimeThreshG61pEB = outOfTimeThreshG61pEB; + configParameters_.outOfTimeThreshG61pEE = outOfTimeThreshG61pEE; + configParameters_.outOfTimeThreshG12mEB = outOfTimeThreshG12mEB; + configParameters_.outOfTimeThreshG12mEE = outOfTimeThreshG12mEE; + configParameters_.outOfTimeThreshG61mEB = outOfTimeThreshG61mEB; + configParameters_.outOfTimeThreshG61mEE = outOfTimeThreshG61mEE; + } + + void EcalUncalibRecHitProducerPortable::acquire(device::Event const& event, device::EventSetup const& setup) { + auto& queue = event.queue(); + + // get device collections from event + auto const& ebDigisDev = event.get(digisTokenEB_); + auto const& eeDigisDev = event.get(digisTokenEE_); + + // copy the actual numbers of digis in the collections to host + auto ebDigisSizeDevConstView = + cms::alpakatools::make_device_view(alpaka::getDev(queue), ebDigisDev.const_view().size()); + auto eeDigisSizeDevConstView = + cms::alpakatools::make_device_view(alpaka::getDev(queue), eeDigisDev.const_view().size()); + alpaka::memcpy(queue, ebDigisSizeHostBuf_, ebDigisSizeDevConstView); + alpaka::memcpy(queue, eeDigisSizeHostBuf_, eeDigisSizeDevConstView); + } + + void EcalUncalibRecHitProducerPortable::produce(device::Event& event, device::EventSetup const& setup) { + auto& queue = event.queue(); + + // get device collections from event + auto const& ebDigisDev = event.get(digisTokenEB_); + auto const& eeDigisDev 
= event.get(digisTokenEE_); + + // get the actual numbers of digis in the collections + auto const ebDigisSize = static_cast(*ebDigisSizeHostBuf_.data()); + auto const eeDigisSize = static_cast(*eeDigisSizeHostBuf_.data()); + + // output device collections + OutputProduct uncalibRecHitsDevEB{ebDigisSize, queue}; + OutputProduct uncalibRecHitsDevEE{eeDigisSize, queue}; + // reset the size scalar of the SoA + // memset takes an alpaka view that is created from the scalar in a view to the portable device collection + auto uncalibRecHitSizeViewEB = + cms::alpakatools::make_device_view(alpaka::getDev(queue), uncalibRecHitsDevEB.view().size()); + auto uncalibRecHitSizeViewEE = + cms::alpakatools::make_device_view(alpaka::getDev(queue), uncalibRecHitsDevEE.view().size()); + alpaka::memset(queue, uncalibRecHitSizeViewEB, 0); + alpaka::memset(queue, uncalibRecHitSizeViewEE, 0); + + // stop here if there are no digis + if (ebDigisSize + eeDigisSize > 0) { + // conditions + auto const& multifitConditionsDev = setup.getData(multifitConditionsToken_); + auto const& multifitParametersDev = setup.getData(multifitParametersToken_); + + // + // schedule algorithms + // + ecal::multifit::launchKernels(queue, + ebDigisDev, + eeDigisDev, + uncalibRecHitsDevEB, + uncalibRecHitsDevEE, + multifitConditionsDev, + multifitParametersDev, + configParameters_); + } + + // put into the event + event.emplace(uncalibRecHitsTokenEB_, std::move(uncalibRecHitsDevEB)); + event.emplace(uncalibRecHitsTokenEE_, std::move(uncalibRecHitsDevEE)); + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(EcalUncalibRecHitProducerPortable); diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/KernelHelpers.dev.cc b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/KernelHelpers.dev.cc new file mode 100644 index 0000000000000..906b96fa2b6b6 --- /dev/null +++ 
b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/KernelHelpers.dev.cc @@ -0,0 +1,275 @@ +#include "DataFormats/EcalDetId/interface/EBDetId.h" +#include "DataFormats/EcalDetId/interface/EEDetId.h" + +#include "KernelHelpers.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::reconstruction { + + namespace internal::barrel { + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool positiveZ(uint32_t id) { return id & 0x10000; } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE uint32_t ietaAbs(uint32_t id) { return (id >> 9) & 0x7F; } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE uint32_t iphi(uint32_t id) { return id & 0x1FF; } + + ALPAKA_FN_ACC int dccFromSm(int ism) { + int idcc = 9 + ism; + if (ism > 18) + idcc -= 18; + else + idcc += 18; + return idcc; + } + + ALPAKA_FN_ACC int sm(int ieta, int iphi) { + if (iphi > 360) + iphi -= 360; + int ism = (iphi - 1) / 20 + 1; + if (ieta < 0) + ism += 18; + return ism; + } + + ALPAKA_FN_ACC int dcc(int ieta, int iphi) { + int const ism = sm(ieta, iphi); + return dccFromSm(ism); + } + + ALPAKA_FN_ACC int lm_channel(int iX, int iY) { + static const int idx_[] = { + // clang-format off + // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + 1, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8, 8, 8, 8, // 3 + 1, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8, 8, 8, 8, // 2 + 1, 3, 3, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 9, 9, 9, 9, // 1 + 1, 3, 3, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 9, 9, 9, 9 // 0 + // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + // clang-format on + }; + + constexpr int iym = 4; + constexpr int ixm = 17; + int const il = iym - iY + 1; + int const ic = iX; + int const ii = il * ixm + ic; + if (ii < 0 || ii > (int)(sizeof(idx_) / sizeof(int))) { + return -1; + }; + return idx_[ii]; + } + + ALPAKA_FN_ACC int localCoord_x(int ieta) { + int iz = 1; + if (ieta < 0) { + iz = -1; + } + ieta *= iz; + int ix = ieta - 1; + + return ix; + } + + ALPAKA_FN_ACC int localCoord_y(int ieta, int iphi) { + if (iphi > 360) { + iphi -= 360; + } + int iy = (iphi - 1) % 20; + if (ieta < 0) { + iy = 19 - iy; + } + + 
return iy; + } + + ALPAKA_FN_ACC int lmmod(int ieta, int iphi) { + int const ix = localCoord_x(ieta); + int const iy = localCoord_y(ieta, iphi); + + return lm_channel(ix / 5, iy / 5); + } + + ALPAKA_FN_ACC int side(int ieta, int iphi) { + int const ilmmod = lmmod(ieta, iphi); + return (ilmmod % 2 == 0) ? 1 : 0; + } + + } // namespace internal::barrel + + ALPAKA_FN_ACC uint32_t hashedIndexEB(uint32_t id) { + using namespace internal::barrel; + return (EBDetId::MAX_IETA + (positiveZ(id) ? ietaAbs(id) - 1 : -ietaAbs(id))) * EBDetId::MAX_IPHI + iphi(id) - 1; + } + + // + // https://cmssdt.cern.ch/lxr/source/CalibCalorimetry/EcalLaserAnalyzer/src/MEEBGeom.cc + // function: "lmr" + + ALPAKA_FN_ACC int32_t laserMonitoringRegionEB(uint32_t id) { + using namespace internal::barrel; + + int ieta; + if (positiveZ(id)) { + ieta = ietaAbs(id); + } else { + ieta = -ietaAbs(id); + } + + int const idcc = dcc(ieta, (int)(iphi(id))); + int const ism = idcc - 9; + + int const iside = side(ieta, (int)(iphi(id))); + + return (1 + 2 * (ism - 1) + iside); + } + + namespace internal::endcap { + + ALPAKA_FN_ACC ALPAKA_FN_INLINE uint32_t ix(uint32_t id) { return (id >> 7) & 0x7F; } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE uint32_t iy(uint32_t id) { return id & 0x7F; } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool positiveZ(uint32_t id) { return id & 0x4000; } + + // these constants come from EE Det Id + ALPAKA_STATIC_ACC_MEM_CONSTANT const unsigned short kxf[] = { + 41, 51, 41, 51, 41, 51, 36, 51, 36, 51, 26, 51, 26, 51, 26, 51, 21, 51, 21, 51, 21, 51, 21, 51, 21, + 51, 16, 51, 16, 51, 14, 51, 14, 51, 14, 51, 14, 51, 14, 51, 9, 51, 9, 51, 9, 51, 9, 51, 9, 51, + 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 4, 51, 4, 51, 4, + 51, 4, 51, 4, 56, 1, 58, 1, 59, 1, 60, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, + 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 61, 1, 61, 1, 60, 1, 59, 1, 58, 4, 56, 4, 51, 4, + 51, 4, 51, 4, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 51, 6, 
51, 6, 51, + 9, 51, 9, 51, 9, 51, 9, 51, 9, 51, 14, 51, 14, 51, 14, 51, 14, 51, 14, 51, 16, 51, 16, 51, 21, + 51, 21, 51, 21, 51, 21, 51, 21, 51, 26, 51, 26, 51, 26, 51, 36, 51, 36, 51, 41, 51, 41, 51, 41, 51}; + + ALPAKA_STATIC_ACC_MEM_CONSTANT const unsigned short kdi[] = { + 0, 10, 20, 30, 40, 50, 60, 75, 90, 105, 120, 145, 170, 195, 220, 245, 270, + 300, 330, 360, 390, 420, 450, 480, 510, 540, 570, 605, 640, 675, 710, 747, 784, 821, + 858, 895, 932, 969, 1006, 1043, 1080, 1122, 1164, 1206, 1248, 1290, 1332, 1374, 1416, 1458, 1500, + 1545, 1590, 1635, 1680, 1725, 1770, 1815, 1860, 1905, 1950, 1995, 2040, 2085, 2130, 2175, 2220, 2265, + 2310, 2355, 2400, 2447, 2494, 2541, 2588, 2635, 2682, 2729, 2776, 2818, 2860, 2903, 2946, 2988, 3030, + 3071, 3112, 3152, 3192, 3232, 3272, 3311, 3350, 3389, 3428, 3467, 3506, 3545, 3584, 3623, 3662, 3701, + 3740, 3779, 3818, 3857, 3896, 3935, 3974, 4013, 4052, 4092, 4132, 4172, 4212, 4253, 4294, 4336, 4378, + 4421, 4464, 4506, 4548, 4595, 4642, 4689, 4736, 4783, 4830, 4877, 4924, 4969, 5014, 5059, 5104, 5149, + 5194, 5239, 5284, 5329, 5374, 5419, 5464, 5509, 5554, 5599, 5644, 5689, 5734, 5779, 5824, 5866, 5908, + 5950, 5992, 6034, 6076, 6118, 6160, 6202, 6244, 6281, 6318, 6355, 6392, 6429, 6466, 6503, 6540, 6577, + 6614, 6649, 6684, 6719, 6754, 6784, 6814, 6844, 6874, 6904, 6934, 6964, 6994, 7024, 7054, 7079, 7104, + 7129, 7154, 7179, 7204, 7219, 7234, 7249, 7264, 7274, 7284, 7294, 7304, 7314}; + + ALPAKA_FN_ACC int quadrant(int iX, int iY) { + bool const near = iX >= 11; + bool const far = !near; + bool const top = iY >= 11; + bool const bot = !top; + + int iquad = 0; + if (near && top) + iquad = 1; + else if (far && top) + iquad = 2; + else if (far && bot) + iquad = 3; + else + iquad = 4; + + return iquad; + } + + ALPAKA_FN_ACC int sector(int iX, int iY) { + // Y (towards the surface) + // T + // | + // | + // | + // o---------| X (towards center of LHC) + // + static const int idx_[] = { + // clang-format off + // 1 2 3 4 5 6 
7 8 9 10 11 12 13 14 15 16 17 18 19 20 + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 9, 9, 9, 0, 0, 0, 0, 0, 0, 0, // 20 + 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, // 19 + 0, 0, 0, 2, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 8, 0, 0, 0, // 18 + 0, 0, 2, 2, 2, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 8, 8, 8, 0, 0, // 17 + 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 8, 8, 8, 8, 0, // 16 + 0, 2, 2, 2, 2, 2, 1, 1, 1, 1, 9, 9, 9, 9, 8, 8, 8, 8, 8, 0, // 15 + 0, 2, 2, 2, 2, 2, 2, 1, 1, 1, 9, 9, 9, 8, 8, 8, 8, 8, 8, 0, // 14 + 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, // 13 + 3, 3, 2, 2, 2, 2, 2, 2, 2, 0, 0, 8, 8, 8, 8, 8, 8, 8, 7, 7, // 12 + 3, 3, 3, 3, 3, 3, 3, 2, 0, 0, 0, 0, 8, 7, 7, 7, 7, 7, 7, 7, // 11 + 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 7, // 10 + 3, 3, 3, 3, 3, 3, 3, 4, 4, 0, 0, 6, 6, 7, 7, 7, 7, 7, 7, 7, // 9 + 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 7, 7, 7, // 8 + 0, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 0, // 7 + 0, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 0, // 6 + 0, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 0, // 5 + 0, 0, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 0, 0, // 4 + 0, 0, 0, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 0, 0, 0, // 3 + 0, 0, 0, 0, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 0, 0, 0, 0, // 2 + 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0 // 1 + // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 + // clang-format on + }; + + constexpr int iym = 20; + constexpr int ixm = 20; + int const il = iym - iY; + int const ic = iX - 1; + int const ii = il * ixm + ic; + + if (ii < 0 || ii > (int)(sizeof(idx_) / sizeof(int)) || idx_[ii] == 0) { + return -1; + }; + return idx_[ii]; + } + + } // namespace internal::endcap + + ALPAKA_FN_ACC uint32_t hashedIndexEE(uint32_t id) { + using namespace internal::endcap; + + const uint32_t jx(ix(id)); + const uint32_t jd(2 * (iy(id) - 1) + (jx - 1) / 50); + return ((positiveZ(id) ? 
EEDetId::kEEhalf : 0) + kdi[jd] + jx - kxf[jd]); + } + + // + // https://cmssdt.cern.ch/lxr/source/CalibCalorimetry/EcalLaserAnalyzer/src/MEEEGeom.cc + // https://github.com/cms-sw/cmssw/blob/master/CalibCalorimetry/EcalLaserCorrection/src/EcalLaserDbService.cc + // + + ALPAKA_FN_ACC int32_t laserMonitoringRegionEE(uint32_t id) { + using namespace internal::endcap; + + // SuperCrysCoord + uint32_t const iX = (ix(id) - 1) / 5 + 1; + uint32_t const iY = (iy(id) - 1) / 5 + 1; + + // Correct convention + // * @param iz iz/zside index: -1 for EE-, +1 for EE+ + // https://github.com/cms-sw/cmssw/blob/master/DataFormats/EcalDetId/interface/EEDetId.h#L68-L71 + // zside in https://github.com/cms-sw/cmssw/blob/master/CalibCalorimetry/EcalLaserCorrection/src/EcalLaserDbService.cc#L63 + // + int const iz = positiveZ(id) ? 1 : -1; + + int const iquad = quadrant(iX, iY); + int const isect = sector(iX, iY); + if (isect < 0) + return -1; + + int ilmr = 0; + ilmr = isect - 6; + if (ilmr <= 0) + ilmr += 9; + if (ilmr == 9) + ilmr++; + else if (ilmr == 8 && iquad == 4) + ilmr++; + if (iz == +1) + ilmr += 72; + else + ilmr += 82; + + return ilmr; + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::reconstruction diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/KernelHelpers.h b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/KernelHelpers.h new file mode 100644 index 0000000000000..3b1772ecf2981 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/KernelHelpers.h @@ -0,0 +1,19 @@ +#ifndef RecoLocalCalo_EcalRecProducers_plugins_alpaka_KernelHelpers_h +#define RecoLocalCalo_EcalRecProducers_plugins_alpaka_KernelHelpers_h + +#include +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::reconstruction { + + ALPAKA_FN_ACC uint32_t hashedIndexEB(uint32_t id); + + ALPAKA_FN_ACC uint32_t hashedIndexEE(uint32_t id); + + ALPAKA_FN_ACC int32_t laserMonitoringRegionEB(uint32_t id); + + 
ALPAKA_FN_ACC int32_t laserMonitoringRegionEE(uint32_t id); + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::reconstruction + +#endif // RecoLocalCalo_EcalRecProducers_plugins_alpaka_KernelHelpers_h diff --git a/RecoLocalCalo/EcalRecProducers/plugins/alpaka/TimeComputationKernels.h b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/TimeComputationKernels.h new file mode 100644 index 0000000000000..667e4d4687e51 --- /dev/null +++ b/RecoLocalCalo/EcalRecProducers/plugins/alpaka/TimeComputationKernels.h @@ -0,0 +1,1162 @@ +#ifndef RecoLocalCalo_EcalRecProducers_plugins_alpaka_TimeComputationKernels_h +#define RecoLocalCalo_EcalRecProducers_plugins_alpaka_TimeComputationKernels_h + +#include +#include +#include + +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitConditionsDevice.h" +#include "CondFormats/EcalObjects/interface/alpaka/EcalMultifitParametersDevice.h" +#include "DataFormats/EcalDigi/interface/EcalDataFrame.h" +#include "DataFormats/EcalDigi/interface/EcalMGPASample.h" +#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h" +#include "DataFormats/Math/interface/approx_exp.h" +#include "DataFormats/Math/interface/approx_log.h" +#include "FWCore/Utilities/interface/CMSUnrollLoop.h" +#include "RecoLocalCalo/EcalRecProducers/interface/EigenMatrixTypes_gpu.h" + +#include "DeclsForKernels.h" +#include "KernelHelpers.h" + +//#define ECAL_RECO_ALPAKA_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit { + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool use_sample(unsigned int sample_mask, unsigned int sample) { + return sample_mask & (0x1 << (EcalDataFrame::MAXSAMPLES - (sample + 1))); + } + + ALPAKA_FN_ACC constexpr float fast_expf(float x) { return unsafe_expf<6>(x); } + ALPAKA_FN_ACC constexpr float fast_logf(float x) { return unsafe_logf<7>(x); } + + class Kernel_time_compute_nullhypot { + using ScalarType = ::ecal::multifit::SampleVector::Scalar; + + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + 
ScalarType* const sample_values, + ScalarType* const sample_value_errors, + bool* const useless_sample_values, + ScalarType* chi2s, + ScalarType* sum0s, + ScalarType* sumAAs, + uint32_t const nchannels) const { + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + + // indices + auto const elemsPerBlock = alpaka::getWorkDiv(acc)[0u]; + + // shared mem inits + auto* s_sum0 = alpaka::getDynSharedMem(acc); + auto* s_sum1 = reinterpret_cast(s_sum0 + elemsPerBlock); + auto* s_sumA = s_sum1 + elemsPerBlock; + auto* s_sumAA = s_sumA + elemsPerBlock; + + for (auto txforward : cms::alpakatools::elements_with_stride(acc, nchannels * nsamples)) { + // go backwards through the loop to have valid values for shared variables when reading from higher element indices in serial execution + auto tx = nchannels * nsamples - 1 - txforward; + auto const ch = tx / nsamples; + + auto const sample = tx % nsamples; + auto const ltx = tx % elemsPerBlock; + + // TODO make sure no div by 0 + auto const inv_error = + useless_sample_values[tx] ? 0. : 1. / (sample_value_errors[tx] * sample_value_errors[tx]); + auto const sample_value = sample_values[tx]; + s_sum0[ltx] = useless_sample_values[tx] ? 
0 : 1; + s_sum1[ltx] = inv_error; + s_sumA[ltx] = sample_value * inv_error; + s_sumAA[ltx] = sample_value * sample_value * inv_error; + alpaka::syncBlockThreads(acc); + + // 5 threads for [0, 4] samples + if (sample < 5) { + s_sum0[ltx] += s_sum0[ltx + 5]; + s_sum1[ltx] += s_sum1[ltx + 5]; + s_sumA[ltx] += s_sumA[ltx + 5]; + s_sumAA[ltx] += s_sumAA[ltx + 5]; + } + alpaka::syncBlockThreads(acc); + + if (sample < 2) { + // note double counting of sample 3 + s_sum0[ltx] += s_sum0[ltx + 2] + s_sum0[ltx + 3]; + s_sum1[ltx] += s_sum1[ltx + 2] + s_sum1[ltx + 3]; + s_sumA[ltx] += s_sumA[ltx + 2] + s_sumA[ltx + 3]; + s_sumAA[ltx] += s_sumAA[ltx + 2] + s_sumAA[ltx + 3]; + } + alpaka::syncBlockThreads(acc); + + if (sample == 0) { + // note, subtract to remove the double counting of sample == 3 + auto const sum0 = s_sum0[ltx] + s_sum0[ltx + 1] - s_sum0[ltx + 3]; + auto const sum1 = s_sum1[ltx] + s_sum1[ltx + 1] - s_sum1[ltx + 3]; + auto const sumA = s_sumA[ltx] + s_sumA[ltx + 1] - s_sumA[ltx + 3]; + auto const sumAA = s_sumAA[ltx] + s_sumAA[ltx + 1] - s_sumAA[ltx + 3]; + auto const chi2 = sum0 > 0 ? 
(sumAA - sumA * sumA / sum1) / sum0 : static_cast(0); + chi2s[ch] = chi2; + sum0s[ch] = sum0; + sumAAs[ch] = sumAA; + +#ifdef DEBUG_TC_NULLHYPOT + if (ch == 0) { + printf("chi2 = %f sum0 = %d sumAA = %f\n", chi2, static_cast(sum0), sumAA); + } +#endif + } + } + } + }; + + // + // launch ctx parameters are + // 45 threads per channel, X channels per block, Y blocks + // 45 comes from: 10 samples for i <- 0 to 9 and for j <- i+1 to 9 + // TODO: it might be much beter to use 32 threads per channel instead of 45 + // to simplify the synchronization + class Kernel_time_compute_makeratio { + using ScalarType = ::ecal::multifit::SampleVector::Scalar; + + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + ScalarType* const sample_values, + ScalarType* const sample_value_errors, + bool* const useless_sample_values, + char* const pedestal_nums, + ScalarType* const sumAAsNullHypot, + ScalarType* const sum0sNullHypot, + ScalarType* tMaxAlphaBetas, + ScalarType* tMaxErrorAlphaBetas, + ScalarType* g_accTimeMax, + ScalarType* g_accTimeWgt, + TimeComputationState* g_state, + EcalMultifitParametersDevice::ConstView paramsDev, + ConfigurationParameters::type const timeFitLimits_firstEB, + ConfigurationParameters::type const timeFitLimits_firstEE, + ConfigurationParameters::type const timeFitLimits_secondEB, + ConfigurationParameters::type const timeFitLimits_secondEE) const { + // constants + constexpr uint32_t nchannels_per_block = 10; + constexpr auto nthreads_per_channel = nchannels_per_block * (nchannels_per_block - 1) / 2; + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + auto const nchannels = digisDevEB.size() + digisDevEE.size(); + auto const offsetForInputs = digisDevEB.size(); + auto const totalElements = nthreads_per_channel * nchannels; + + auto const elemsPerBlock = alpaka::getWorkDiv(acc)[0u]; + assert(nthreads_per_channel * 
nchannels_per_block == elemsPerBlock); + + auto* shr_chi2s = alpaka::getDynSharedMem(acc); + auto* shr_time_wgt = shr_chi2s + elemsPerBlock; + auto* shr_time_max = shr_time_wgt + elemsPerBlock; + auto* shrTimeMax = shr_time_max + elemsPerBlock; + auto* shrTimeWgt = shrTimeMax + elemsPerBlock; + auto* shr_chi2 = shrTimeWgt + elemsPerBlock; + auto* shr_tmax = shr_chi2 + elemsPerBlock; + auto* shr_tmaxerr = shr_tmax + elemsPerBlock; + auto* shr_condForUselessSamples = reinterpret_cast(shr_tmaxerr + elemsPerBlock); + auto* shr_internalCondForSkipping1 = shr_condForUselessSamples + elemsPerBlock; + auto* shr_internalCondForSkipping2 = shr_internalCondForSkipping1 + elemsPerBlock; + + for (auto block : cms::alpakatools::blocks_with_stride(acc, totalElements)) { + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ch = idx.global / nthreads_per_channel; + auto const ltx = idx.global % nthreads_per_channel; + + auto const ch_start = ch * nsamples; + auto const inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch; + auto const* dids = ch >= offsetForInputs ? digisDevEE.id() : digisDevEB.id(); + + auto const did = DetId{dids[inputCh]}; + auto const isBarrel = did.subdetId() == EcalBarrel; + auto* const amplitudeFitParameters = + isBarrel ? paramsDev.amplitudeFitParamsEB().data() : paramsDev.amplitudeFitParamsEE().data(); + auto* const timeFitParameters = + isBarrel ? paramsDev.timeFitParamsEB().data() : paramsDev.timeFitParamsEE().data(); + auto const timeFitParameters_size = + isBarrel ? paramsDev.timeFitParamsEB().size() : paramsDev.timeFitParamsEE().size(); + auto const timeFitLimits_first = isBarrel ? timeFitLimits_firstEB : timeFitLimits_firstEE; + auto const timeFitLimits_second = isBarrel ? 
timeFitLimits_secondEB : timeFitLimits_secondEE; + + // map tx -> (sample_i, sample_j) + int sample_i = 0; + int sample_j = 0; + if (ltx <= 8) { + sample_i = 0; + sample_j = 1 + ltx; + } else if (ltx <= 16) { + sample_i = 1; + sample_j = 2 + ltx - 9; + } else if (ltx <= 23) { + sample_i = 2; + sample_j = 3 + ltx - 17; + } else if (ltx <= 29) { + sample_i = 3; + sample_j = 4 + ltx - 24; + } else if (ltx <= 34) { + sample_i = 4; + sample_j = 5 + ltx - 30; + } else if (ltx <= 38) { + sample_i = 5; + sample_j = 6 + ltx - 35; + } else if (ltx <= 41) { + sample_i = 6; + sample_j = 7 + ltx - 39; + } else if (ltx <= 43) { + sample_i = 7; + sample_j = 8 + ltx - 42; + } else if (ltx <= 44) { + sample_i = 8; + sample_j = 9; + } else + assert(false); + + auto const tx_i = ch_start + sample_i; + auto const tx_j = ch_start + sample_j; + + // + // note, given the way we partition the block, with 45 threads per channel + // we will end up with inactive threads which need to be dragged along + // through the synching point + // + bool const condForUselessSamples = useless_sample_values[tx_i] || useless_sample_values[tx_j] || + sample_values[tx_i] <= 1 || sample_values[tx_j] <= 1; + + // + // see cpu implementation for explanation + // + ScalarType chi2 = std::numeric_limits::max(); + ScalarType tmax = 0; + ScalarType tmaxerr = 0; + shrTimeMax[idx.local] = 0; + shrTimeWgt[idx.local] = 0; + + bool internalCondForSkipping1 = true; + bool internalCondForSkipping2 = true; + if (!condForUselessSamples) { + auto const rtmp = sample_values[tx_i] / sample_values[tx_j]; + auto const invampl_i = 1. / sample_values[tx_i]; + auto const relErr2_i = sample_value_errors[tx_i] * sample_value_errors[tx_i] * invampl_i * invampl_i; + auto const invampl_j = 1. 
/ sample_values[tx_j]; + auto const relErr2_j = sample_value_errors[tx_j] * sample_value_errors[tx_j] * invampl_j * invampl_j; + auto const err1 = rtmp * rtmp * (relErr2_i + relErr2_j); + auto err2 = + sample_value_errors[tx_j] * (sample_values[tx_i] - sample_values[tx_j]) * (invampl_j * invampl_j); + // TODO non-divergent branch for a block if each block has 1 channel + // otherwise non-divergent for groups of 45 threads + // at this point, pedestal_nums[ch] can be either 0, 1 or 2 + if (pedestal_nums[ch] == 2) + err2 *= err2 * 0.5; + auto const err3 = (0.289 * 0.289) * (invampl_j * invampl_j); + auto const total_error = std::sqrt(err1 + err2 + err3); + + auto const alpha = amplitudeFitParameters[0]; + auto const beta = amplitudeFitParameters[1]; + auto const alphabeta = alpha * beta; + auto const invalphabeta = 1. / alphabeta; + + // variables instead of a struct + auto const ratio_index = sample_i; + auto const ratio_step = sample_j - sample_i; + auto const ratio_value = rtmp; + auto const ratio_error = total_error; + + auto const rlim_i_j = fast_expf(static_cast(sample_j - sample_i) / beta) - 0.001; + internalCondForSkipping1 = !(total_error < 1. && rtmp > 0.001 && rtmp < rlim_i_j); + if (!internalCondForSkipping1) { + // + // precompute. 
+ // in cpu version this was done conditionally + // however easier to do it here (precompute) and then just filter out + // if not needed + // + auto const l_timeFitLimits_first = timeFitLimits_first; + auto const l_timeFitLimits_second = timeFitLimits_second; + if (ratio_step == 1 && ratio_value >= l_timeFitLimits_first && ratio_value <= l_timeFitLimits_second) { + auto const time_max_i = static_cast(ratio_index); + auto u = timeFitParameters[timeFitParameters_size - 1]; + CMS_UNROLL_LOOP + for (int k = timeFitParameters_size - 2; k >= 0; --k) + u = u * ratio_value + timeFitParameters[k]; + + auto du = (timeFitParameters_size - 1) * (timeFitParameters[timeFitParameters_size - 1]); + for (int k = timeFitParameters_size - 2; k >= 1; --k) + du = du * ratio_value + k * timeFitParameters[k]; + + auto const error2 = ratio_error * ratio_error * du * du; + auto const time_max = error2 > 0 ? (time_max_i - u) / error2 : static_cast(0); + auto const time_wgt = error2 > 0 ? 1. / error2 : static_cast(0); + + // store into shared mem + // note, this name is essentially identical to the one used + // below. + shrTimeMax[idx.local] = error2 > 0 ? time_max : 0; + shrTimeWgt[idx.local] = error2 > 0 ? 
time_wgt : 0; + } else { + shrTimeMax[idx.local] = 0; + shrTimeWgt[idx.local] = 0; + } + + // continue with ratios + auto const stepOverBeta = static_cast(ratio_step) / beta; + auto const offset = static_cast(ratio_index) + alphabeta; + auto const rmin = std::max(ratio_value - ratio_error, 0.001); + auto const rmax = + std::min(ratio_value + ratio_error, fast_expf(static_cast(ratio_step) / beta) - 0.001); + auto const time1 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmin)) / alpha) - 1.); + auto const time2 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmax)) / alpha) - 1.); + + // set these guys + tmax = 0.5 * (time1 + time2); + tmaxerr = 0.5 * std::sqrt((time1 - time2) * (time1 - time2)); +#ifdef DEBUG_TC_MAKERATIO + if (ch == 1 || ch == 0) + printf( + "ch = %d ltx = %d tmax = %f tmaxerr = %f time1 = %f time2 = %f offset = %f rmin = %f rmax = " + "%f\n", + ch, + ltx, + tmax, + tmaxerr, + time1, + time2, + offset, + rmin, + rmax); +#endif + + ScalarType sumAf = 0; + ScalarType sumff = 0; + const int itmin = std::max(-1, static_cast(std::floor(tmax - alphabeta))); + auto loffset = (static_cast(itmin) - tmax) * invalphabeta; + // TODO: data dependence + for (int it = itmin + 1; it < nsamples; ++it) { + loffset += invalphabeta; + if (useless_sample_values[ch_start + it]) + continue; + auto const inverr2 = 1. / (sample_value_errors[ch_start + it] * sample_value_errors[ch_start + it]); + auto const term1 = 1. + loffset; + auto const f = (term1 > 1e-6) ? 
fast_expf(alpha * (fast_logf(term1) - loffset)) : 0; + sumAf += sample_values[ch_start + it] * (f * inverr2); + sumff += f * (f * inverr2); + } + + auto const sumAA = sumAAsNullHypot[ch]; + auto const sum0 = sum0sNullHypot[ch]; + chi2 = sumAA; + // TODO: sum0 can not be 0 below, need to introduce the check upfront + if (sumff > 0) { + chi2 = sumAA - sumAf * (sumAf / sumff); + } + chi2 /= sum0; + +#ifdef DEBUG_TC_MAKERATIO + if (ch == 1 || ch == 0) + printf( + "ch = %d ltx = %d sumAf = %f sumff = %f sumAA = %f sum0 = %d tmax = %f tmaxerr = %f chi2 = " + "%f\n", + ch, + ltx, + sumAf, + sumff, + sumAA, + static_cast(sum0), + tmax, + tmaxerr, + chi2); +#endif + + if (chi2 > 0 && tmax > 0 && tmaxerr > 0) + internalCondForSkipping2 = false; + else + chi2 = std::numeric_limits::max(); + } + } + + // store into smem + shr_chi2s[idx.local] = chi2; + shr_chi2[idx.local] = chi2; + shr_tmax[idx.local] = tmax; + shr_tmaxerr[idx.local] = tmaxerr; + shr_condForUselessSamples[idx.local] = condForUselessSamples; + shr_internalCondForSkipping1[idx.local] = internalCondForSkipping1; + shr_internalCondForSkipping2[idx.local] = internalCondForSkipping2; + } + + alpaka::syncBlockThreads(acc); + + // find min chi2 - quite crude for now + // TODO validate/check + auto iter = nthreads_per_channel / 2 + nthreads_per_channel % 2; + bool oddElements = nthreads_per_channel % 2; + CMS_UNROLL_LOOP + while (iter >= 1) { + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ltx = idx.global % nthreads_per_channel; + + if (ltx < iter && !(oddElements && (ltx == iter - 1 && ltx > 0))) { + // for odd ns, the last guy will just store itself + // exception is for ltx == 0 and iter==1 + shr_chi2s[idx.local] = std::min(shr_chi2s[idx.local], shr_chi2s[idx.local + iter]); + } + } + alpaka::syncBlockThreads(acc); + + oddElements = iter % 2; + iter = iter == 1 ? 
iter / 2 : iter / 2 + iter % 2; + } + + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ltx = idx.global % nthreads_per_channel; + + // get precomputedflags for this element from shared memory + auto const condForUselessSamples = shr_condForUselessSamples[idx.local]; + auto const internalCondForSkipping1 = shr_internalCondForSkipping1[idx.local]; + auto const internalCondForSkipping2 = shr_internalCondForSkipping2[idx.local]; + // filter out inactive or useless samples threads + if (!condForUselessSamples && !internalCondForSkipping1 && !internalCondForSkipping2) { + // min chi2, now compute weighted average of tmax measurements + // see cpu version for more explanation + auto const chi2 = shr_chi2[idx.local]; + auto const chi2min = shr_chi2s[idx.local - ltx]; + auto const chi2Limit = chi2min + 1.; + auto const tmaxerr = shr_tmaxerr[idx.local]; + auto const inverseSigmaSquared = chi2 < chi2Limit ? 1. / (tmaxerr * tmaxerr) : 0.; + +#ifdef DEBUG_TC_MAKERATIO + if (ch == 1 || ch == 0) { + auto const ch = idx.global / nthreads_per_channel; + printf("ch = %d ltx = %d chi2min = %f chi2Limit = %f inverseSigmaSquared = %f\n", + ch, + ltx, + chi2min, + chi2Limit, + inverseSigmaSquared); + } +#endif + + // store into shared mem and run reduction + // TODO: check if cooperative groups would be better + // TODO: check if shuffling intrinsics are better + auto const tmax = shr_tmax[idx.local]; + shr_time_wgt[idx.local] = inverseSigmaSquared; + shr_time_max[idx.local] = tmax * inverseSigmaSquared; + } else { + shr_time_wgt[idx.local] = 0; + shr_time_max[idx.local] = 0; + } + } + + alpaka::syncBlockThreads(acc); + + // reduce to compute time_max and time_wgt + iter = nthreads_per_channel / 2 + nthreads_per_channel % 2; + oddElements = nthreads_per_channel % 2; + CMS_UNROLL_LOOP + while (iter >= 1) { + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ltx = idx.global % 
nthreads_per_channel; + + if (ltx < iter && !(oddElements && (ltx == iter - 1 && ltx > 0))) { + shr_time_wgt[idx.local] += shr_time_wgt[idx.local + iter]; + shr_time_max[idx.local] += shr_time_max[idx.local + iter]; + shrTimeMax[idx.local] += shrTimeMax[idx.local + iter]; + shrTimeWgt[idx.local] += shrTimeWgt[idx.local + iter]; + } + } + + alpaka::syncBlockThreads(acc); + oddElements = iter % 2; + iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2; + } + + for (auto idx : cms::alpakatools::elements_in_block(acc, block, totalElements)) { + auto const ltx = idx.global % nthreads_per_channel; + + // load from shared memory the 0th guy (will contain accumulated values) + // compute + // store into global mem + if (ltx == 0) { + auto const ch = idx.global / nthreads_per_channel; + auto const tmp_time_max = shr_time_max[idx.local]; + auto const tmp_time_wgt = shr_time_wgt[idx.local]; + + // we are done if there number of time ratios is 0 + if (tmp_time_wgt == 0 && tmp_time_max == 0) { + g_state[ch] = TimeComputationState::Finished; + continue; + } + + // no div by 0 + auto const tMaxAlphaBeta = tmp_time_max / tmp_time_wgt; + auto const tMaxErrorAlphaBeta = 1. 
/ std::sqrt(tmp_time_wgt); + + tMaxAlphaBetas[ch] = tMaxAlphaBeta; + tMaxErrorAlphaBetas[ch] = tMaxErrorAlphaBeta; + g_accTimeMax[ch] = shrTimeMax[idx.local]; + g_accTimeWgt[ch] = shrTimeWgt[idx.local]; + g_state[ch] = TimeComputationState::NotFinished; + +#ifdef DEBUG_TC_MAKERATIO + printf("ch = %d time_max = %f time_wgt = %f\n", ch, tmp_time_max, tmp_time_wgt); + printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f timeMax = %f timeWgt = %f\n", + ch, + tMaxAlphaBeta, + tMaxErrorAlphaBeta, + shrTimeMax[idx.local], + shrTimeWgt[idx.local]); +#endif + } + } + } + } + }; + + class Kernel_time_compute_findamplchi2_and_finish { + using ScalarType = ::ecal::multifit::SampleVector::Scalar; + + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + ScalarType* const sample_values, + ScalarType* const sample_value_errors, + bool* const useless_samples, + ScalarType* const g_tMaxAlphaBeta, + ScalarType* const g_tMaxErrorAlphaBeta, + ScalarType* const g_accTimeMax, + ScalarType* const g_accTimeWgt, + ScalarType* const sumAAsNullHypot, + ScalarType* const sum0sNullHypot, + ScalarType* const chi2sNullHypot, + TimeComputationState* g_state, + ScalarType* g_ampMaxAlphaBeta, + ScalarType* g_ampMaxError, + ScalarType* g_timeMax, + ScalarType* g_timeError, + EcalMultifitParametersDevice::ConstView paramsDev) const { + /// launch ctx parameters are + /// 10 threads per channel, N channels per block, Y blocks + /// TODO: do we need to keep the state around or can be removed?! 
+ //#define DEBUG_FINDAMPLCHI2_AND_FINISH + + // constants + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + auto const nchannels = digisDevEB.size() + digisDevEE.size(); + auto const offsetForInputs = digisDevEB.size(); + + auto const elemsPerBlock = alpaka::getWorkDiv(acc)[0u]; + + // configure shared mem + // per block, we need #threads per block * 2 * sizeof(ScalarType) + // we run with N channels per block + auto* shr_sumAf = alpaka::getDynSharedMem(acc); + auto* shr_sumff = shr_sumAf + elemsPerBlock; + + for (auto gtxforward : cms::alpakatools::elements_with_stride(acc, nchannels * nsamples)) { + // go backwards through the loop to have valid values for shared variables when reading from higher element indices in serial execution + auto gtx = nchannels * nsamples - 1 - gtxforward; + auto const ch = gtx / nsamples; + auto const elemIdx = gtx % elemsPerBlock; + auto const sample = elemIdx % nsamples; + + auto const* dids = ch >= offsetForInputs ? digisDevEE.id() : digisDevEB.id(); + auto const inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch; + + auto state = g_state[ch]; + auto const did = DetId{dids[inputCh]}; + auto* const amplitudeFitParameters = did.subdetId() == EcalBarrel ? paramsDev.amplitudeFitParamsEB().data() + : paramsDev.amplitudeFitParamsEE().data(); + + // TODO is that better than storing into global and launching another kernel + // for the first 10 threads + if (state == TimeComputationState::NotFinished) { + auto const alpha = amplitudeFitParameters[0]; + auto const beta = amplitudeFitParameters[1]; + auto const alphabeta = alpha * beta; + auto const invalphabeta = 1. / alphabeta; + auto const tMaxAlphaBeta = g_tMaxAlphaBeta[ch]; + auto const sample_value = sample_values[gtx]; + auto const sample_value_error = sample_value_errors[gtx]; + auto const inverr2 = + useless_samples[gtx] ? static_cast(0) : 1. 
/ (sample_value_error * sample_value_error); + auto const offset = (static_cast(sample) - tMaxAlphaBeta) * invalphabeta; + auto const term1 = 1. + offset; + auto const f = term1 > 1e-6 ? fast_expf(alpha * (fast_logf(term1) - offset)) : static_cast(0.); + auto const sumAf = sample_value * (f * inverr2); + auto const sumff = f * (f * inverr2); + + // store into shared mem + shr_sumAf[elemIdx] = sumAf; + shr_sumff[elemIdx] = sumff; + } else { + shr_sumAf[elemIdx] = 0; + shr_sumff[elemIdx] = 0; + } + + alpaka::syncBlockThreads(acc); + + // reduce + // unroll completely here (but hardcoded) + if (sample < 5) { + shr_sumAf[elemIdx] += shr_sumAf[elemIdx + 5]; + shr_sumff[elemIdx] += shr_sumff[elemIdx + 5]; + } + + alpaka::syncBlockThreads(acc); + + if (sample < 2) { + // will need to subtract for ltx = 3, we double count here + shr_sumAf[elemIdx] += shr_sumAf[elemIdx + 2] + shr_sumAf[elemIdx + 3]; + shr_sumff[elemIdx] += shr_sumff[elemIdx + 2] + shr_sumff[elemIdx + 3]; + } + + alpaka::syncBlockThreads(acc); + + if (sample == 0) { + // exit if the state is done + // note, we do not exit before all __synchtreads are finished + if (state == TimeComputationState::Finished) { + g_timeMax[ch] = 5; + g_timeError[ch] = -999; + continue; + } + + // subtract to avoid double counting + auto const sumff = shr_sumff[elemIdx] + shr_sumff[elemIdx + 1] - shr_sumff[elemIdx + 3]; + auto const sumAf = shr_sumAf[elemIdx] + shr_sumAf[elemIdx + 1] - shr_sumAf[elemIdx + 3]; + + auto const ampMaxAlphaBeta = sumff > 0 ? 
sumAf / sumff : 0; + auto const sumAA = sumAAsNullHypot[ch]; + auto const sum0 = sum0sNullHypot[ch]; + auto const nullChi2 = chi2sNullHypot[ch]; + if (sumff > 0) { + auto const chi2AlphaBeta = (sumAA - sumAf * sumAf / sumff) / sum0; + if (chi2AlphaBeta > nullChi2) { + // null hypothesis is better + state = TimeComputationState::Finished; +#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH + printf("ch = %d chi2AlphaBeta = %f nullChi2 = %f sumAA = %f sumAf = %f sumff = %f sum0 = %f\n", + ch, + chi2AlphaBeta, + nullChi2, + sumAA, + sumAf, + sumff, + sum0); +#endif + } + + // store to global + g_ampMaxAlphaBeta[ch] = ampMaxAlphaBeta; + } else { +#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH + printf("ch = %d sum0 = %f sumAA = %f sumff = %f sumAf = %f\n", ch, sum0, sumAA, sumff, sumAf); +#endif + state = TimeComputationState::Finished; + } + + // store the state to global and finish calcs + g_state[ch] = state; + if (state == TimeComputationState::Finished) { + // store default values into global + g_timeMax[ch] = 5; + g_timeError[ch] = -999; +#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH + printf("ch = %d finished state\n", ch); +#endif + continue; + } + + auto const ampMaxError = g_ampMaxError[ch]; + auto const test_ratio = ampMaxAlphaBeta / ampMaxError; + auto const accTimeMax = g_accTimeMax[ch]; + auto const accTimeWgt = g_accTimeWgt[ch]; + auto const tMaxAlphaBeta = g_tMaxAlphaBeta[ch]; + auto const tMaxErrorAlphaBeta = g_tMaxErrorAlphaBeta[ch]; + // branch to separate large vs small pulses + // see cpu version for more info + if (test_ratio > 5. && accTimeWgt > 0) { + auto const tMaxRatio = accTimeWgt > 0 ? accTimeMax / accTimeWgt : static_cast(0); + auto const tMaxErrorRatio = accTimeWgt > 0 ? 1. / std::sqrt(accTimeWgt) : static_cast(0); + + if (test_ratio > 10.) 
{ + g_timeMax[ch] = tMaxRatio; + g_timeError[ch] = tMaxErrorRatio; + +#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH + printf("ch = %d tMaxRatio = %f tMaxErrorRatio = %f\n", ch, tMaxRatio, tMaxErrorRatio); +#endif + } else { + auto const timeMax = (tMaxAlphaBeta * (10. - ampMaxAlphaBeta / ampMaxError) + + tMaxRatio * (ampMaxAlphaBeta / ampMaxError - 5.)) / + 5.; + auto const timeError = (tMaxErrorAlphaBeta * (10. - ampMaxAlphaBeta / ampMaxError) + + tMaxErrorRatio * (ampMaxAlphaBeta / ampMaxError - 5.)) / + 5.; + state = TimeComputationState::Finished; + g_state[ch] = state; + g_timeMax[ch] = timeMax; + g_timeError[ch] = timeError; + +#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH + printf("ch = %d timeMax = %f timeError = %f\n", ch, timeMax, timeError); +#endif + } + } else { + state = TimeComputationState::Finished; + g_state[ch] = state; + g_timeMax[ch] = tMaxAlphaBeta; + g_timeError[ch] = tMaxErrorAlphaBeta; + +#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH + printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f\n", ch, tMaxAlphaBeta, tMaxErrorAlphaBeta); +#endif + } + } + } + } + }; + + class Kernel_time_compute_fixMGPAslew { + using ScalarType = ::ecal::multifit::SampleVector::Scalar; + + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + EcalMultifitConditionsDevice::ConstView conditionsDev, + ScalarType* sample_values, + ScalarType* sample_value_errors, + bool* useless_sample_values) const { + // constants + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + + auto const nchannelsEB = digisDevEB.size(); + auto const offsetForInputs = nchannelsEB; + + auto const elemsPerBlock = alpaka::getWorkDiv(acc)[0u]; + + for (auto gtx : cms::alpakatools::elements_with_stride(acc, nchannelsEB * nsamples)) { + auto const elemIdx = gtx % elemsPerBlock; + auto const sample = elemIdx % nsamples; + auto const ch = gtx / nsamples; + + // remove thread for sample 0, 
oversubscribing is easier than .... + if (sample == 0) + continue; + + if (!use_sample(conditionsDev.sampleMask_EB(), sample)) + continue; + + int const inputGtx = ch >= offsetForInputs ? gtx - offsetForInputs * nsamples : gtx; + auto const* digis = ch >= offsetForInputs ? digisDevEE.data()->data() : digisDevEB.data()->data(); + + auto const gainIdPrev = ecalMGPA::gainId(digis[inputGtx - 1]); + auto const gainIdNext = ecalMGPA::gainId(digis[inputGtx]); + if (gainIdPrev >= 1 && gainIdPrev <= 3 && gainIdNext >= 1 && gainIdNext <= 3 && gainIdPrev < gainIdNext) { + sample_values[gtx - 1] = 0; + sample_value_errors[gtx - 1] = 1e+9; + useless_sample_values[gtx - 1] = true; + } + } + } + }; + + //#define ECAL_RECO_ALPAKA_TC_INIT_DEBUG + class Kernel_time_computation_init { + using ScalarType = ::ecal::multifit::SampleVector::Scalar; + + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + EcalMultifitConditionsDevice::ConstView conditionsDev, + ScalarType* sample_values, + ScalarType* sample_value_errors, + ScalarType* ampMaxError, + bool* useless_sample_values, + char* pedestal_nums) const { + // constants + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + + // indices + auto const nchannelsEB = digisDevEB.size(); + auto const nchannels = nchannelsEB + digisDevEE.size(); + auto const offsetForInputs = nchannelsEB; + auto const offsetForHashes = conditionsDev.offsetEE(); + + auto const elemsPerBlock = alpaka::getWorkDiv(acc)[0u]; + + // configure shared mem + auto* shrSampleValues = alpaka::getDynSharedMem(acc); + auto* shrSampleValueErrors = shrSampleValues + elemsPerBlock; + + for (auto txforward : cms::alpakatools::elements_with_stride(acc, nchannels * nsamples)) { + // go backwards through the loop to have valid values for shared variables when reading from higher element indices in serial execution + auto tx = nchannels * nsamples - 1 - 
txforward; + auto const ch = tx / nsamples; + auto const elemIdx = tx % elemsPerBlock; + + int const inputTx = ch >= offsetForInputs ? tx - offsetForInputs * nsamples : tx; + int const inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch; + auto const* digis = ch >= offsetForInputs ? digisDevEE.data()->data() : digisDevEB.data()->data(); + auto const* dids = ch >= offsetForInputs ? digisDevEE.id() : digisDevEB.id(); + + // indices/inits + auto const sample = tx % nsamples; + auto const input_ch_start = inputCh * nsamples; + ScalarType pedestal = 0.; + int num = 0; + + // 0 and 1 sample values + auto const adc0 = ecalMGPA::adc(digis[input_ch_start]); + auto const gainId0 = ecalMGPA::gainId(digis[input_ch_start]); + auto const adc1 = ecalMGPA::adc(digis[input_ch_start + 1]); + auto const gainId1 = ecalMGPA::gainId(digis[input_ch_start + 1]); + auto const did = DetId{dids[inputCh]}; + auto const isBarrel = did.subdetId() == EcalBarrel; + auto const sample_mask = isBarrel ? conditionsDev.sampleMask_EB() : conditionsDev.sampleMask_EE(); + auto const hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId()) + : offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId()); + + // set pedestal + // TODO this branch is non-divergent for a group of 10 threads + if (gainId0 == 1 && use_sample(sample_mask, 0)) { + pedestal = static_cast(adc0); + num = 1; + + auto const diff = adc1 - adc0; + if (gainId1 == 1 && use_sample(sample_mask, 1) && + std::abs(diff) < 3 * conditionsDev.pedestals_rms_x12()[hashedId]) { + pedestal = (pedestal + static_cast(adc1)) / 2.; + num = 2; + } + } else { + pedestal = conditionsDev.pedestals_mean_x12()[ch]; + } + + // ped subtracted and gain-renormalized samples. 
+ auto const gainId = ecalMGPA::gainId(digis[inputTx]); + auto const adc = ecalMGPA::adc(digis[inputTx]); + + bool bad = false; + ScalarType sample_value, sample_value_error; + // TODO divergent branch + // TODO: piece below is general both for amplitudes and timing + // potentially there is a way to reduce the amount of code... + if (!use_sample(sample_mask, sample)) { + bad = true; + sample_value = 0; + sample_value_error = 0; + } else if (gainId == 1) { + sample_value = static_cast(adc) - pedestal; + sample_value_error = conditionsDev.pedestals_rms_x12()[hashedId]; + } else if (gainId == 2) { + auto const mean_x6 = conditionsDev.pedestals_mean_x6()[hashedId]; + auto const rms_x6 = conditionsDev.pedestals_rms_x6()[hashedId]; + auto const gain12Over6 = conditionsDev.gain12Over6()[hashedId]; + sample_value = (static_cast(adc) - mean_x6) * gain12Over6; + sample_value_error = rms_x6 * gain12Over6; + } else if (gainId == 3) { + auto const mean_x1 = conditionsDev.pedestals_mean_x1()[hashedId]; + auto const rms_x1 = conditionsDev.pedestals_rms_x1()[hashedId]; + auto const gain12Over6 = conditionsDev.gain12Over6()[hashedId]; + auto const gain6Over1 = conditionsDev.gain6Over1()[hashedId]; + sample_value = (static_cast(adc) - mean_x1) * gain6Over1 * gain12Over6; + sample_value_error = rms_x1 * gain6Over1 * gain12Over6; + } else { + sample_value = 0; + sample_value_error = 0; + bad = true; + } + + // TODO: make sure we save things correctly when sample is useless + auto const useless_sample = (sample_value_error <= 0) | bad; + useless_sample_values[tx] = useless_sample; + sample_values[tx] = sample_value; + sample_value_errors[tx] = useless_sample ? 1e+9 : sample_value_error; + + // DEBUG +#ifdef ECAL_RECO_ALPAKA_TC_INIT_DEBUG + if (ch == 0) { + printf("sample = %d sample_value = %f sample_value_error = %f useless = %c\n", + sample, + sample_value, + sample_value_error, + useless_sample ? 
'1' : '0'); + } +#endif + + // store into the shared mem + shrSampleValues[elemIdx] = sample_value_error > 0 ? sample_value : std::numeric_limits::min(); + shrSampleValueErrors[elemIdx] = sample_value_error; + alpaka::syncBlockThreads(acc); + + // perform the reduction with min + if (sample < 5) { + // note, if equal -> we keep the value with lower sample as for cpu + shrSampleValueErrors[elemIdx] = shrSampleValues[elemIdx] < shrSampleValues[elemIdx + 5] + ? shrSampleValueErrors[elemIdx + 5] + : shrSampleValueErrors[elemIdx]; + shrSampleValues[elemIdx] = std::max(shrSampleValues[elemIdx], shrSampleValues[elemIdx + 5]); + } + alpaka::syncBlockThreads(acc); + + // a bit of an overkill, but easier than to compare across 3 values + if (sample < 3) { + shrSampleValueErrors[elemIdx] = shrSampleValues[elemIdx] < shrSampleValues[elemIdx + 3] + ? shrSampleValueErrors[elemIdx + 3] + : shrSampleValueErrors[elemIdx]; + shrSampleValues[elemIdx] = std::max(shrSampleValues[elemIdx], shrSampleValues[elemIdx + 3]); + } + alpaka::syncBlockThreads(acc); + + if (sample < 2) { + shrSampleValueErrors[elemIdx] = shrSampleValues[elemIdx] < shrSampleValues[elemIdx + 2] + ? shrSampleValueErrors[elemIdx + 2] + : shrSampleValueErrors[elemIdx]; + shrSampleValues[elemIdx] = std::max(shrSampleValues[elemIdx], shrSampleValues[elemIdx + 2]); + } + alpaka::syncBlockThreads(acc); + + if (sample == 0) { + // we only need the max error + auto const maxSampleValueError = shrSampleValues[elemIdx] < shrSampleValues[elemIdx + 1] + ? 
shrSampleValueErrors[elemIdx + 1] + : shrSampleValueErrors[elemIdx]; + + // # pedestal samples used + pedestal_nums[ch] = num; + // this is used downstream + ampMaxError[ch] = maxSampleValueError; + + // DEBUG +#ifdef ECAL_RECO_ALPAKA_TC_INIT_DEBUG + if (ch == 0) { + printf("pedestal_nums = %d ampMaxError = %f\n", num, maxSampleValueError); + } +#endif + } + } + } + }; + + /// + /// launch context parameters: 1 thread per channel + /// + //#define DEBUG_TIME_CORRECTION + class Kernel_time_correction_and_finalize { + using ScalarType = ::ecal::multifit::SampleVector::Scalar; + + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + EcalDigiDeviceCollection::ConstView digisDevEB, + EcalDigiDeviceCollection::ConstView digisDevEE, + EcalUncalibratedRecHitDeviceCollection::View uncalibRecHitsEB, + EcalUncalibratedRecHitDeviceCollection::View uncalibRecHitsEE, + EcalMultifitConditionsDevice::ConstView conditionsDev, + ScalarType* const g_timeMax, + ScalarType* const g_timeError, + ConfigurationParameters::type const timeConstantTermEB, + ConfigurationParameters::type const timeConstantTermEE, + ConfigurationParameters::type const timeNconstEB, + ConfigurationParameters::type const timeNconstEE, + ConfigurationParameters::type const amplitudeThresholdEB, + ConfigurationParameters::type const amplitudeThresholdEE, + ConfigurationParameters::type const outOfTimeThreshG12pEB, + ConfigurationParameters::type const outOfTimeThreshG12pEE, + ConfigurationParameters::type const outOfTimeThreshG12mEB, + ConfigurationParameters::type const outOfTimeThreshG12mEE, + ConfigurationParameters::type const outOfTimeThreshG61pEB, + ConfigurationParameters::type const outOfTimeThreshG61pEE, + ConfigurationParameters::type const outOfTimeThreshG61mEB, + ConfigurationParameters::type const outOfTimeThreshG61mEE) const { + // constants + constexpr auto nsamples = EcalDataFrame::MAXSAMPLES; + auto const nchannelsEB = digisDevEB.size(); + auto const nchannels = nchannelsEB + 
digisDevEE.size(); + auto const offsetForInputs = nchannelsEB; + auto const offsetForHashes = conditionsDev.offsetEE(); + + for (auto gtx : cms::alpakatools::elements_with_stride(acc, nchannels)) { + const int inputGtx = gtx >= offsetForInputs ? gtx - offsetForInputs : gtx; + auto const* dids = gtx >= offsetForInputs ? digisDevEE.id() : digisDevEB.id(); + auto const* digis = gtx >= offsetForInputs ? digisDevEE.data()->data() : digisDevEB.data()->data(); + + auto* g_amplitude = gtx >= nchannelsEB ? uncalibRecHitsEE.amplitude() : uncalibRecHitsEB.amplitude(); + auto* g_jitter = gtx >= nchannelsEB ? uncalibRecHitsEE.jitter() : uncalibRecHitsEB.jitter(); + auto* g_jitterError = gtx >= nchannelsEB ? uncalibRecHitsEE.jitterError() : uncalibRecHitsEB.jitterError(); + auto* flags = gtx >= nchannelsEB ? uncalibRecHitsEE.flags() : uncalibRecHitsEB.flags(); + + auto const did = DetId{dids[inputGtx]}; + auto const isBarrel = did.subdetId() == EcalBarrel; + auto const hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId()) + : offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId()); + // need to access the underlying data directly here because the std::arrays have different size for EB and EE, which is not compatible with the ? operator + auto* const amplitudeBins = isBarrel ? conditionsDev.timeBiasCorrections_amplitude_EB().data() + : conditionsDev.timeBiasCorrections_amplitude_EE().data(); + auto* const shiftBins = isBarrel ? conditionsDev.timeBiasCorrections_shift_EB().data() + : conditionsDev.timeBiasCorrections_shift_EE().data(); + auto const amplitudeBinsSize = + isBarrel ? conditionsDev.timeBiasCorrectionSizeEB() : conditionsDev.timeBiasCorrectionSizeEE(); + auto const timeConstantTerm = isBarrel ? timeConstantTermEB : timeConstantTermEE; + auto const timeNconst = isBarrel ? timeNconstEB : timeNconstEE; + auto const offsetTimeValue = isBarrel ? 
conditionsDev.timeOffset_EB() : conditionsDev.timeOffset_EE(); + auto const amplitudeThreshold = isBarrel ? amplitudeThresholdEB : amplitudeThresholdEE; + auto const outOfTimeThreshG12p = isBarrel ? outOfTimeThreshG12pEB : outOfTimeThreshG12pEE; + auto const outOfTimeThreshG12m = isBarrel ? outOfTimeThreshG12mEB : outOfTimeThreshG12mEE; + auto const outOfTimeThreshG61p = isBarrel ? outOfTimeThreshG61pEB : outOfTimeThreshG61pEE; + auto const outOfTimeThreshG61m = isBarrel ? outOfTimeThreshG61mEB : outOfTimeThreshG61mEE; + + // load some + auto const amplitude = g_amplitude[inputGtx]; + auto const rms_x12 = conditionsDev.pedestals_rms_x12()[hashedId]; + auto const timeCalibConst = conditionsDev.timeCalibConstants()[hashedId]; + + int myBin = -1; + for (size_t bin = 0; bin < amplitudeBinsSize; ++bin) { + if (amplitude > amplitudeBins[bin]) + myBin = bin; + else + break; + } + + ScalarType correction = 0; + if (myBin == -1) { + correction = shiftBins[0]; + } else if (myBin == static_cast(amplitudeBinsSize) - 1) { + correction = shiftBins[myBin]; + } else { + correction = shiftBins[myBin + 1] - shiftBins[myBin]; + correction *= (amplitude - amplitudeBins[myBin]) / (amplitudeBins[myBin + 1] - amplitudeBins[myBin]); + correction += shiftBins[myBin]; + } + + // correction * 1./25. + correction = correction * 0.04; + auto const timeMax = g_timeMax[gtx]; + auto const timeError = g_timeError[gtx]; + auto const jitter = timeMax - 5 + correction; + auto const jitterError = + std::sqrt(timeError * timeError + timeConstantTerm * timeConstantTerm * 0.04 * 0.04); // 0.04 = 1./25. 
+ +#ifdef DEBUG_TIME_CORRECTION + printf("ch = %d timeMax = %f timeError = %f jitter = %f correction = %f\n", + gtx, + timeMax, + timeError, + jitter, + correction); +#endif + + // store back to global + g_jitter[inputGtx] = jitter; + g_jitterError[inputGtx] = jitterError; + + // set the flag + // TODO: replace with something more efficient (if required), + // for now just to make it work + if (amplitude > amplitudeThreshold * rms_x12) { + auto threshP = outOfTimeThreshG12p; + auto threshM = outOfTimeThreshG12m; + if (amplitude > 3000.) { + for (int isample = 0; isample < nsamples; isample++) { + auto const gainid = ecalMGPA::gainId(digis[nsamples * inputGtx + isample]); + if (gainid != 1) { + threshP = outOfTimeThreshG61p; + threshM = outOfTimeThreshG61m; + break; + } + } + } + + auto const correctedTime = (timeMax - 5) * 25 + timeCalibConst + offsetTimeValue; + auto const nterm = timeNconst * rms_x12 / amplitude; + auto const sigmat = std::sqrt(nterm * nterm + timeConstantTerm * timeConstantTerm); + if (correctedTime > sigmat * threshP || correctedTime < -sigmat * threshM) + flags[inputGtx] |= 0x1 << EcalUncalibratedRecHit::kOutOfTime; + } + } + } + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit + +namespace alpaka::trait { + using namespace ALPAKA_ACCELERATOR_NAMESPACE::ecal::multifit; + + //! The trait for getting the size of the block shared dynamic memory for Kernel_time_compute_nullhypot. + template + struct BlockSharedMemDynSizeBytes { + //! \return The size of the shared memory allocated for a block. + template + ALPAKA_FN_HOST_ACC static auto getBlockSharedMemDynSizeBytes(Kernel_time_compute_nullhypot const&, + TVec const& threadsPerBlock, + TVec const& elemsPerThread, + TArgs const&...) 
-> std::size_t { + using ScalarType = ecal::multifit::SampleVector::Scalar; + + // return the amount of dynamic shared memory needed + std::size_t bytes = threadsPerBlock[0u] * elemsPerThread[0u] * 4 * sizeof(ScalarType); + return bytes; + } + }; + + //! The trait for getting the size of the block shared dynamic memory for Kernel_time_compute_makeratio. + template + struct BlockSharedMemDynSizeBytes { + template + ALPAKA_FN_HOST_ACC static auto getBlockSharedMemDynSizeBytes(Kernel_time_compute_makeratio const&, + TVec const& threadsPerBlock, + TVec const& elemsPerThread, + TArgs const&...) -> std::size_t { + using ScalarType = ecal::multifit::SampleVector::Scalar; + + std::size_t bytes = (8 * sizeof(ScalarType) + 3 * sizeof(bool)) * threadsPerBlock[0u] * elemsPerThread[0u]; + return bytes; + } + }; + + //! The trait for getting the size of the block shared dynamic memory for Kernel_time_compute_findamplchi2_and_finish. + template + struct BlockSharedMemDynSizeBytes { + template + ALPAKA_FN_HOST_ACC static auto getBlockSharedMemDynSizeBytes(Kernel_time_compute_findamplchi2_and_finish const&, + TVec const& threadsPerBlock, + TVec const& elemsPerThread, + TArgs const&...) -> std::size_t { + using ScalarType = ecal::multifit::SampleVector::Scalar; + + std::size_t bytes = 2 * threadsPerBlock[0u] * elemsPerThread[0u] * sizeof(ScalarType); + return bytes; + } + }; + + //! The trait for getting the size of the block shared dynamic memory for Kernel_time_computation_init. + template + struct BlockSharedMemDynSizeBytes { + template + ALPAKA_FN_HOST_ACC static auto getBlockSharedMemDynSizeBytes(Kernel_time_computation_init const&, + TVec const& threadsPerBlock, + TVec const& elemsPerThread, + TArgs const&...) 
-> std::size_t { + using ScalarType = ecal::multifit::SampleVector::Scalar; + + std::size_t bytes = 2 * threadsPerBlock[0u] * elemsPerThread[0u] * sizeof(ScalarType); + return bytes; + } + }; + +} // namespace alpaka::trait + +#endif // RecoLocalCalo_EcalRecProducers_plugins_TimeComputationKernels_h diff --git a/RecoLocalCalo/EcalRecProducers/python/ecalLocalCustom.py b/RecoLocalCalo/EcalRecProducers/python/ecalLocalCustom.py index 137c97ac7765a..12528d990a331 100644 --- a/RecoLocalCalo/EcalRecProducers/python/ecalLocalCustom.py +++ b/RecoLocalCalo/EcalRecProducers/python/ecalLocalCustom.py @@ -1,16 +1,16 @@ import FWCore.ParameterSet.Config as cms def configureEcalLocal25ns(process): - process.ecalMultiFitUncalibRecHit.cpu.activeBXs = [-5,-4,-3,-2,-1,0,1,2,3,4], - process.ecalMultiFitUncalibRecHit.cpu.useLumiInfoRunHeader = False + process.ecalMultiFitUncalibRecHitCPU.activeBXs = [-5,-4,-3,-2,-1,0,1,2,3,4], + process.ecalMultiFitUncalibRecHitCPU.useLumiInfoRunHeader = False return process def configureEcalLocal50ns(process): - process.ecalMultiFitUncalibRecHit.cpu.activeBXs = [-4,-2,0,2,4] - process.ecalMultiFitUncalibRecHit.cpu.useLumiInfoRunHeader = False + process.ecalMultiFitUncalibRecHitCPU.activeBXs = [-4,-2,0,2,4] + process.ecalMultiFitUncalibRecHitCPU.useLumiInfoRunHeader = False return process def configureEcalLocalNoOOTPU(process): - process.ecalMultiFitUncalibRecHit.cpu.activeBXs = [0] - process.ecalMultiFitUncalibRecHit.cpu.useLumiInfoRunHeader = False + process.ecalMultiFitUncalibRecHitCPU.activeBXs = [0] + process.ecalMultiFitUncalibRecHitCPU.useLumiInfoRunHeader = False return process diff --git a/RecoLocalCalo/EcalRecProducers/python/ecalMultiFitUncalibRecHit_cff.py b/RecoLocalCalo/EcalRecProducers/python/ecalMultiFitUncalibRecHit_cff.py index 4d8f415e40170..c6104c21b62db 100644 --- a/RecoLocalCalo/EcalRecProducers/python/ecalMultiFitUncalibRecHit_cff.py +++ b/RecoLocalCalo/EcalRecProducers/python/ecalMultiFitUncalibRecHit_cff.py @@ -4,8 +4,9 @@ # 
ECAL multifit running on CPU from RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi import ecalMultiFitUncalibRecHit as _ecalMultiFitUncalibRecHit +ecalMultiFitUncalibRecHitCPU = _ecalMultiFitUncalibRecHit.clone() ecalMultiFitUncalibRecHit = SwitchProducerCUDA( - cpu = _ecalMultiFitUncalibRecHit.clone() + cpu = ecalMultiFitUncalibRecHitCPU ) ecalMultiFitUncalibRecHitTask = cms.Task( @@ -13,6 +14,8 @@ ecalMultiFitUncalibRecHit ) +from Configuration.StandardSequences.Accelerators_cff import * + # ECAL conditions used by the multifit running on GPU from RecoLocalCalo.EcalRecProducers.ecalPedestalsGPUESProducer_cfi import ecalPedestalsGPUESProducer from RecoLocalCalo.EcalRecProducers.ecalGainRatiosGPUESProducer_cfi import ecalGainRatiosGPUESProducer @@ -64,3 +67,39 @@ # ECAL multifit running on CPU, or convert the uncalibrated rechits from SoA to legacy format ecalMultiFitUncalibRecHit, )) + +# modifications for alpaka +from Configuration.ProcessModifiers.alpaka_cff import alpaka + +# ECAL conditions used by the multifit running on the accelerator +from RecoLocalCalo.EcalRecProducers.ecalMultifitConditionsHostESProducer_cfi import ecalMultifitConditionsHostESProducer +from RecoLocalCalo.EcalRecProducers.ecalMultifitParametersHostESProducer_cfi import ecalMultifitParametersHostESProducer + +ecalMultifitParametersSource = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalMultifitParametersRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +# ECAL multifit running on the accelerator +from RecoLocalCalo.EcalRecProducers.ecalUncalibRecHitProducerPortable_cfi import ecalUncalibRecHitProducerPortable as _ecalUncalibRecHitProducerPortable +ecalMultiFitUncalibRecHitPortable = _ecalUncalibRecHitProducerPortable.clone( + digisLabelEB = 'ecalDigisPortable:ebDigis', + digisLabelEE = 'ecalDigisPortable:eeDigis' +) + +# replace the SwitchProducerCUDA branches with the module to convert the uncalibrated rechits from SoA to legacy format 
+from RecoLocalCalo.EcalRecProducers.ecalUncalibRecHitSoAToLegacy_cfi import ecalUncalibRecHitSoAToLegacy as _ecalUncalibRecHitSoAToLegacy +alpaka.toModify(ecalMultiFitUncalibRecHit, + cpu = _ecalUncalibRecHitSoAToLegacy.clone() +) + +alpaka.toReplaceWith(ecalMultiFitUncalibRecHitTask, cms.Task( + # ECAL conditions used by the multifit running on the accelerator + ecalMultifitConditionsHostESProducer, + ecalMultifitParametersHostESProducer, + # ECAL multifit running on device + ecalMultiFitUncalibRecHitPortable, + # ECAL multifit running on CPU, or convert the uncalibrated rechits from SoA to legacy format + ecalMultiFitUncalibRecHit, +)) diff --git a/RecoLocalCalo/EcalRecProducers/python/ecalRecHit_cfi.py b/RecoLocalCalo/EcalRecProducers/python/ecalRecHit_cfi.py index a7f2772724661..6ee0e23689449 100644 --- a/RecoLocalCalo/EcalRecProducers/python/ecalRecHit_cfi.py +++ b/RecoLocalCalo/EcalRecProducers/python/ecalRecHit_cfi.py @@ -99,6 +99,20 @@ recoverEBIsolatedChannels = False ) +# use CC timing method for Run3 and Phase 2 (carried over from Run3 era) +from Configuration.Eras.Modifier_run3_ecal_cff import run3_ecal +run3_ecal.toModify(ecalRecHit, + timeCalibTag = ':CC', + timeOffsetTag = ':CC' +) + +# this overrides the modifications made by run3_ecal if both modifiers are active +from Configuration.ProcessModifiers.gpuValidationEcal_cff import gpuValidationEcal +gpuValidationEcal.toModify(ecalRecHit, + timeCalibTag = ':', + timeOffsetTag = ':' +) + # Phase 2 modifications from Configuration.Eras.Modifier_phase2_ecal_devel_cff import phase2_ecal_devel phase2_ecal_devel.toModify(ecalRecHit, diff --git a/RecoLocalCalo/HGCalRecProducers/plugins/EERecHitGPU.cc b/RecoLocalCalo/HGCalRecProducers/plugins/EERecHitGPU.cc index aa0e41bd82b6c..5fdc4fbd3a93a 100644 --- a/RecoLocalCalo/HGCalRecProducers/plugins/EERecHitGPU.cc +++ b/RecoLocalCalo/HGCalRecProducers/plugins/EERecHitGPU.cc @@ -25,7 +25,6 @@ class EERecHitGPU : public edm::stream::EDProducer<> { public: explicit 
EERecHitGPU(const edm::ParameterSet &ps); ~EERecHitGPU() override; - void beginRun(edm::Run const &, edm::EventSetup const &) override; void produce(edm::Event &, const edm::EventSetup &) override; @@ -106,8 +105,6 @@ void EERecHitGPU::assert_sizes_constants_(const HGCConstantVectorData &vd) { "weights", HGCeeUncalibRecHitConstantData::ee_weights, vdata_.weights_.size()); } -void EERecHitGPU::beginRun(edm::Run const &, edm::EventSetup const &setup) {} - void EERecHitGPU::produce(edm::Event &event, const edm::EventSetup &setup) { cms::cuda::ScopedContextProduce ctx{event.streamID()}; diff --git a/RecoLocalCalo/HGCalRecProducers/plugins/HEBRecHitGPU.cc b/RecoLocalCalo/HGCalRecProducers/plugins/HEBRecHitGPU.cc index b9c08de83d519..cc2d206fe67bc 100644 --- a/RecoLocalCalo/HGCalRecProducers/plugins/HEBRecHitGPU.cc +++ b/RecoLocalCalo/HGCalRecProducers/plugins/HEBRecHitGPU.cc @@ -25,7 +25,6 @@ class HEBRecHitGPU : public edm::stream::EDProducer<> { public: explicit HEBRecHitGPU(const edm::ParameterSet &ps); ~HEBRecHitGPU() override; - void beginRun(edm::Run const &, edm::EventSetup const &) override; void produce(edm::Event &, const edm::EventSetup &) override; @@ -88,8 +87,6 @@ void HEBRecHitGPU::assert_sizes_constants_(const HGCConstantVectorData &vd) { edm::LogError("WrongSize") << this->assert_error_message_("weights", vdata_.fCPerMIP_.size()); } -void HEBRecHitGPU::beginRun(edm::Run const &, edm::EventSetup const &setup) {} - void HEBRecHitGPU::produce(edm::Event &event, const edm::EventSetup &setup) { cms::cuda::ScopedContextProduce ctx{event.streamID()}; diff --git a/RecoLocalCalo/HGCalRecProducers/plugins/HEFRecHitGPU.cc b/RecoLocalCalo/HGCalRecProducers/plugins/HEFRecHitGPU.cc index 7ceedccb5d28e..eeb1dc0209817 100644 --- a/RecoLocalCalo/HGCalRecProducers/plugins/HEFRecHitGPU.cc +++ b/RecoLocalCalo/HGCalRecProducers/plugins/HEFRecHitGPU.cc @@ -25,7 +25,6 @@ class HEFRecHitGPU : public edm::stream::EDProducer<> { public: explicit HEFRecHitGPU(const 
edm::ParameterSet &ps); ~HEFRecHitGPU() override; - void beginRun(edm::Run const &, edm::EventSetup const &) override; void produce(edm::Event &, const edm::EventSetup &) override; @@ -108,8 +107,6 @@ void HEFRecHitGPU::assert_sizes_constants_(const HGCConstantVectorData &vd) { "weights", HGChefUncalibRecHitConstantData::hef_weights, vdata_.weights_.size()); } -void HEFRecHitGPU::beginRun(edm::Run const &, edm::EventSetup const &setup) {} - void HEFRecHitGPU::produce(edm::Event &event, const edm::EventSetup &setup) { cms::cuda::ScopedContextProduce ctx{event.streamID()}; diff --git a/RecoLocalCalo/HcalRecProducers/src/HFPreReconstructor.cc b/RecoLocalCalo/HcalRecProducers/src/HFPreReconstructor.cc index 4dc732c7666fd..ff751d2e830e5 100644 --- a/RecoLocalCalo/HcalRecProducers/src/HFPreReconstructor.cc +++ b/RecoLocalCalo/HcalRecProducers/src/HFPreReconstructor.cc @@ -60,7 +60,6 @@ class HFPreReconstructor : public edm::stream::EDProducer<> { typedef std::pair PmtAnodeId; typedef std::pair QIE10InfoWithId; - void beginRun(const edm::Run&, const edm::EventSetup&) override; void produce(edm::Event&, const edm::EventSetup&) override; // Module configuration parameters @@ -202,8 +201,6 @@ void HFPreReconstructor::fillInfos(const edm::Event& e, const edm::EventSetup& e } } -void HFPreReconstructor::beginRun(const edm::Run& r, const edm::EventSetup& es) {} - // ------------ method called to produce the data ------------ void HFPreReconstructor::produce(edm::Event& e, const edm::EventSetup& eventSetup) { // Process the input data diff --git a/RecoLocalFastTime/FTLCommonAlgos/src/MTDTimeCalib.cc b/RecoLocalFastTime/FTLCommonAlgos/src/MTDTimeCalib.cc index 6a4932e4c3a1d..d798435d55230 100644 --- a/RecoLocalFastTime/FTLCommonAlgos/src/MTDTimeCalib.cc +++ b/RecoLocalFastTime/FTLCommonAlgos/src/MTDTimeCalib.cc @@ -39,15 +39,12 @@ float MTDTimeCalib::getTimeCalib(const MTDDetId& id) const { const RectangularMTDTopology& topo = static_cast(topoproxy.specificTopology()); 
BTLDetId::CrysLayout btlL = MTDTopologyMode::crysLayoutFromTopoMode(topo_->getMTDTopologyMode()); - if (btlL == BTLDetId::CrysLayout::tile) { - time_calib -= btlLightCollTime_; //simply remove the offset introduced at sim level - } else if (btlL == BTLDetId::CrysLayout::bar || btlL == BTLDetId::CrysLayout::barphiflat || - btlL == BTLDetId::CrysLayout::v2) { + if (static_cast(btlL) >= static_cast(BTLDetId::CrysLayout::barphiflat)) { //for bars in phi time_calib -= 0.5 * topo.pitch().first * btlLightCollSlope_; //time offset for bar time is L/2v - } else if (btlL == BTLDetId::CrysLayout::barzflat) { - //for bars in z - time_calib -= 0.5 * topo.pitch().second * btlLightCollSlope_; //time offset for bar time is L/2v + } else { + throw cms::Exception("MTDTimeCalib") + << "BTL topology mode " << static_cast(btlL) << " unsupported! Aborting"; } } else if (id.mtdSubDetector() == MTDDetId::ETL) { time_calib += etlTimeOffset_; diff --git a/RecoLocalMuon/Configuration/python/RecoLocalMuonCosmics_cff.py b/RecoLocalMuon/Configuration/python/RecoLocalMuonCosmics_cff.py index 622d27884080f..797e3f66cd190 100644 --- a/RecoLocalMuon/Configuration/python/RecoLocalMuonCosmics_cff.py +++ b/RecoLocalMuon/Configuration/python/RecoLocalMuonCosmics_cff.py @@ -62,9 +62,13 @@ _phase2_muonlocalrecoTask = _run3_muonlocalrecoTask.copy() _phase2_muonlocalrecoTask.add(me0LocalRecoTask) +_phase2_ge0_muonlocalrecoTask = _phase2_muonlocalrecoTask.copyAndExclude([me0LocalRecoTask]) + from Configuration.Eras.Modifier_run2_GEM_2017_cff import run2_GEM_2017 run2_GEM_2017.toReplaceWith( muonlocalrecoTask , _run2_GEM_2017_muonlocalrecoTask ) from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM run3_GEM.toReplaceWith( muonlocalrecoTask , _run3_muonlocalrecoTask ) from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon phase2_muon.toReplaceWith( muonlocalrecoTask , _phase2_muonlocalrecoTask ) +from Configuration.Eras.Modifier_phase2_GE0_cff import phase2_GE0 +phase2_GE0.toReplaceWith( 
muonlocalrecoTask , _phase2_ge0_muonlocalrecoTask ) diff --git a/RecoLocalMuon/DTSegment/test/DTRecSegment2DReader.cc b/RecoLocalMuon/DTSegment/test/DTRecSegment2DReader.cc deleted file mode 100644 index f6f39ab3c86d6..0000000000000 --- a/RecoLocalMuon/DTSegment/test/DTRecSegment2DReader.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** \file - * - * \author Stefano Lacaprara - INFN Legnaro - * \author Riccardo Bellan - INFN TO - */ - -/* This Class Header */ -#include "RecoLocalMuon/DTSegment/test/DTRecSegment2DReader.h" - -/* Collaborating Class Header */ -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/Event.h" - -#include "DataFormats/DTRecHit/interface/DTRecSegment2DCollection.h" - -#include "TFile.h" -#include "TH1F.h" - -/* C++ Headers */ -#include - -using namespace std; -/* ====================================================================== */ - -/// Constructor -DTRecSegment2DReader::DTRecSegment2DReader(const edm::ParameterSet& pset) { - // Get the debug parameter for verbose output - debug = pset.getUntrackedParameter("debug"); - theRootFileName = pset.getUntrackedParameter("rootFileName"); - - // the name of the 2D rec hits collection - theRecHits2DLabel = pset.getParameter("recHits2DLabel"); - - if(debug) - cout << "[DTRecSegment2DReader] Constructor called" << endl; - - // Create the root file - theFile = new TFile(theRootFileName.c_str(), "RECREATE"); - theFile->cd(); - hPositionX = new TH1F("hPositionX","X Position of the Segments",200,-210,210); -} - -/// Destructor -DTRecSegment2DReader::~DTRecSegment2DReader() { - if(debug) - cout << "[DTRecSegment2DReader] Destructor called" << endl; - - // Write the histos to file - theFile->cd(); - hPositionX->Write(); - theFile->Close(); -} - -/* Operations */ -void DTRecSegment2DReader::analyze(const edm::Event & event, const - edm::EventSetup& eventSetup) { - cout << endl<<"--- [DTRecSegment2DReader] Event analysed 
#Run: " << event.id().run() - << " #Event: " << event.id().event() << endl; - - // Get the rechit collection from the event - edm::Handle all2DSegments; - event.getByLabel(theRecHits2DLabel, all2DSegments); - - DTRecSegment2DCollection::const_iterator segment; - - cout<<"Reconstructed segments: "<begin(); segment != all2DSegments->end(); ++segment){ - cout<<*segment<Fill( (*segment).localPosition().x()); - } - cout<<"---"< - * \author Riccardo Bellan - INFN TO - * - */ - -/* Base Class Headers */ -#include "FWCore/Framework/interface/EDAnalyzer.h" - -/* Collaborating Class Declarations */ -#include "DataFormats/Common/interface/Handle.h" - -/* C++ Headers */ -#include - -class TFile; -class TH1F; - -namespace edm { - class ParameterSet; - class Event; - class EventSetup; -} - -/* ====================================================================== */ - -/* Class DTRecSegment2DReader Interface */ - -class DTRecSegment2DReader : public edm::EDAnalyzer { - - public: - -/// Constructor - DTRecSegment2DReader(const edm::ParameterSet& pset) ; - -/// Destructor - virtual ~DTRecSegment2DReader() ; - -/* Operations */ - void analyze(const edm::Event & event, const edm::EventSetup& eventSetup); - - protected: - - private: - bool debug; - std::string theRootFileName; - TFile* theFile; - //static std::string theAlgoName; - std::string theRecHits2DLabel; - - TH1F *hPositionX; - -}; -#endif // DTSegment_DTRecSegment2DReader_h - diff --git a/RecoLocalMuon/DTSegment/test/DTRecSegment4DReader.cc b/RecoLocalMuon/DTSegment/test/DTRecSegment4DReader.cc deleted file mode 100644 index ff0b968eb53ac..0000000000000 --- a/RecoLocalMuon/DTSegment/test/DTRecSegment4DReader.cc +++ /dev/null @@ -1,76 +0,0 @@ -/** \file - * - * \author Riccardo Bellan - INFN TO - */ - -/* This Class Header */ -#include "RecoLocalMuon/DTSegment/test/DTRecSegment4DReader.h" - -/* Collaborating Class Header */ -#include "FWCore/Framework/interface/MakerMacros.h" -#include 
"FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/Event.h" - -#include "DataFormats/DTRecHit/interface/DTRecSegment4DCollection.h" - -#include "TFile.h" -#include "TH1F.h" - -/* C++ Headers */ -#include - -using namespace std; -/* ====================================================================== */ - -/// Constructor -DTRecSegment4DReader::DTRecSegment4DReader(const edm::ParameterSet& pset) { - // Get the debug parameter for verbose output - debug = pset.getUntrackedParameter("debug"); - theRootFileName = pset.getUntrackedParameter("rootFileName"); - - // the name of the 4D rec hits collection - theRecHits4DLabel = pset.getParameter("recHits4DLabel"); - - if(debug) - cout << "[DTRecSegment4DReader] Constructor called" << endl; - - // Create the root file - theFile = new TFile(theRootFileName.c_str(), "RECREATE"); - theFile->cd(); - hPositionX = new TH1F("hPositionX","X Position of the Segments",200,-210,210); -} - -/// Destructor -DTRecSegment4DReader::~DTRecSegment4DReader() { - if(debug) - cout << "[DTRecSegment4DReader] Destructor called" << endl; - - // Write the histos to file - theFile->cd(); - hPositionX->Write(); - theFile->Close(); -} - -/* Operations */ -void DTRecSegment4DReader::analyze(const edm::Event & event, const - edm::EventSetup& eventSetup) { - cout << endl<<"--- [DTRecSegment4DReader] Event analysed #Run: " << event.id().run() - << " #Event: " << event.id().event() << endl; - - // Get the rechit collection from the event - edm::Handle all4DSegments; - event.getByLabel(theRecHits4DLabel, all4DSegments); - - DTRecSegment4DCollection::const_iterator segment; - - cout<<"Reconstructed segments: "<begin(); segment != all4DSegments->end(); ++segment){ - cout<<*segment<Fill( (*segment).localPosition().x()); - } - cout<<"---"< - * - */ - -/* Base Class Headers */ -#include "FWCore/Framework/interface/EDAnalyzer.h" - -/* Collaborating Class Declarations */ -#include "DataFormats/Common/interface/Handle.h" - -/* C++ 
Headers */ -#include - -class TFile; -class TH1F; - -namespace edm { - class ParameterSet; - class Event; - class EventSetup; -} - -/* Class DTRecSegment4DReader Interface */ - -class DTRecSegment4DReader : public edm::EDAnalyzer { - - public: - -/// Constructor - DTRecSegment4DReader(const edm::ParameterSet& pset) ; - -/// Destructor - virtual ~DTRecSegment4DReader() ; - -/* Operations */ - void analyze(const edm::Event & event, const edm::EventSetup& eventSetup); - - protected: - - private: - bool debug; - std::string theRootFileName; - TFile* theFile; - //static std::string theAlgoName; - std::string theRecHits4DLabel; - - TH1F *hPositionX; - -}; -#endif // DTSegment_DTRecSegment4DReader_h - diff --git a/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.cc b/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.cc index fde8193f346ce..a14d62b1d0696 100644 --- a/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.cc +++ b/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.cc @@ -10,6 +10,10 @@ #include "FWCore/MessageLogger/interface/MessageLogger.h" GEMSegmentBuilder::GEMSegmentBuilder(const edm::ParameterSet& ps) : geom_(nullptr) { + // Segment building selection + enableGE0 = ps.getParameter("enableGE0"); + enableGE12 = ps.getParameter("enableGE12"); + // Algo name segAlgoName = ps.getParameter("algo_name"); ge0AlgoName = ps.getParameter("ge0_name"); @@ -27,6 +31,46 @@ GEMSegmentBuilder::GEMSegmentBuilder(const edm::ParameterSet& ps) : geom_(nullpt } GEMSegmentBuilder::~GEMSegmentBuilder() {} +void GEMSegmentBuilder::fillDescription(edm::ParameterSetDescription& desc) { + desc.add("enableGE0", true); + desc.add("enableGE12", false); + desc.add("ge0_name", "GE0SegAlgoRU"); + desc.add("algo_name", "GEMSegmentAlgorithm"); + + edm::ParameterSetDescription ge0AlgoConfigDesc; + ge0AlgoConfigDesc.add("allowWideSegments", true); + ge0AlgoConfigDesc.add("doCollisions", true); + ge0AlgoConfigDesc.add("maxChi2Additional", 100); + ge0AlgoConfigDesc.add("maxChi2Prune", 50); + 
ge0AlgoConfigDesc.add("maxChi2GoodSeg", 50); + ge0AlgoConfigDesc.add("maxPhiSeeds", 0.001096605744)->setComment("Assuming 384 strips"); + ge0AlgoConfigDesc.add("maxPhiAdditional", 0.001096605744)->setComment("Assuming 384 strips"); + ge0AlgoConfigDesc.add("maxETASeeds", 0.1)->setComment("Assuming 8 eta partitions"); + ge0AlgoConfigDesc.add("maxTOFDiff", 25); + ge0AlgoConfigDesc.add("requireCentralBX", true) + ->setComment("require that a majority of hits come from central BX"); + ge0AlgoConfigDesc.add("minNumberOfHits", 4); + ge0AlgoConfigDesc.add("maxNumberOfHits", 300); + ge0AlgoConfigDesc.add("maxNumberOfHitsPerLayer", 100); + desc.add("ge0_pset", ge0AlgoConfigDesc); + + edm::ParameterSetDescription recAlgoConfigDesc; + recAlgoConfigDesc.addUntracked("GEMDebug", false); + recAlgoConfigDesc.add("minHitsPerSegment", 2); + recAlgoConfigDesc.add("preClustering", true) + ->setComment("False => all hits in chamber are given to the fitter"); + recAlgoConfigDesc.add("dXclusBoxMax", 1)->setComment("Clstr Hit dPhi"); + recAlgoConfigDesc.add("dYclusBoxMax", 5)->setComment("Clstr Hit dEta"); + recAlgoConfigDesc.add("preClusteringUseChaining", true) + ->setComment("True ==> use Chaining() , False ==> use Clustering() Fnct"); + recAlgoConfigDesc.add("dPhiChainBoxMax", .02)->setComment("Chain Hit dPhi"); + recAlgoConfigDesc.add("dEtaChainBoxMax", .05)->setComment("Chain Hit dEta"); + recAlgoConfigDesc.add("maxRecHitsInCluster", 4)->setComment("Does 4 make sense here?"); + recAlgoConfigDesc.add("clusterOnlySameBXRecHits", true) + ->setComment("only working for (preClustering && preClusteringUseChaining)"); + desc.add("algo_pset", recAlgoConfigDesc); +} + void GEMSegmentBuilder::build(const GEMRecHitCollection* recHits, GEMSegmentCollection& oc) { edm::LogVerbatim("GEMSegmentBuilder") << "[GEMSegmentBuilder::build] Total number of rechits in this event: " << recHits->size(); @@ -93,9 +137,9 @@ void GEMSegmentBuilder::build(const GEMRecHitCollection* recHits, GEMSegmentColl // 
given the superchamber select the appropriate algo... and run it std::vector segv; - if (chamber->id().station() == 0) + if (enableGE0 and chamber->id().station() == 0) segv = ge0Algo->run(ensemble, gemRecHits); - else + else if (enableGE12) segv = segAlgo->run(ensemble, gemRecHits); #ifdef EDM_ML_DEBUG // have lines below only compiled when in debug mode LogTrace("GEMSegmentBuilder") << "[GEMSegmentBuilder::build] found " << segv.size(); diff --git a/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.h b/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.h index e244ea9eb320b..6d14b30303cac 100644 --- a/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.h +++ b/RecoLocalMuon/GEMSegment/plugins/GEMSegmentBuilder.h @@ -17,6 +17,7 @@ #include "DataFormats/GEMRecHit/interface/GEMSegmentCollection.h" #include "Geometry/GEMGeometry/interface/GEMGeometry.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" class GEMSegmentAlgorithmBase; @@ -39,7 +40,11 @@ class GEMSegmentBuilder { */ void setGeometry(const GEMGeometry* g); + static void fillDescription(edm::ParameterSetDescription& descriptions); + private: + bool enableGE0; + bool enableGE12; std::string segAlgoName; std::string ge0AlgoName; edm::ParameterSet segAlgoPSet; diff --git a/RecoLocalMuon/GEMSegment/plugins/GEMSegmentProducer.cc b/RecoLocalMuon/GEMSegment/plugins/GEMSegmentProducer.cc index 73b6661af9a81..f06c2d3753882 100644 --- a/RecoLocalMuon/GEMSegment/plugins/GEMSegmentProducer.cc +++ b/RecoLocalMuon/GEMSegment/plugins/GEMSegmentProducer.cc @@ -12,6 +12,8 @@ #include "FWCore/Utilities/interface/InputTag.h" #include "FWCore/Utilities/interface/ESGetToken.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" #include "DataFormats/Common/interface/Handle.h" #include 
"DataFormats/GEMRecHit/interface/GEMRecHitCollection.h" @@ -30,6 +32,8 @@ class GEMSegmentProducer : public edm::stream::EDProducer<> { /// Produce the GEMSegment collection void produce(edm::Event&, const edm::EventSetup&) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + private: int iev; // events through edm::EDGetTokenT theGEMRecHitToken; @@ -45,6 +49,13 @@ GEMSegmentProducer::GEMSegmentProducer(const edm::ParameterSet& ps) : iev(0) { produces(); } +void GEMSegmentProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("gemRecHitLabel", edm::InputTag("gemRecHits")); + GEMSegmentBuilder::fillDescription(desc); + descriptions.add("gemSegments", desc); +} + void GEMSegmentProducer::produce(edm::Event& ev, const edm::EventSetup& setup) { LogDebug("GEMSegmentProducer") << "start producing segments for " << ++iev << "th event with GEM data"; diff --git a/RecoLocalMuon/GEMSegment/python/gemSegments_cfi.py b/RecoLocalMuon/GEMSegment/python/gemSegments_cfi.py deleted file mode 100644 index b49ba69a7fb7e..0000000000000 --- a/RecoLocalMuon/GEMSegment/python/gemSegments_cfi.py +++ /dev/null @@ -1,33 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -gemSegments = cms.EDProducer("GEMSegmentProducer", - gemRecHitLabel = cms.InputTag("gemRecHits"), - ge0_name = cms.string("GE0SegAlgoRU"), - algo_name = cms.string("GEMSegmentAlgorithm"), - ge0_pset = cms.PSet( - allowWideSegments = cms.bool(True), - doCollisions = cms.bool(True), - maxChi2Additional = cms.double(100.0), - maxChi2Prune = cms.double(50), - maxChi2GoodSeg = cms.double(50), - maxPhiSeeds = cms.double(0.001096605744), #Assuming 384 strips - maxPhiAdditional = cms.double(0.001096605744), #Assuming 384 strips - maxETASeeds = cms.double(0.1), #Assuming 8 eta partitions - maxTOFDiff = cms.double(25), - requireCentralBX = cms.bool(True), #require that a majority of hits come from central BX - minNumberOfHits = 
cms.uint32(4), - maxNumberOfHits = cms.uint32(300), - maxNumberOfHitsPerLayer = cms.uint32(100), - ), - algo_pset = cms.PSet( - minHitsPerSegment = cms.uint32(2), - preClustering = cms.bool(True), # False => all hits in chamber are given to the fitter - dXclusBoxMax = cms.double(1.), # Clstr Hit dPhi - dYclusBoxMax = cms.double(5.), # Clstr Hit dEta - preClusteringUseChaining = cms.bool(True), # True ==> use Chaining() , False ==> use Clustering() Fnct - dPhiChainBoxMax = cms.double(.02), # Chain Hit dPhi - dEtaChainBoxMax = cms.double(.05), # Chain Hit dEta - maxRecHitsInCluster = cms.int32(4), # Does 4 make sense here? - clusterOnlySameBXRecHits = cms.bool(True), # only working for (preClustering && preClusteringUseChaining) - ), -) diff --git a/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_EventContent_cff.py b/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_EventContent_cff.py index 3e4b6ef6ade3b..11d0b9f3612f2 100644 --- a/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_EventContent_cff.py +++ b/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_EventContent_cff.py @@ -19,8 +19,13 @@ ) RecoLocalTrackerRECO.outputCommands.extend(RecoLocalTrackerAOD.outputCommands) +from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker +phase2_tracker.toModify(RecoLocalTrackerRECO, outputCommands = RecoLocalTrackerRECO.outputCommands + ['keep *_siPhase2Clusters_*_*','keep *_siPhase2RecHits_*_*'] ) + # FEVT content RecoLocalTrackerFEVT = cms.PSet( outputCommands = cms.untracked.vstring() ) RecoLocalTrackerFEVT.outputCommands.extend(RecoLocalTrackerRECO.outputCommands) + +phase2_tracker.toModify(RecoLocalTrackerFEVT, outputCommands = RecoLocalTrackerFEVT.outputCommands + ['keep *_siPhase2Clusters_*_*','keep *_siPhase2RecHits_*_*'] ) diff --git a/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_cff.py b/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_cff.py index 
a47f16f301519..12db113b1eb63 100644 --- a/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_cff.py +++ b/RecoLocalTracker/Configuration/python/RecoLocalTracker_Cosmics_cff.py @@ -11,4 +11,19 @@ pixeltrackerlocalrecoTask = cms.Task(siPixelClusters,siPixelRecHits) striptrackerlocalrecoTask = cms.Task(siStripZeroSuppression,siStripClusters,siStripMatchedRecHits) trackerlocalrecoTask = cms.Task(pixeltrackerlocalrecoTask,striptrackerlocalrecoTask) + +from RecoLocalTracker.SiPhase2Clusterizer.phase2TrackerClusterizer_cfi import * +from RecoLocalTracker.Phase2TrackerRecHits.Phase2StripCPEGeometricESProducer_cfi import * +from RecoLocalTracker.SiPhase2VectorHitBuilder.siPhase2RecHitMatcher_cfi import * +from RecoLocalTracker.SiPhase2VectorHitBuilder.siPhase2VectorHits_cfi import * +from RecoLocalTracker.Phase2TrackerRecHits.Phase2TrackerRecHits_cfi import * + +_pixeltrackerlocalrecoTask_phase2 = pixeltrackerlocalrecoTask.copy() +_pixeltrackerlocalrecoTask_phase2.add(siPhase2Clusters) +_pixeltrackerlocalrecoTask_phase2.add(siPhase2RecHits) +#_pixeltrackerlocalrecoTask_phase2.add(siPhase2VectorHits) +phase2_tracker.toReplaceWith(pixeltrackerlocalrecoTask, _pixeltrackerlocalrecoTask_phase2) +phase2_tracker.toReplaceWith(trackerlocalrecoTask, trackerlocalrecoTask.copyAndExclude([striptrackerlocalrecoTask])) + trackerlocalreco = cms.Sequence(trackerlocalrecoTask) + diff --git a/RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h b/RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h new file mode 100644 index 0000000000000..971e74f4cd683 --- /dev/null +++ b/RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h @@ -0,0 +1,27 @@ +#ifndef RecoLocalTracker_Records_PixelCPEFastParamsRecord_h +#define RecoLocalTracker_Records_PixelCPEFastParamsRecord_h + +#include "FWCore/Framework/interface/EventSetupRecordImplementation.h" +#include "FWCore/Framework/interface/DependentRecordImplementation.h" +#include 
"Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/Records/interface/IdealGeometryRecord.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "CondFormats/DataRecord/interface/SiPixelLorentzAngleRcd.h" +#include "CondFormats/DataRecord/interface/SiPixelGenErrorDBObjectRcd.h" +#include "CalibTracker/Records/interface/SiPixelTemplateDBObjectESProducerRcd.h" +#include "CalibTracker/Records/interface/SiPixel2DTemplateDBObjectESProducerRcd.h" + +#include "FWCore/Utilities/interface/mplVector.h" + +class PixelCPEFastParamsRecord + : public edm::eventsetup::DependentRecordImplementation > {}; + +#endif // RecoLocalTracker_Records_PixelCPEFastParamsRecord_h diff --git a/RecoLocalTracker/Records/src/PixelCPEFastParamsRecord.cc b/RecoLocalTracker/Records/src/PixelCPEFastParamsRecord.cc new file mode 100644 index 0000000000000..1410d7c1e66bf --- /dev/null +++ b/RecoLocalTracker/Records/src/PixelCPEFastParamsRecord.cc @@ -0,0 +1,5 @@ +#include "RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h" +#include "FWCore/Framework/interface/eventsetuprecord_registration_macro.h" +#include "FWCore/Utilities/interface/typelookup.h" + +EVENTSETUP_RECORD_REG(PixelCPEFastParamsRecord); diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelClusterThresholds.h b/RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h similarity index 82% rename from RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelClusterThresholds.h rename to RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h index f9ebb16ea2c7c..c224483bda40a 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelClusterThresholds.h +++ b/RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h @@ -1,5 +1,9 @@ -#ifndef RecoLocalTracker_SiPixelClusterizer_plugins_SiPixelClusterThresholds_h -#define 
RecoLocalTracker_SiPixelClusterizer_plugins_SiPixelClusterThresholds_h +#ifndef RecoLocalTracker_SiPixelClusterizer_interface_SiPixelClusterThresholds_h +#define RecoLocalTracker_SiPixelClusterizer_interface_SiPixelClusterThresholds_h + +/* This struct is an implementation detail of this package. + * It's in the interface directory because it needs to be shared by the legacy, CUDA, and Alpaka plugins. + */ struct SiPixelClusterThresholds { inline constexpr int32_t getThresholdForLayerOnCondition(bool isLayer1) const noexcept { @@ -51,4 +55,4 @@ struct SiPixelClusterThresholds { phase2KinkADC(phase2KinkADC) {} }; -#endif // RecoLocalTracker_SiPixelClusterizer_plugins_SiPixelClusterThresholds_h +#endif // RecoLocalTracker_SiPixelClusterizer_interface_SiPixelClusterThresholds_h diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/BuildFile.xml b/RecoLocalTracker/SiPixelClusterizer/plugins/BuildFile.xml index 1bc0c60a0d298..83bdae62636e0 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/BuildFile.xml +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/BuildFile.xml @@ -1,16 +1,20 @@ - - + + + - + + + + @@ -18,3 +22,14 @@ + + + + + + + + + + + diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoA.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoA.cc index 0bf734b6cd589..0bfa989c92969 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoA.cc +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoA.cc @@ -14,12 +14,14 @@ #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ParameterSetDescription.h" -#include "Geometry/Records/interface/TrackerTopologyRcd.h" #include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include 
"RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" // local include(s) #include "PixelClusterizerBase.h" -#include "SiPixelClusterThresholds.h" + +//#define GPU_DEBUG template class SiPixelDigisClustersFromSoAT : public edm::global::EDProducer<> { @@ -34,7 +36,7 @@ class SiPixelDigisClustersFromSoAT : public edm::global::EDProducer<> { const edm::ESGetToken topoToken_; - edm::EDGetTokenT digiGetToken_; + edm::EDGetTokenT digiGetToken_; edm::EDPutTokenT> digiPutToken_; edm::EDPutTokenT clusterPutToken_; @@ -48,7 +50,7 @@ class SiPixelDigisClustersFromSoAT : public edm::global::EDProducer<> { template SiPixelDigisClustersFromSoAT::SiPixelDigisClustersFromSoAT(const edm::ParameterSet& iConfig) : topoToken_(esConsumes()), - digiGetToken_(consumes(iConfig.getParameter("src"))), + digiGetToken_(consumes(iConfig.getParameter("src"))), clusterPutToken_(produces()), clusterThresholds_(iConfig.getParameter("clusterThreshold_layer1"), iConfig.getParameter("clusterThreshold_otherLayers")), @@ -122,7 +124,7 @@ void SiPixelDigisClustersFromSoAT::produce(edm::StreamID, for (int32_t ic = 0; ic < nclus + 1; ++ic) { auto const& acluster = aclusters[ic]; // in any case we cannot go out of sync with gpu... - if (!std::is_base_of::value and acluster.charge < clusterThreshold) + if (acluster.charge < clusterThreshold) edm::LogWarning("SiPixelDigisClustersFromSoA") << "cluster below charge Threshold " << "Layer/DetId/clusId " << layer << '/' << detId << '/' << ic << " size/charge " << acluster.isize << '/' << acluster.charge; @@ -148,6 +150,10 @@ void SiPixelDigisClustersFromSoAT::produce(edm::StreamID, spc.abort(); }; +#ifdef GPU_DEBUG + std::cout << "Dumping all digis. 
nDigis = " << nDigis << std::endl; +#endif + for (uint32_t i = 0; i < nDigis; i++) { // check for uninitialized digis if (digis.rawIdArr(i) == 0) @@ -161,6 +167,9 @@ void SiPixelDigisClustersFromSoAT::produce(edm::StreamID, assert(digis.rawIdArr(i) > 109999); #endif if (detId != digis.rawIdArr(i)) { +#ifdef GPU_DEBUG + std::cout << ">> Closed module --" << detId << "; nclus = " << nclus << std::endl; +#endif // new module fillClusters(detId); #ifdef EDM_ML_DEBUG @@ -178,6 +187,12 @@ void SiPixelDigisClustersFromSoAT::produce(edm::StreamID, } } PixelDigi dig(digis.pdigi(i)); + +#ifdef GPU_DEBUG + std::cout << i << ";" << digis.rawIdArr(i) << ";" << digis.clus(i) << ";" << digis.pdigi(i) << ";" << digis.adc(i) + << ";" << dig.row() << ";" << dig.column() << std::endl; +#endif + if (storeDigis_) (*detDigis).data.emplace_back(dig); // fill clusters diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoAAlpaka.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoAAlpaka.cc new file mode 100644 index 0000000000000..423951f4cb74f --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelDigisClustersFromSoAAlpaka.cc @@ -0,0 +1,241 @@ +#include + +#include "DataFormats/Common/interface/DetSetVector.h" +#include "DataFormats/DetId/interface/DetId.h" +#include "DataFormats/SiPixelCluster/interface/SiPixelCluster.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/SiPixelDigi/interface/PixelDigi.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisHost.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include 
"FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" + +// local include(s) +#include "PixelClusterizerBase.h" + +//#define EDM_ML_DEBUG +//#define GPU_DEBUG + +template +class SiPixelDigisClustersFromSoAAlpaka : public edm::global::EDProducer<> { +public: + explicit SiPixelDigisClustersFromSoAAlpaka(const edm::ParameterSet& iConfig); + ~SiPixelDigisClustersFromSoAAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void produce(edm::StreamID, edm::Event& iEvent, const edm::EventSetup& iSetup) const override; + + edm::ESGetToken const topoToken_; + edm::EDGetTokenT const digisHostToken_; + const SiPixelClusterThresholds clusterThresholds_; // Cluster threshold in electrons + const bool produceDigis_; + const bool storeDigis_; + + edm::EDPutTokenT> digisPutToken_; + edm::EDPutTokenT clustersPutToken_; +}; + +template +SiPixelDigisClustersFromSoAAlpaka::SiPixelDigisClustersFromSoAAlpaka(const edm::ParameterSet& iConfig) + : topoToken_(esConsumes()), + digisHostToken_(consumes(iConfig.getParameter("src"))), + clusterThresholds_(iConfig.getParameter("clusterThreshold_layer1"), + iConfig.getParameter("clusterThreshold_otherLayers")), + produceDigis_(iConfig.getParameter("produceDigis")), + storeDigis_(produceDigis_ && iConfig.getParameter("storeDigis")), + clustersPutToken_(produces()) { + if (produceDigis_) + digisPutToken_ = produces>(); +} + +template +void SiPixelDigisClustersFromSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("src", edm::InputTag("siPixelDigisSoA")); + desc.add("clusterThreshold_layer1", pixelClustering::clusterThresholdLayerOne); + 
desc.add("clusterThreshold_otherLayers", pixelClustering::clusterThresholdOtherLayers); + desc.add("produceDigis", true); + desc.add("storeDigis", true); + + descriptions.addWithDefaultLabel(desc); +} + +template +void SiPixelDigisClustersFromSoAAlpaka::produce(edm::StreamID, + edm::Event& iEvent, + const edm::EventSetup& iSetup) const { + const auto& digisHost = iEvent.get(digisHostToken_); + const auto& digisView = digisHost.const_view(); + const uint32_t nDigis = digisHost.nDigis(); + + const auto& ttopo = iSetup.getData(topoToken_); + constexpr auto maxModules = TrackerTraits::numberOfModules; + + std::unique_ptr> outputDigis; + if (produceDigis_) + outputDigis = std::make_unique>(); + if (storeDigis_) + outputDigis->reserve(maxModules); + auto outputClusters = std::make_unique(); + outputClusters->reserve(maxModules, nDigis / 2); + + edm::DetSet* detDigis = nullptr; + uint32_t detId = 0; + + for (uint32_t i = 0; i < nDigis; i++) { + // check for uninitialized digis + // this is set in RawToDigi_kernel in SiPixelRawToClusterGPUKernel.cu + if (digisView[i].rawIdArr() == 0) + continue; + + // check for noisy/dead pixels (electrons set to 0) + if (digisView[i].adc() == 0) + continue; + + detId = digisView[i].rawIdArr(); + if (storeDigis_) { + detDigis = &outputDigis->find_or_insert(detId); + + if ((*detDigis).empty()) + (*detDigis).data.reserve(64); // avoid the first relocations + } + + break; + } + + int32_t nclus = -1; + PixelClusterizerBase::AccretionCluster aclusters[TrackerTraits::maxNumClustersPerModules]; +#ifdef EDM_ML_DEBUG + auto totClustersFilled = 0; +#endif + + auto fillClusters = [&](uint32_t detId) { + if (nclus < 0) + return; // this in reality should never happen + edmNew::DetSetVector::FastFiller spc(*outputClusters, detId); + auto layer = (DetId(detId).subdetId() == 1) ? 
ttopo.pxbLayer(detId) : 0; + auto clusterThreshold = clusterThresholds_.getThresholdForLayerOnCondition(layer == 1); + for (int32_t ic = 0; ic < nclus + 1; ++ic) { + auto const& acluster = aclusters[ic]; + // in any case we cannot go out of sync with gpu... + if (acluster.charge < clusterThreshold) + edm::LogWarning("SiPixelDigisClustersFromSoAAlpaka") + << "cluster below charge Threshold " + << "Layer/DetId/clusId " << layer << '/' << detId << '/' << ic << " size/charge " << acluster.isize << '/' + << acluster.charge << "\n"; + // sort by row (x) + spc.emplace_back(acluster.isize, acluster.adc, acluster.x, acluster.y, acluster.xmin, acluster.ymin, ic); + aclusters[ic].clear(); +#ifdef EDM_ML_DEBUG + ++totClustersFilled; + const auto& cluster{spc.back()}; + // LogDebug("SiPixelDigisClustersFromSoAAlpaka") + std::cout << "putting in this cluster " << ic << " " << cluster.charge() << " " << cluster.pixelADC().size() + << "\n"; +#endif + std::push_heap(spc.begin(), spc.end(), [](SiPixelCluster const& cl1, SiPixelCluster const& cl2) { + return cl1.minPixelRow() < cl2.minPixelRow(); + }); + } + nclus = -1; + // sort by row (x) + std::sort_heap(spc.begin(), spc.end(), [](SiPixelCluster const& cl1, SiPixelCluster const& cl2) { + return cl1.minPixelRow() < cl2.minPixelRow(); + }); + if (spc.empty()) + spc.abort(); + }; + +#ifdef GPU_DEBUG + std::cout << "Dumping all digis. 
nDigis = " << nDigis << std::endl; +#endif + for (uint32_t i = 0; i < nDigis; i++) { +#ifdef GPU_DEBUG + PixelDigi dig2{digisView[i].pdigi()}; + std::cout << i << ";" << digisView[i].rawIdArr() << ";" << digisView[i].clus() << ";" << digisView[i].pdigi() << ";" + << digisView[i].adc() << ";" << dig2.row() << ";" << dig2.column() << std::endl; +#endif + + // check for uninitialized digis + if (digisView[i].rawIdArr() == 0) + continue; + // check for noisy/dead pixels (electrons set to 0) + if (digisView[i].adc() == 0) + continue; + if (digisView[i].clus() >= -pixelClustering::invalidClusterId) + continue; // not in cluster; TODO add an assert for the size + if (digisView[i].clus() == pixelClustering::invalidModuleId) + continue; // from clusters killed by charge cut +#ifdef EDM_ML_DEBUG + assert(digisView[i].rawIdArr() > 109999); +#endif + if (detId != digisView[i].rawIdArr()) { +#ifdef GPU_DEBUG + std::cout << ">> Closed module --" << detId << "; nclus = " << nclus << std::endl; +#endif + // new module + fillClusters(detId); +#ifdef EDM_ML_DEBUG + assert(nclus == -1); +#endif + detId = digisView[i].rawIdArr(); + if (storeDigis_) { + detDigis = &outputDigis->find_or_insert(detId); + if ((*detDigis).empty()) + (*detDigis).data.reserve(64); // avoid the first relocations + else { + edm::LogWarning("SiPixelDigisClustersFromSoAAlpaka") + << "Problem det present twice in input! 
" << (*detDigis).detId(); + } + } + } + PixelDigi dig{digisView[i].pdigi()}; + + if (storeDigis_) + (*detDigis).data.emplace_back(dig); + // fill clusters +#ifdef EDM_ML_DEBUG + assert(digisView[i].clus() >= 0); + assert(digisView[i].clus() < static_cast(TrackerTraits::maxNumClustersPerModules)); +#endif + nclus = std::max(digisView[i].clus(), nclus); + auto row = dig.row(); + auto col = dig.column(); + SiPixelCluster::PixelPos pix(row, col); + aclusters[digisView[i].clus()].add(pix, digisView[i].adc()); + } + + // fill final clusters + if (detId > 0) + fillClusters(detId); + +#ifdef EDM_ML_DEBUG + LogDebug("SiPixelDigisClustersFromSoAAlpaka") << "filled " << totClustersFilled << " clusters"; +#endif + + if (produceDigis_) + iEvent.put(digisPutToken_, std::move(outputDigis)); + + iEvent.put(clustersPutToken_, std::move(outputClusters)); +} + +#include "FWCore/Framework/interface/MakerMacros.h" + +using SiPixelDigisClustersFromSoAAlpakaPhase1 = SiPixelDigisClustersFromSoAAlpaka; +DEFINE_FWK_MODULE(SiPixelDigisClustersFromSoAAlpakaPhase1); + +using SiPixelDigisClustersFromSoAAlpakaPhase2 = SiPixelDigisClustersFromSoAAlpaka; +DEFINE_FWK_MODULE(SiPixelDigisClustersFromSoAAlpakaPhase2); + +using SiPixelDigisClustersFromSoAAlpakaHIonPhase1 = SiPixelDigisClustersFromSoAAlpaka; +DEFINE_FWK_MODULE(SiPixelDigisClustersFromSoAAlpakaHIonPhase1); diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelPhase2DigiToClusterCUDA.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelPhase2DigiToClusterCUDA.cc index 260b288b581db..e270d31515842 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelPhase2DigiToClusterCUDA.cc +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelPhase2DigiToClusterCUDA.cc @@ -9,37 +9,20 @@ #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" #include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigiErrorsCUDA.h" #include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" -#include 
"CalibTracker/Records/interface/SiPixelGainCalibrationForHLTGPURcd.h" -#include "CalibTracker/SiPixelESProducers/interface/SiPixelROCsStatusAndMappingWrapper.h" -#include "CalibTracker/SiPixelESProducers/interface/SiPixelGainCalibrationForHLTGPU.h" -#include "CondFormats/DataRecord/interface/SiPixelFedCablingMapRcd.h" -#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingMap.h" -#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingTree.h" #include "DataFormats/FEDRawData/interface/FEDNumbering.h" -#include "DataFormats/FEDRawData/interface/FEDRawData.h" -#include "DataFormats/FEDRawData/interface/FEDRawDataCollection.h" #include "EventFilter/SiPixelRawToDigi/interface/PixelDataFormatter.h" #include "EventFilter/SiPixelRawToDigi/interface/PixelUnpackingRegions.h" #include "FWCore/Framework/interface/ConsumesCollector.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Framework/interface/ESTransientHandle.h" -#include "FWCore/Framework/interface/ESWatcher.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Framework/interface/stream/EDProducer.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ParameterSetDescription.h" -#include "FWCore/ServiceRegistry/interface/Service.h" -#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" #include "HeterogeneousCore/CUDACore/interface/ScopedContext.h" -#include "HeterogeneousCore/CUDAServices/interface/CUDAInterface.h" -#include "RecoTracker/Record/interface/CkfComponentsRecord.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" // local includes -#include "SiPixelClusterThresholds.h" #include "SiPixelRawToClusterGPUKernel.h" class 
SiPixelPhase2DigiToClusterCUDA : public edm::stream::EDProducer { @@ -69,6 +52,7 @@ class SiPixelPhase2DigiToClusterCUDA : public edm::stream::EDProducer(gpuClustering::maxNumDigis, ctx.stream()); auto yDigis = cms::cuda::make_host_unique(gpuClustering::maxNumDigis, ctx.stream()); @@ -126,20 +110,23 @@ void SiPixelPhase2DigiToClusterCUDA::acquire(const edm::Event& iEvent, const GeomDetUnit* genericDet = geom_->idToDetUnit(detIdObject); auto const gind = genericDet->index(); for (auto const& px : *DSViter) { - moduleIds[nDigis] = uint16_t(gind); + moduleIds[nDigis_] = uint16_t(gind); - xDigis[nDigis] = uint16_t(px.row()); - yDigis[nDigis] = uint16_t(px.column()); - adcDigis[nDigis] = uint16_t(px.adc()); + xDigis[nDigis_] = uint16_t(px.row()); + yDigis[nDigis_] = uint16_t(px.column()); + adcDigis[nDigis_] = uint16_t(px.adc()); - packedData[nDigis] = uint32_t(px.packedData()); + packedData[nDigis_] = uint32_t(px.packedData()); - rawIds[nDigis] = uint32_t(detid); + rawIds[nDigis_] = uint32_t(detid); - nDigis++; + nDigis_++; } } + if (nDigis_ == 0) + return; + gpuAlgo_.makePhase2ClustersAsync(clusterThresholds_, moduleIds.get(), xDigis.get(), @@ -147,13 +134,22 @@ void SiPixelPhase2DigiToClusterCUDA::acquire(const edm::Event& iEvent, adcDigis.get(), packedData.get(), rawIds.get(), - nDigis, + nDigis_, ctx.stream()); } void SiPixelPhase2DigiToClusterCUDA::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { cms::cuda::ScopedContextProduce ctx{ctxState_}; + if (nDigis_ == 0) { + ctx.emplace(iEvent, digiPutToken_, nDigis_, ctx.stream()); + ctx.emplace(iEvent, clusterPutToken_, pixelTopology::Phase2::numberOfModules, ctx.stream()); + if (includeErrors_) { + ctx.emplace(iEvent, digiErrorPutToken_, SiPixelDigiErrorsCUDA{}); + } + return; + } + auto tmp = gpuAlgo_.getResults(); ctx.emplace(iEvent, digiPutToken_, std::move(tmp.first)); ctx.emplace(iEvent, clusterPutToken_, std::move(tmp.second)); @@ -163,4 +159,5 @@ void 
SiPixelPhase2DigiToClusterCUDA::produce(edm::Event& iEvent, const edm::Even } // define as framework plugin +#include "FWCore/Framework/interface/MakerMacros.h" DEFINE_FWK_MODULE(SiPixelPhase2DigiToClusterCUDA); diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterCUDA.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterCUDA.cc index e426661eb3c33..0a763793d35fd 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterCUDA.cc +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterCUDA.cc @@ -10,8 +10,8 @@ #include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigiErrorsCUDA.h" #include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" #include "CalibTracker/Records/interface/SiPixelGainCalibrationForHLTGPURcd.h" -#include "CalibTracker/SiPixelESProducers/interface/SiPixelROCsStatusAndMappingWrapper.h" #include "CalibTracker/SiPixelESProducers/interface/SiPixelGainCalibrationForHLTGPU.h" +#include "CalibTracker/SiPixelESProducers/interface/SiPixelROCsStatusAndMappingWrapper.h" #include "CondFormats/DataRecord/interface/SiPixelFedCablingMapRcd.h" #include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingMap.h" #include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingTree.h" @@ -36,10 +36,10 @@ #include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" #include "HeterogeneousCore/CUDACore/interface/ScopedContext.h" #include "HeterogeneousCore/CUDAServices/interface/CUDAInterface.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" #include "RecoTracker/Record/interface/CkfComponentsRecord.h" // local includes -#include "SiPixelClusterThresholds.h" #include "SiPixelRawToClusterGPUKernel.h" template diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.cu b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.cu index 7b92dfc267e79..452b0e2097071 100644 --- 
a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.cu +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.cu @@ -26,13 +26,14 @@ #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h" -#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h" -#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h" -#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h" + // local includes #include "SiPixelRawToClusterGPUKernel.h" +#include "gpuCalibPixel.h" +#include "gpuClusterChargeCut.h" +#include "gpuClustering.h" -// #define GPU_DEBUG +//#define GPU_DEBUG namespace pixelgpudetails { @@ -288,7 +289,7 @@ namespace pixelgpudetails { const uint32_t wordCounter, const uint32_t *word, const uint8_t *fedIds, - SiPixelDigisCUDASOAView digisView, + SiPixelDigisSoA::View digisView, cms::cuda::SimpleVector *err, bool useQualityInfo, bool includeErrors) { diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h index 802ad2eb42c7e..fe9cc260a5853 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h @@ -2,24 +2,23 @@ #define RecoLocalTracker_SiPixelClusterizer_plugins_SiPixelRawToClusterGPUKernel_h #include + #include +#include "CUDADataFormats/SiPixelCluster/interface/SiPixelClustersCUDA.h" +#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigiErrorsCUDA.h" +#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" #include "DataFormats/SiPixelDetId/interface/PixelChannelIdentifier.h" #include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" #include 
"DataFormats/SiPixelRawData/interface/SiPixelFormatterErrors.h" -#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" -#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigiErrorsCUDA.h" -#include "CUDADataFormats/SiPixelCluster/interface/SiPixelClustersCUDA.h" #include "FWCore/Utilities/interface/typedefs.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" #include "HeterogeneousCore/CUDAUtilities/interface/SimpleVector.h" -#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/host_noncached_unique_ptr.h" -#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" - -// #define GPU_DEBUG +#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" -// local include(s) -#include "SiPixelClusterThresholds.h" +//#define GPU_DEBUG struct SiPixelROCsStatusAndMapping; class SiPixelGainForHLTonGPU; @@ -131,6 +130,14 @@ namespace pixelgpudetails { digis_d.setNModulesDigis(nModules_Clusters_h[0], nDigis); assert(nModules_Clusters_h[2] <= nModules_Clusters_h[1]); clusters_d.setNClusters(nModules_Clusters_h[1], nModules_Clusters_h[2]); + +#ifdef GPU_DEBUG + std::cout << "SiPixelClusterizerCUDA results:" << std::endl + << " > no. of digis: " << nDigis << std::endl + << " > no. of active modules: " << nModules_Clusters_h[0] << std::endl + << " > no. 
of clusters: " << nModules_Clusters_h[1] << std::endl + << " > bpix2 offset: " << nModules_Clusters_h[2] << std::endl; +#endif // need to explicitly deallocate while the associated CUDA // stream is still alive // diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/CalibPixel.h b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/CalibPixel.h new file mode 100644 index 0000000000000..d1f5509052468 --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/CalibPixel.h @@ -0,0 +1,136 @@ +#ifndef RecoLocalTracker_SiPixelClusterizer_plugins_alpaka_CalibPixel_h +#define RecoLocalTracker_SiPixelClusterizer_plugins_alpaka_CalibPixel_h + +#include +#include +#include +#include + +#include + +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTUtilities.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" + +//#define GPU_DEBUG + +namespace calibPixel { + using namespace cms::alpakatools; + + constexpr uint16_t InvId = std::numeric_limits::max() - 1; + // must be > MaxNumModules + + struct CalibDigis { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + SiPixelClusterThresholds clusterThresholds, + SiPixelDigisSoAView view, + SiPixelClustersSoAView clus_view, + const SiPixelGainCalibrationForHLTSoAConstView gains, + int numElements) const { + const float VCaltoElectronGain = clusterThresholds.vCaltoElectronGain; + const float VCaltoElectronGain_L1 = 
clusterThresholds.vCaltoElectronGain_L1; + const float VCaltoElectronOffset = clusterThresholds.vCaltoElectronOffset; + const float VCaltoElectronOffset_L1 = clusterThresholds.vCaltoElectronOffset_L1; + + // zero for next kernels... + if (cms::alpakatools::once_per_grid(acc)) { + clus_view[0].clusModuleStart() = clus_view[0].moduleStart() = 0; + } + + cms::alpakatools::for_each_element_in_grid_strided( + acc, phase1PixelTopology::numberOfModules, [&](uint32_t i) { clus_view[i].clusInModule() = 0; }); + cms::alpakatools::for_each_element_in_grid_strided(acc, numElements, [&](uint32_t i) { + auto dvgi = view[i]; + if (dvgi.moduleId() != InvId) { + bool isDeadColumn = false, isNoisyColumn = false; + int row = dvgi.xx(); + int col = dvgi.yy(); + auto ret = SiPixelGainUtilities::getPedAndGain(gains, dvgi.moduleId(), col, row, isDeadColumn, isNoisyColumn); + float pedestal = ret.first; + float gain = ret.second; + if (isDeadColumn | isNoisyColumn) { + dvgi.moduleId() = InvId; + dvgi.adc() = 0; + printf("bad pixel at %d in %d\n", i, dvgi.moduleId()); + } else { + float vcal = dvgi.adc() * gain - pedestal * gain; + + float conversionFactor = dvgi.moduleId() < 96 ? VCaltoElectronGain_L1 : VCaltoElectronGain; + float offset = dvgi.moduleId() < 96 ? 
VCaltoElectronOffset_L1 : VCaltoElectronOffset; +#ifdef GPU_DEBUG + auto old_adc = dvgi.adc(); +#endif + dvgi.adc() = std::max(100, int(vcal * conversionFactor + offset)); +#ifdef GPU_DEBUG + if (cms::alpakatools::once_per_grid(acc)) { + printf( + "module %d pixel %d -> old_adc = %d; vcal = %.2f; conversionFactor = %.2f; offset = %.2f; new_adc = " + "%d \n", + dvgi.moduleId(), + i, + old_adc, + vcal, + conversionFactor, + offset, + dvgi.adc()); + } +#endif + } + } + }); + } + }; + struct CalibDigisPhase2 { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + SiPixelClusterThresholds clusterThresholds, + SiPixelDigisSoAView view, + SiPixelClustersSoAView clus_view, + int numElements) const { + const float ElectronPerADCGain = clusterThresholds.electronPerADCGain; + const int8_t Phase2ReadoutMode = clusterThresholds.phase2ReadoutMode; + const uint16_t Phase2DigiBaseline = clusterThresholds.phase2DigiBaseline; + const uint8_t Phase2KinkADC = clusterThresholds.phase2KinkADC; + + // zero for next kernels... + if (cms::alpakatools::once_per_grid(acc)) { + clus_view[0].clusModuleStart() = clus_view[0].moduleStart() = 0; + } + + cms::alpakatools::for_each_element_in_grid_strided( + acc, phase2PixelTopology::numberOfModules, [&](uint32_t i) { clus_view[i].clusInModule() = 0; }); + cms::alpakatools::for_each_element_in_grid_strided(acc, numElements, [&](uint32_t i) { + auto dvgi = view[i]; + if (pixelClustering::invalidModuleId != dvgi.moduleId()) { + const int mode = (Phase2ReadoutMode < -1 ? -1 : Phase2ReadoutMode); + int adc_int = dvgi.adc(); + if (mode < 0) + adc_int = int(adc_int * ElectronPerADCGain); + else { + if (adc_int < Phase2KinkADC) + adc_int = int((adc_int + 0.5) * ElectronPerADCGain); + else { + const int8_t dspp = (Phase2ReadoutMode < 10 ? Phase2ReadoutMode : 10); + const int8_t ds = int8_t(dspp <= 1 ? 
1 : (dspp - 1) * (dspp - 1)); + adc_int -= Phase2KinkADC; + adc_int *= ds; + adc_int += Phase2KinkADC; + adc_int = ((adc_int + 0.5 * ds) * ElectronPerADCGain); + } + adc_int += int(Phase2DigiBaseline); + } + dvgi.adc() = std::min(adc_int, int(std::numeric_limits::max())); + } + }); + } + }; +} // namespace calibPixel + +#endif // RecoLocalTracker_SiPixelClusterizer_plugins_alpaka_CalibPixel_h diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/ClusterChargeCut.h b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/ClusterChargeCut.h new file mode 100644 index 0000000000000..4056090517aee --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/ClusterChargeCut.h @@ -0,0 +1,207 @@ +#ifndef RecoLocalTracker_SiPixelClusterizer_alpaka_ClusterChargeCut_h +#define RecoLocalTracker_SiPixelClusterizer_alpaka_ClusterChargeCut_h + +#include +#include + +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersSoA.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/prefixScan.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" + +//#define GPU_DEBUG + +namespace pixelClustering { + + template + struct ClusterChargeCut { + template + ALPAKA_FN_ACC void operator()( + const TAcc& acc, + SiPixelDigisSoAView digi_view, + SiPixelClustersSoAView clus_view, + SiPixelClusterThresholds + clusterThresholds, // charge cut on cluster in electrons (for layer 1 and for other layers) + const uint32_t numElements) const { + constexpr int startBPIX2 = TrackerTraits::layerStart[1]; + constexpr int32_t maxNumClustersPerModules = TrackerTraits::maxNumClustersPerModules; + [[maybe_unused]] constexpr int nMaxModules = TrackerTraits::numberOfModules; + + const uint32_t blockIdx(alpaka::getIdx(acc)[0u]); + auto firstModule = blockIdx; + auto endModule = clus_view[0].moduleStart(); + if 
(blockIdx >= endModule) + return; + + auto& charge = alpaka::declareSharedVar(acc); + auto& ok = alpaka::declareSharedVar(acc); + auto& newclusId = alpaka::declareSharedVar(acc); + + const uint32_t gridDimension(alpaka::getWorkDiv(acc)[0u]); + + for (auto module = firstModule; module < endModule; module += gridDimension) { + auto firstPixel = clus_view[1 + module].moduleStart(); + auto thisModuleId = digi_view[firstPixel].moduleId(); + + ALPAKA_ASSERT_OFFLOAD(nMaxModules < maxNumModules); + ALPAKA_ASSERT_OFFLOAD(startBPIX2 < nMaxModules); + + uint32_t nclus = clus_view[thisModuleId].clusInModule(); + if (nclus == 0) + return; + + if (cms::alpakatools::once_per_block(acc) && nclus > maxNumClustersPerModules) + printf("Warning too many clusters in module %d in block %d: %d > %d\n", + thisModuleId, + module, + nclus, + maxNumClustersPerModules); + + // Stride = block size. + const uint32_t blockDimension(alpaka::getWorkDiv(acc)[0u]); + + // Get thread / CPU element indices in block. + const auto& [firstElementIdxNoStride, endElementIdxNoStride] = + cms::alpakatools::element_index_range_in_block(acc, firstPixel); + + if (nclus > maxNumClustersPerModules) { + uint32_t firstElementIdx = firstElementIdxNoStride; + uint32_t endElementIdx = endElementIdxNoStride; + // remove excess FIXME find a way to cut charge first.... 
+ for (uint32_t i = firstElementIdx; i < numElements; ++i) { + if (not cms::alpakatools::next_valid_element_index_strided( + i, firstElementIdx, endElementIdx, blockDimension, numElements)) + break; + if (digi_view[i].moduleId() == invalidModuleId) + continue; // not valid + if (digi_view[i].moduleId() != thisModuleId) + break; // end of module + if (digi_view[i].clus() >= maxNumClustersPerModules) { + digi_view[i].moduleId() = invalidModuleId; + digi_view[i].clus() = invalidModuleId; + } + } + nclus = maxNumClustersPerModules; + } + +#ifdef GPU_DEBUG + if (thisModuleId % 100 == 1) + if (cms::alpakatools::once_per_block(acc)) + printf("start cluster charge cut for module %d in block %d\n", thisModuleId, module); +#endif + + ALPAKA_ASSERT_OFFLOAD(nclus <= maxNumClustersPerModules); + cms::alpakatools::for_each_element_in_block_strided(acc, nclus, [&](uint32_t i) { charge[i] = 0; }); + alpaka::syncBlockThreads(acc); + + uint32_t firstElementIdx = firstElementIdxNoStride; + uint32_t endElementIdx = endElementIdxNoStride; + for (uint32_t i = firstElementIdx; i < numElements; ++i) { + if (not cms::alpakatools::next_valid_element_index_strided( + i, firstElementIdx, endElementIdx, blockDimension, numElements)) + break; + if (digi_view[i].moduleId() == invalidModuleId) + continue; // not valid + if (digi_view[i].moduleId() != thisModuleId) + break; // end of module + alpaka::atomicAdd(acc, + &charge[digi_view[i].clus()], + static_cast(digi_view[i].adc()), + alpaka::hierarchy::Threads{}); + } + alpaka::syncBlockThreads(acc); + + auto chargeCut = clusterThresholds.getThresholdForLayerOnCondition(thisModuleId < startBPIX2); + bool allGood = true; + + cms::alpakatools::for_each_element_in_block_strided(acc, nclus, [&](uint32_t i) { + newclusId[i] = ok[i] = (charge[i] > chargeCut) ? 
1 : 0; + if (ok[i] == 0) + allGood = allGood && false; + + // #ifdef GPU_DEBUG + // printf("module %d -> chargeCut = %d; cluster %d; charge = %d; ok = %s\n",thisModuleId, chargeCut,i,charge[i],ok[i] > 0 ? " -> good" : "-> cut"); + // #endif + }); + alpaka::syncBlockThreads(acc); + + // if all clusters above threshold do nothing + // if (allGood) + // continue; + + // renumber + auto& ws = alpaka::declareSharedVar(acc); + constexpr uint32_t maxThreads = 1024; + auto minClust = std::min(nclus, maxThreads); + + cms::alpakatools::blockPrefixScan(acc, newclusId, minClust, ws); + + if constexpr (maxNumClustersPerModules > maxThreads) //only if needed + { + for (uint32_t offset = maxThreads; offset < nclus; offset += maxThreads) { + cms::alpakatools::blockPrefixScan(acc, newclusId + offset, nclus - offset, ws); + + cms::alpakatools::for_each_element_in_block_strided(acc, nclus - offset, [&](uint32_t i) { + uint32_t prevBlockEnd = ((i + offset / maxThreads) * maxThreads) - 1; + newclusId[i] += newclusId[prevBlockEnd]; + }); + alpaka::syncBlockThreads(acc); + } + } + + ALPAKA_ASSERT_OFFLOAD(nclus >= newclusId[nclus - 1]); + + if (nclus == newclusId[nclus - 1]) + return; + + clus_view[thisModuleId].clusInModule() = newclusId[nclus - 1]; + alpaka::syncBlockThreads(acc); + +#ifdef GPU_DEBUG + if (thisModuleId % 100 == 1) + if (cms::alpakatools::once_per_block(acc)) + printf("module %d -> chargeCut = %d; nclus (pre cut) = %d; nclus (after cut) = %d\n", + thisModuleId, + chargeCut, + nclus, + clus_view[thisModuleId].clusInModule()); +#endif + // mark bad cluster again + cms::alpakatools::for_each_element_in_block_strided(acc, nclus, [&](uint32_t i) { + if (0 == ok[i]) + newclusId[i] = invalidModuleId + 1; + }); + + alpaka::syncBlockThreads(acc); + + // reassign id + firstElementIdx = firstElementIdxNoStride; + endElementIdx = endElementIdxNoStride; + for (uint32_t i = firstElementIdx; i < numElements; ++i) { + if (not cms::alpakatools::next_valid_element_index_strided( + i, 
firstElementIdx, endElementIdx, blockDimension, numElements)) + break; + if (digi_view[i].moduleId() == invalidModuleId) + continue; // not valid + if (digi_view[i].moduleId() != thisModuleId) + break; // end of module + if (0 == ok[digi_view[i].clus()]) + digi_view[i].moduleId() = digi_view[i].clus() = invalidModuleId; + else + digi_view[i].clus() = newclusId[digi_view[i].clus()] - 1; + // digi_view[i].clus() = newclusId[digi_view[i].clus()] - 1; + // if (digi_view[i].clus() == invalidModuleId) + // digi_view[i].moduleId() = invalidModuleId; + } + + alpaka::syncBlockThreads(acc); + + //done + } + } + }; + +} // namespace pixelClustering + +#endif // diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/PixelClustering.h b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/PixelClustering.h new file mode 100644 index 0000000000000..7da68c7b2f5da --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/PixelClustering.h @@ -0,0 +1,455 @@ +#ifndef RecoLocalTracker_SiPixelClusterizer_alpaka_PixelClustering_h +#define RecoLocalTracker_SiPixelClusterizer_alpaka_PixelClustering_h + +#include +#include +#include +#include + +#include + +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +//#define GPU_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + namespace pixelClustering { + +#ifdef GPU_DEBUG + template >> + ALPAKA_STATIC_ACC_MEM_GLOBAL uint32_t gMaxHit = 0; +#endif + + namespace pixelStatus { + // Phase-1 pixel modules + constexpr uint32_t pixelSizeX = pixelTopology::Phase1::numRowsInModule; + constexpr uint32_t pixelSizeY = pixelTopology::Phase1::numColsInModule; + + // Use 0x00, 0x01, 0x03 so each can be OR'ed on top of the previous ones + 
enum Status : uint32_t { kEmpty = 0x00, kFound = 0x01, kDuplicate = 0x03 }; + + constexpr uint32_t bits = 2; + constexpr uint32_t mask = (0x01 << bits) - 1; + constexpr uint32_t valuesPerWord = sizeof(uint32_t) * 8 / bits; + constexpr uint32_t size = pixelSizeX * pixelSizeY / valuesPerWord; + + ALPAKA_FN_ACC ALPAKA_FN_INLINE constexpr uint32_t getIndex(uint16_t x, uint16_t y) { + return (pixelSizeX * y + x) / valuesPerWord; + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE constexpr uint32_t getShift(uint16_t x, uint16_t y) { + return (x % valuesPerWord) * 2; + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE constexpr Status getStatus(uint32_t const* __restrict__ status, + uint16_t x, + uint16_t y) { + uint32_t index = getIndex(x, y); + uint32_t shift = getShift(x, y); + return Status{(status[index] >> shift) & mask}; + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE constexpr bool isDuplicate(uint32_t const* __restrict__ status, + uint16_t x, + uint16_t y) { + return getStatus(status, x, y) == kDuplicate; + } + + /* FIXME + * In the more general case (e.g. a multithreaded CPU backend) there is a potential race condition + * between the read of status[index] at line NNN and the atomicCas at line NNN. + * We should investigate: + * - if `status` should be read through a `volatile` pointer (CUDA/ROCm) + * - if `status` should be read with an atomic load (CPU) + */ + template >> + ALPAKA_FN_ACC ALPAKA_FN_INLINE constexpr void promote(TAcc const& acc, + uint32_t* __restrict__ status, + const uint16_t x, + const uint16_t y) { + uint32_t index = getIndex(x, y); + uint32_t shift = getShift(x, y); + uint32_t old_word = status[index]; + uint32_t expected = old_word; + do { + expected = old_word; + Status old_status{(old_word >> shift) & mask}; + if (kDuplicate == old_status) { + // nothing to do + return; + } + Status new_status = (kEmpty == old_status) ? 
kFound : kDuplicate; + uint32_t new_word = old_word | (static_cast(new_status) << shift); + old_word = alpaka::atomicCas(acc, &status[index], expected, new_word, alpaka::hierarchy::Blocks{}); + } while (expected != old_word); + } + + } // namespace pixelStatus + + template + struct CountModules { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + SiPixelDigisSoAView digi_view, + SiPixelClustersSoAView clus_view, + const unsigned int numElements) const { + [[maybe_unused]] constexpr int nMaxModules = TrackerTraits::numberOfModules; + +#ifdef GPU_DEBUG + if (cms::alpakatools::once_per_grid(acc)) { + printf("Starting to count modules to set module starts:"); + } +#endif + cms::alpakatools::for_each_element_in_grid_strided(acc, numElements, [&](uint32_t i) { + digi_view[i].clus() = i; + if (::pixelClustering::invalidModuleId != digi_view[i].moduleId()) { + int j = i - 1; + while (j >= 0 and digi_view[j].moduleId() == ::pixelClustering::invalidModuleId) + --j; + if (j < 0 or digi_view[j].moduleId() != digi_view[i].moduleId()) { + // boundary... + auto loc = alpaka::atomicInc( + acc, clus_view.moduleStart(), std::decay_t(nMaxModules), alpaka::hierarchy::Blocks{}); +#ifdef GPU_DEBUG + printf("> New module (no. %d) found at digi %d \n", loc, i); +#endif + clus_view[loc + 1].moduleStart() = i; + } + } + }); + } + }; + + template + struct FindClus { + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + SiPixelDigisSoAView digi_view, + SiPixelClustersSoAView clus_view, + const unsigned int numElements) const { + constexpr bool isPhase2 = std::is_base_of::value; + constexpr const uint32_t pixelStatusSize = isPhase2 ? 
1 : pixelStatus::size; + + // packed words array used to store the pixelStatus of each pixel + auto& status = alpaka::declareSharedVar(acc); + + // find the index of the first pixel not belonging to this module (or invalid) + auto& msize = alpaka::declareSharedVar(acc); + + const uint32_t blockIdx = alpaka::getIdx(acc)[0u]; + if (blockIdx >= clus_view[0].moduleStart()) + return; + + auto firstModule = blockIdx; + auto endModule = clus_view[0].moduleStart(); + + const uint32_t gridDimension(alpaka::getWorkDiv(acc)[0u]); + + for (auto module = firstModule; module < endModule; module += gridDimension) { + auto firstPixel = clus_view[1 + module].moduleStart(); + auto thisModuleId = digi_view[firstPixel].moduleId(); + ALPAKA_ASSERT_OFFLOAD(thisModuleId < TrackerTraits::numberOfModules); +#ifdef GPU_DEBUG + if (thisModuleId % 100 == 1) + if (cms::alpakatools::once_per_block(acc)) + printf("start clusterizer for module %d in block %d\n", thisModuleId, module); +#endif + + msize = numElements; + alpaka::syncBlockThreads(acc); + + // Stride = block size. + const uint32_t blockDimension(alpaka::getWorkDiv(acc)[0u]); + + // Get thread / CPU element indices in block. 
+ const auto& [firstElementIdxNoStride, endElementIdxNoStride] = + cms::alpakatools::element_index_range_in_block(acc, firstPixel); + uint32_t firstElementIdx = firstElementIdxNoStride; + uint32_t endElementIdx = endElementIdxNoStride; + + // skip threads not associated to an existing pixel + for (uint32_t i = firstElementIdx; i < numElements; ++i) { + if (not cms::alpakatools::next_valid_element_index_strided( + i, firstElementIdx, endElementIdx, blockDimension, numElements)) + break; + auto id = digi_view[i].moduleId(); + if (id == ::pixelClustering::invalidModuleId) // skip invalid pixels + continue; + if (id != thisModuleId) { // find the first pixel in a different module + alpaka::atomicMin(acc, &msize, i, alpaka::hierarchy::Threads{}); + break; + } + } + //init hist (ymax=416 < 512 : 9bits) + constexpr uint32_t maxPixInModule = TrackerTraits::maxPixInModule; + constexpr auto nbins = TrackerTraits::clusterBinning; + constexpr auto nbits = TrackerTraits::clusterBits; + using Hist = cms::alpakatools::HistoContainer; + auto& hist = alpaka::declareSharedVar(acc); + auto& ws = alpaka::declareSharedVar(acc); + cms::alpakatools::for_each_element_in_block_strided( + acc, Hist::totbins(), [&](uint32_t j) { hist.off[j] = 0; }); + alpaka::syncBlockThreads(acc); + ALPAKA_ASSERT_OFFLOAD((msize == numElements) or + ((msize < numElements) and (digi_view[msize].moduleId() != thisModuleId))); + // limit to maxPixInModule (FIXME if recurrent (and not limited to simulation with low threshold) one will need to implement something cleverer) + if (cms::alpakatools::once_per_grid(acc)) { + if (msize - firstPixel > maxPixInModule) { + printf("too many pixels in module %d: %d > %d\n", thisModuleId, msize - firstPixel, maxPixInModule); + msize = maxPixInModule + firstPixel; + } + } + alpaka::syncBlockThreads(acc); + ALPAKA_ASSERT_OFFLOAD(msize - firstPixel <= maxPixInModule); + +#ifdef GPU_DEBUG + auto& totGood = alpaka::declareSharedVar(acc); + totGood = 0; + 
alpaka::syncBlockThreads(acc); +#endif + // remove duplicate pixels + if constexpr (not isPhase2) { //FIXME remove THIS + if (msize > 1) { + cms::alpakatools::for_each_element_in_block_strided( + acc, pixelStatus::size, [&](uint32_t i) { status[i] = 0; }); + alpaka::syncBlockThreads(acc); + + cms::alpakatools::for_each_element_in_block_strided(acc, msize - 1, firstElementIdx, [&](uint32_t i) { + // skip invalid pixels + if (digi_view[i].moduleId() == ::pixelClustering::invalidModuleId) + return; + pixelStatus::promote(acc, status, digi_view[i].xx(), digi_view[i].yy()); + }); + alpaka::syncBlockThreads(acc); + cms::alpakatools::for_each_element_in_block_strided(acc, msize - 1, firstElementIdx, [&](uint32_t i) { + // skip invalid pixels + if (digi_view[i].moduleId() == ::pixelClustering::invalidModuleId) + return; + if (pixelStatus::isDuplicate(status, digi_view[i].xx(), digi_view[i].yy())) { + digi_view[i].moduleId() = ::pixelClustering::invalidModuleId; + digi_view[i].rawIdArr() = 0; + } + }); + alpaka::syncBlockThreads(acc); + } + } + // fill histo + cms::alpakatools::for_each_element_in_block_strided(acc, msize, firstPixel, [&](uint32_t i) { + if (digi_view[i].moduleId() != ::pixelClustering::invalidModuleId) { // skip invalid pixels + hist.count(acc, digi_view[i].yy()); +#ifdef GPU_DEBUG + alpaka::atomicAdd(acc, &totGood, 1u, alpaka::hierarchy::Blocks{}); +#endif + } + }); + alpaka::syncBlockThreads(acc); + cms::alpakatools::for_each_element_in_block(acc, 32u, [&](uint32_t i) { + ws[i] = 0; // used by prefix scan... 
+ }); + alpaka::syncBlockThreads(acc); + hist.finalize(acc, ws); + alpaka::syncBlockThreads(acc); +#ifdef GPU_DEBUG + ALPAKA_ASSERT_OFFLOAD(hist.size() == totGood); + if (thisModuleId % 100 == 1) + if (cms::alpakatools::once_per_block(acc)) + printf("histo size %d\n", hist.size()); +#endif + cms::alpakatools::for_each_element_in_block_strided(acc, msize, firstPixel, [&](uint32_t i) { + if (digi_view[i].moduleId() != ::pixelClustering::invalidModuleId) { // skip invalid pixels + hist.fill(acc, digi_view[i].yy(), i - firstPixel); + } + }); + // Assume that we can cover the whole module with up to 16 blockDimension-wide iterations + // This maxiter value was tuned for GPU, with 256 or 512 threads per block. + // Hence, also works for CPU case, with 256 or 512 elements per thread. + // Real constrainst is maxiter = hist.size() / blockDimension, + // with blockDimension = threadPerBlock * elementsPerThread. + // Hence, maxiter can be tuned accordingly to the workdiv. + constexpr unsigned int maxiter = 16; + ALPAKA_ASSERT_OFFLOAD((hist.size() / blockDimension) <= maxiter); + + // NB: can be tuned. + constexpr uint32_t threadDimension = cms::alpakatools::requires_single_thread_per_block_v ? 256 : 1; + +#ifndef NDEBUG + [[maybe_unused]] const uint32_t runTimeThreadDimension = + alpaka::getWorkDiv(acc)[0u]; + ALPAKA_ASSERT_OFFLOAD(runTimeThreadDimension <= threadDimension); +#endif + + // nearest neighbour + // allocate space for duplicate pixels: a pixel can appear more than once with different charge in the same event + constexpr int maxNeighbours = 10; + uint16_t nn[maxiter][threadDimension][maxNeighbours]; + uint8_t nnn[maxiter][threadDimension]; // number of nn + for (uint32_t elementIdx = 0; elementIdx < threadDimension; ++elementIdx) { + for (uint32_t k = 0; k < maxiter; ++k) { + nnn[k][elementIdx] = 0; + } + } + + alpaka::syncBlockThreads(acc); // for hit filling! 
+ +#ifdef GPU_DEBUG + // look for anomalous high occupancy + auto& n40 = alpaka::declareSharedVar(acc); + auto& n60 = alpaka::declareSharedVar(acc); + n40 = n60 = 0; + alpaka::syncBlockThreads(acc); + cms::alpakatools::for_each_element_in_block_strided(acc, Hist::nbins(), [&](uint32_t j) { + if (hist.size(j) > 60) + alpaka::atomicAdd(acc, &n60, 1u, alpaka::hierarchy::Blocks{}); + if (hist.size(j) > 40) + alpaka::atomicAdd(acc, &n40, 1u, alpaka::hierarchy::Blocks{}); + }); + alpaka::syncBlockThreads(acc); + if (cms::alpakatools::once_per_block(acc)) { + if (n60 > 0) + printf("columns with more than 60 px %d in %d\n", n60, thisModuleId); + else if (n40 > 0) + printf("columns with more than 40 px %d in %d\n", n40, thisModuleId); + } + alpaka::syncBlockThreads(acc); +#endif + // fill NN + uint32_t k = 0u; + cms::alpakatools::for_each_element_in_block_strided(acc, hist.size(), [&](uint32_t j) { + const uint32_t jEquivalentClass = j % threadDimension; + k = j / blockDimension; + ALPAKA_ASSERT_OFFLOAD(k < maxiter); + auto p = hist.begin() + j; + auto i = *p + firstPixel; + ALPAKA_ASSERT_OFFLOAD(digi_view[i].moduleId() != ::pixelClustering::invalidModuleId); + ALPAKA_ASSERT_OFFLOAD(digi_view[i].moduleId() == thisModuleId); // same module + int be = Hist::bin(digi_view[i].yy() + 1); + auto e = hist.end(be); + ++p; + ALPAKA_ASSERT_OFFLOAD(0 == nnn[k][jEquivalentClass]); + for (; p < e; ++p) { + auto m = (*p) + firstPixel; + ALPAKA_ASSERT_OFFLOAD(m != i); + ALPAKA_ASSERT_OFFLOAD(int(digi_view[m].yy()) - int(digi_view[i].yy()) >= 0); + ALPAKA_ASSERT_OFFLOAD(int(digi_view[m].yy()) - int(digi_view[i].yy()) <= 1); + if (std::abs(int(digi_view[m].xx()) - int(digi_view[i].xx())) <= 1) { + auto l = nnn[k][jEquivalentClass]++; + ALPAKA_ASSERT_OFFLOAD(l < maxNeighbours); + nn[k][jEquivalentClass][l] = *p; + } + } + }); + // for each pixel, look at all the pixels until the end of the module; + // when two valid pixels within +/- 1 in x or y are found, set their id to the minimum; + // 
after the loop, all the pixel in each cluster should have the id equeal to the lowest + // pixel in the cluster ( clus[i] == i ). + bool more = true; + int nloops = 0; + while (alpaka::syncBlockThreadsPredicate(acc, more)) { + if (1 == nloops % 2) { + cms::alpakatools::for_each_element_in_block_strided(acc, hist.size(), [&](uint32_t j) { + auto p = hist.begin() + j; + auto i = *p + firstPixel; + auto m = digi_view[i].clus(); + while (m != digi_view[m].clus()) + m = digi_view[m].clus(); + digi_view[i].clus() = m; + }); + } else { + more = false; + uint32_t k = 0u; + cms::alpakatools::for_each_element_in_block_strided(acc, hist.size(), [&](uint32_t j) { + k = j / blockDimension; + const uint32_t jEquivalentClass = j % threadDimension; + auto p = hist.begin() + j; + auto i = *p + firstPixel; + for (int kk = 0; kk < nnn[k][jEquivalentClass]; ++kk) { + auto l = nn[k][jEquivalentClass][kk]; + auto m = l + firstPixel; + ALPAKA_ASSERT_OFFLOAD(m != i); + auto old = + alpaka::atomicMin(acc, &digi_view[m].clus(), digi_view[i].clus(), alpaka::hierarchy::Blocks{}); + if (old != digi_view[i].clus()) { + // end the loop only if no changes were applied + more = true; + } + alpaka::atomicMin(acc, &digi_view[i].clus(), old, alpaka::hierarchy::Blocks{}); + } // nnloop + }); // pixel loop + } + ++nloops; + } // end while +#ifdef GPU_DEBUG + { + auto& n0 = alpaka::declareSharedVar(acc); + if (cms::alpakatools::once_per_block(acc)) + n0 = nloops; + alpaka::syncBlockThreads(acc); +#ifndef NDEBUG + [[maybe_unused]] auto ok = n0 == nloops; + ALPAKA_ASSERT_OFFLOAD(alpaka::syncBlockThreadsPredicate(acc, ok)); +#endif + if (thisModuleId % 100 == 1) + if (cms::alpakatools::once_per_block(acc)) + printf("# loops %d\n", nloops); + } +#endif + auto& foundClusters = alpaka::declareSharedVar(acc); + foundClusters = 0; + alpaka::syncBlockThreads(acc); + + // find the number of different clusters, identified by a pixels with clus[i] == i; + // mark these pixels with a negative id. 
+ cms::alpakatools::for_each_element_in_block_strided(acc, msize, firstPixel, [&](uint32_t i) { + if (digi_view[i].moduleId() != ::pixelClustering::invalidModuleId) { // skip invalid pixels + if (digi_view[i].clus() == static_cast(i)) { + auto old = alpaka::atomicInc(acc, &foundClusters, 0xffffffff, alpaka::hierarchy::Threads{}); + digi_view[i].clus() = -(old + 1); + } + } + }); + alpaka::syncBlockThreads(acc); + + // propagate the negative id to all the pixels in the cluster. + cms::alpakatools::for_each_element_in_block_strided(acc, msize, firstPixel, [&](uint32_t i) { + if (digi_view[i].moduleId() != ::pixelClustering::invalidModuleId) { // skip invalid pixels + if (digi_view[i].clus() >= 0) { + // mark each pixel in a cluster with the same id as the first one + digi_view[i].clus() = digi_view[digi_view[i].clus()].clus(); + } + } + }); + alpaka::syncBlockThreads(acc); + + // adjust the cluster id to be a positive value starting from 0 + cms::alpakatools::for_each_element_in_block_strided(acc, msize, firstPixel, [&](uint32_t i) { + if (digi_view[i].moduleId() == ::pixelClustering::invalidModuleId) { // skip invalid pixels + digi_view[i].clus() = ::pixelClustering::invalidClusterId; + } else { + digi_view[i].clus() = -digi_view[i].clus() - 1; + } + }); + alpaka::syncBlockThreads(acc); + if (cms::alpakatools::once_per_block(acc)) { + clus_view[thisModuleId].clusInModule() = foundClusters; + clus_view[module].moduleId() = thisModuleId; +#ifdef GPU_DEBUG + if (foundClusters > gMaxHit) { + gMaxHit = foundClusters; + if (foundClusters > 8) + printf("max hit %d in %d\n", foundClusters, thisModuleId); + } + // if (thisModuleId % 100 == 1) + printf("%d clusters in module %d\n", foundClusters, thisModuleId); +#endif + } + } // module loop + } + }; + } // namespace pixelClustering +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // plugin_SiPixelClusterizer_alpaka_PixelClustering.h diff --git 
a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelPhase2DigiToCluster.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelPhase2DigiToCluster.cc new file mode 100644 index 0000000000000..5d0b355d1eebc --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelPhase2DigiToCluster.cc @@ -0,0 +1,158 @@ +// C++ includes +#include +#include +#include +#include +#include + +#include "DataFormats/Common/interface/DetSetVector.h" +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelDigi/interface/PixelDigi.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/stream/SynchronizingEDProducer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" + +#include "SiPixelRawToClusterKernel.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + class SiPixelPhase2DigiToCluster : public stream::SynchronizingEDProducer<> { + public: + explicit SiPixelPhase2DigiToCluster(const edm::ParameterSet& iConfig); + ~SiPixelPhase2DigiToCluster() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + using Algo = 
pixelDetails::SiPixelRawToClusterKernel; + + private: + void acquire(device::Event const& iEvent, device::EventSetup const& iSetup) override; + void produce(device::Event& iEvent, device::EventSetup const& iSetup) override; + + const edm::ESGetToken geomToken_; + const edm::EDGetTokenT> pixelDigiToken_; + + device::EDPutToken digiPutToken_; + device::EDPutToken digiErrorPutToken_; + device::EDPutToken clusterPutToken_; + + Algo Algo_; + + const bool includeErrors_; + const SiPixelClusterThresholds clusterThresholds_; + uint32_t nDigis_ = 0; + + SiPixelDigisSoACollection digis_d; + }; + + SiPixelPhase2DigiToCluster::SiPixelPhase2DigiToCluster(const edm::ParameterSet& iConfig) + : geomToken_(esConsumes()), + pixelDigiToken_(consumes>(iConfig.getParameter("InputDigis"))), + digiPutToken_(produces()), + clusterPutToken_(produces()), + includeErrors_(iConfig.getParameter("IncludeErrors")), + clusterThresholds_{iConfig.getParameter("clusterThreshold_layer1"), + iConfig.getParameter("clusterThreshold_otherLayers"), + static_cast(iConfig.getParameter("ElectronPerADCGain")), + static_cast(iConfig.getParameter("Phase2ReadoutMode")), + static_cast(iConfig.getParameter("Phase2DigiBaseline")), + static_cast(iConfig.getParameter("Phase2KinkADC"))} { + if (includeErrors_) { + digiErrorPutToken_ = produces(); + } + } + + void SiPixelPhase2DigiToCluster::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + + desc.add("IncludeErrors", true); + desc.add("clusterThreshold_layer1", + pixelClustering::clusterThresholdPhase2LayerOne); //FIXME (fix the CUDA) + desc.add("clusterThreshold_otherLayers", pixelClustering::clusterThresholdPhase2OtherLayers); + desc.add("ElectronPerADCGain", 1500.); + desc.add("Phase2ReadoutMode", 3); + desc.add("Phase2DigiBaseline", 1000); + desc.add("Phase2KinkADC", 8); + desc.add("InputDigis", edm::InputTag("simSiPixelDigis:Pixel")); + descriptions.addWithDefaultLabel(desc); + } + + void 
SiPixelPhase2DigiToCluster::acquire(device::Event const& iEvent, device::EventSetup const& iSetup) { + auto const& input = iEvent.get(pixelDigiToken_); + + const TrackerGeometry* geom_ = &iSetup.getData(geomToken_); + + uint32_t nDigis = 0; + + for (const auto& det : input) { + nDigis += det.size(); + } + + if (nDigis_ == 0) + return; + + SiPixelDigisHost digis_h(nDigis, iEvent.queue()); + nDigis_ = nDigis; + + nDigis = 0; + for (const auto& det : input) { + unsigned int detid = det.detId(); + DetId detIdObject(detid); + const GeomDetUnit* genericDet = geom_->idToDetUnit(detIdObject); + auto const gind = genericDet->index(); + for (auto const& px : det) { + digis_h.view()[nDigis].moduleId() = uint16_t(gind); + + digis_h.view()[nDigis].xx() = uint16_t(px.row()); + digis_h.view()[nDigis].yy() = uint16_t(px.column()); + digis_h.view()[nDigis].adc() = uint16_t(px.adc()); + + digis_h.view()[nDigis].pdigi() = uint32_t(px.packedData()); + + digis_h.view()[nDigis].rawIdArr() = uint32_t(detid); + + nDigis++; + } + } + + digis_d = SiPixelDigisSoACollection(nDigis, iEvent.queue()); + alpaka::memcpy(iEvent.queue(), digis_d.buffer(), digis_h.buffer()); + + Algo_.makePhase2ClustersAsync(iEvent.queue(), clusterThresholds_, digis_d.view(), nDigis); + } + + void SiPixelPhase2DigiToCluster::produce(device::Event& iEvent, device::EventSetup const& iSetup) { + if (nDigis_ == 0) { + SiPixelClustersSoACollection clusters_d{pixelTopology::Phase1::numberOfModules, iEvent.queue()}; + iEvent.emplace(digiPutToken_, std::move(digis_d)); + iEvent.emplace(clusterPutToken_, std::move(clusters_d)); + if (includeErrors_) { + iEvent.emplace(digiErrorPutToken_, SiPixelDigiErrorsSoACollection()); + } + return; + } + + digis_d.setNModulesDigis(Algo_.nModules(), nDigis_); + + iEvent.emplace(digiPutToken_, std::move(digis_d)); + iEvent.emplace(clusterPutToken_, Algo_.getClusters()); + if (includeErrors_) { + iEvent.emplace(digiErrorPutToken_, Algo_.getErrors()); + } + } + +} // namespace 
ALPAKA_ACCELERATOR_NAMESPACE + +// define as framework plugin +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(SiPixelPhase2DigiToCluster); diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToCluster.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToCluster.cc new file mode 100644 index 0000000000000..f3e13bade8e10 --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToCluster.cc @@ -0,0 +1,289 @@ +#include +#include +#include +#include +#include + +#include "CalibTracker/Records/interface/SiPixelGainCalibrationForHLTSoARcd.h" +#include "CalibTracker/Records/interface/SiPixelMappingSoARecord.h" +#include "CondFormats/DataRecord/interface/SiPixelFedCablingMapRcd.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingMap.h" +#include "CondFormats/SiPixelObjects/interface/SiPixelFedCablingTree.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingUtilities.h" +#include "DataFormats/FEDRawData/interface/FEDNumbering.h" +#include "DataFormats/FEDRawData/interface/FEDRawData.h" +#include "DataFormats/FEDRawData/interface/FEDRawDataCollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelFormatterErrors.h" +#include "EventFilter/SiPixelRawToDigi/interface/PixelDataFormatter.h" +#include "EventFilter/SiPixelRawToDigi/interface/PixelUnpackingRegions.h" +#include "FWCore/Framework/interface/ESWatcher.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include 
"FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/stream/SynchronizingEDProducer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" + +#include "SiPixelRawToClusterKernel.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + template + class SiPixelRawToCluster : public stream::SynchronizingEDProducer<> { + public: + explicit SiPixelRawToCluster(const edm::ParameterSet& iConfig); + ~SiPixelRawToCluster() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + using Algo = pixelDetails::SiPixelRawToClusterKernel; + + private: + void acquire(device::Event const& iEvent, device::EventSetup const& iSetup) override; + void produce(device::Event& iEvent, device::EventSetup const& iSetup) override; + + edm::EDGetTokenT rawGetToken_; + edm::EDPutTokenT fmtErrorToken_; + device::EDPutToken digiPutToken_; + device::EDPutToken digiErrorPutToken_; + device::EDPutToken clusterPutToken_; + + edm::ESWatcher recordWatcher_; + const device::ESGetToken mapToken_; + const device::ESGetToken gainsToken_; + const edm::ESGetToken cablingMapToken_; + + std::unique_ptr cabling_; + std::vector fedIds_; + const SiPixelFedCablingMap* cablingMap_ = nullptr; + std::unique_ptr regions_; + + Algo Algo_; + PixelDataFormatter::Errors errors_; + + const bool includeErrors_; + const bool useQuality_; + uint32_t nDigis_; + const SiPixelClusterThresholds 
clusterThresholds_; + }; + + template + SiPixelRawToCluster::SiPixelRawToCluster(const edm::ParameterSet& iConfig) + : rawGetToken_(consumes(iConfig.getParameter("InputLabel"))), + digiPutToken_(produces()), + clusterPutToken_(produces()), + mapToken_(esConsumes()), + gainsToken_(esConsumes()), + cablingMapToken_(esConsumes( + edm::ESInputTag("", iConfig.getParameter("CablingMapLabel")))), + includeErrors_(iConfig.getParameter("IncludeErrors")), + useQuality_(iConfig.getParameter("UseQualityInfo")), + clusterThresholds_{iConfig.getParameter("clusterThreshold_layer1"), + iConfig.getParameter("clusterThreshold_otherLayers"), + static_cast(iConfig.getParameter("VCaltoElectronGain")), + static_cast(iConfig.getParameter("VCaltoElectronGain_L1")), + static_cast(iConfig.getParameter("VCaltoElectronOffset")), + static_cast(iConfig.getParameter("VCaltoElectronOffset_L1"))} { + if (includeErrors_) { + digiErrorPutToken_ = produces(); + fmtErrorToken_ = produces(); + } + + // regions + if (!iConfig.getParameter("Regions").getParameterNames().empty()) { + regions_ = std::make_unique(iConfig, consumesCollector()); + } + } + + template + void SiPixelRawToCluster::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("IncludeErrors", true); + desc.add("UseQualityInfo", false); + // Note: this parameter is obsolete: it is ignored and will have no effect. + // It is kept to avoid breaking older configurations, and will not be printed in the generated cfi.py file. 
+ desc.addOptionalNode(edm::ParameterDescription("MaxFEDWords", 0, true), false) + ->setComment("This parameter is obsolete and will be ignored."); + desc.add("clusterThreshold_layer1", pixelClustering::clusterThresholdLayerOne); + desc.add("clusterThreshold_otherLayers", pixelClustering::clusterThresholdOtherLayers); + desc.add("VCaltoElectronGain", 47.f); + desc.add("VCaltoElectronGain_L1", 50.f); + desc.add("VCaltoElectronOffset", -60.f); + desc.add("VCaltoElectronOffset_L1", -670.f); + + desc.add("InputLabel", edm::InputTag("rawDataCollector")); + { + edm::ParameterSetDescription psd0; + psd0.addOptional>("inputs"); + psd0.addOptional>("deltaPhi"); + psd0.addOptional>("maxZ"); + psd0.addOptional("beamSpot"); + desc.add("Regions", psd0) + ->setComment("## Empty Regions PSet means complete unpacking"); + } + desc.add("CablingMapLabel", "")->setComment("CablingMap label"); //Tav + descriptions.addWithDefaultLabel(desc); + } + + template + void SiPixelRawToCluster::acquire(device::Event const& iEvent, device::EventSetup const& iSetup) { + [[maybe_unused]] auto const& hMap = iSetup.getData(mapToken_); + auto const& dGains = iSetup.getData(gainsToken_); + auto gains = SiPixelGainCalibrationForHLTDevice(1, iEvent.queue()); + auto modulesToUnpackRegional = + cms::alpakatools::make_device_buffer(iEvent.queue(), ::pixelgpudetails::MAX_SIZE); + const unsigned char* modulesToUnpack; + // initialize cabling map or update if necessary + if (recordWatcher_.check(iSetup)) { + // cabling map, which maps online address (fed->link->ROC->local pixel) to offline (DetId->global pixel) + cablingMap_ = &iSetup.getData(cablingMapToken_); + fedIds_ = cablingMap_->fedIds(); + cabling_ = cablingMap_->cablingTree(); + LogDebug("map version:") << cablingMap_->version(); + } + if (regions_) { + regions_->run(iEvent, iSetup); + LogDebug("SiPixelRawToCluster") << "region2unpack #feds: " << regions_->nFEDs(); + LogDebug("SiPixelRawToCluster") << "region2unpack #modules (BPIX,EPIX,total): " << 
regions_->nBarrelModules() + << " " << regions_->nForwardModules() << " " << regions_->nModules(); + + modulesToUnpackRegional = SiPixelMappingUtilities::getModToUnpRegionalAsync( + *(regions_->modulesToUnpack()), cabling_.get(), fedIds_, iEvent.queue()); + modulesToUnpack = modulesToUnpackRegional.data(); + } else { + modulesToUnpack = hMap->modToUnpDefault(); + } + + const auto& buffers = iEvent.get(rawGetToken_); + + errors_.clear(); + + // GPU specific: Data extraction for RawToDigi GPU + unsigned int wordCounter = 0; + unsigned int fedCounter = 0; + bool errorsInEvent = false; + std::vector index(fedIds_.size(), 0); + std::vector start(fedIds_.size(), nullptr); + std::vector words(fedIds_.size(), 0); + // In CPU algorithm this loop is part of PixelDataFormatter::interpretRawData() + ErrorChecker errorcheck; + for (uint32_t i = 0; i < fedIds_.size(); ++i) { + const int fedId = fedIds_[i]; + if (regions_ && !regions_->mayUnpackFED(fedId)) + continue; + + // for GPU + // first 150 index stores the fedId and next 150 will store the + // start index of word in that fed + assert(fedId >= FEDNumbering::MINSiPixeluTCAFEDID); + fedCounter++; + + // get event data for this fed + const FEDRawData& rawData = buffers.FEDData(fedId); + + // GPU specific + int nWords = rawData.size() / sizeof(cms_uint64_t); + if (nWords == 0) { + continue; + } + // check CRC bit + const cms_uint64_t* trailer = reinterpret_cast(rawData.data()) + (nWords - 1); + if (not errorcheck.checkCRC(errorsInEvent, fedId, trailer, errors_)) { + continue; + } + // check headers + const cms_uint64_t* header = reinterpret_cast(rawData.data()); + header--; + bool moreHeaders = true; + while (moreHeaders) { + header++; + bool headerStatus = errorcheck.checkHeader(errorsInEvent, fedId, header, errors_); + moreHeaders = headerStatus; + } + + // check trailers + bool moreTrailers = true; + trailer++; + while (moreTrailers) { + trailer--; + bool trailerStatus = errorcheck.checkTrailer(errorsInEvent, fedId, 
nWords, trailer, errors_); + moreTrailers = trailerStatus; + } + + const cms_uint32_t* bw = (const cms_uint32_t*)(header + 1); + const cms_uint32_t* ew = (const cms_uint32_t*)(trailer); + + assert(0 == (ew - bw) % 2); + index[i] = wordCounter; + start[i] = bw; + words[i] = (ew - bw); + wordCounter += (ew - bw); + + } // end of for loop + nDigis_ = wordCounter; + if (nDigis_ == 0) + return; + + // copy the FED data to a single cpu buffer + pixelDetails::WordFedAppender wordFedAppender(nDigis_); + for (uint32_t i = 0; i < fedIds_.size(); ++i) { + wordFedAppender.initializeWordFed(fedIds_[i], index[i], start[i], words[i]); + } + Algo_.makePhase1ClustersAsync(iEvent.queue(), + clusterThresholds_, + hMap.const_view(), + modulesToUnpack, + dGains.const_view(), + wordFedAppender, + wordCounter, + fedCounter, + useQuality_, + includeErrors_, + edm::MessageDrop::instance()->debugEnabled); + } + + template + void SiPixelRawToCluster::produce(device::Event& iEvent, device::EventSetup const& iSetup) { + if (nDigis_ == 0) { + // Cannot use the default constructor here, as it would not allocate memory. + // In the case of no digis, clusters_d are not being instantiated, but are + // still used downstream to initialize TrackingRecHitSoADevice. If there + // are no valid pointers to clusters' Collection columns, instantiation + // of TrackingRecHits fail. 
Example: workflow 11604.0 + + iEvent.emplace(digiPutToken_, nDigis_, iEvent.queue()); + iEvent.emplace(clusterPutToken_, pixelTopology::Phase1::numberOfModules, iEvent.queue()); + if (includeErrors_) { + iEvent.emplace(digiErrorPutToken_); + iEvent.emplace(fmtErrorToken_); + } + return; + } + + iEvent.emplace(digiPutToken_, Algo_.getDigis()); + iEvent.emplace(clusterPutToken_, Algo_.getClusters()); + if (includeErrors_) { + iEvent.emplace(digiErrorPutToken_, Algo_.getErrors()); + iEvent.emplace(fmtErrorToken_, std::move(errors_)); + } + } + + using SiPixelRawToClusterPhase1 = SiPixelRawToCluster; + using SiPixelRawToClusterHIonPhase1 = SiPixelRawToCluster; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +// define as framework plugin +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(SiPixelRawToClusterPhase1); +DEFINE_FWK_ALPAKA_MODULE(SiPixelRawToClusterHIonPhase1); diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToClusterKernel.dev.cc b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToClusterKernel.dev.cc new file mode 100644 index 0000000000000..88ad79c6af609 --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToClusterKernel.dev.cc @@ -0,0 +1,807 @@ +// C++ includes +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// CMSSW includes +#include "HeterogeneousCore/AlpakaInterface/interface/prefixScan.h" +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +#include "CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h" +#include 
"CondFormats/SiPixelObjects/interface/SiPixelMappingLayout.h" +#include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h" + +// local includes +#include "CalibPixel.h" +#include "ClusterChargeCut.h" +#include "PixelClustering.h" +#include "SiPixelRawToClusterKernel.h" + +// #define GPU_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace pixelDetails { + + //////////////////// + + ALPAKA_FN_ACC uint32_t getLink(uint32_t ww) { + return ((ww >> ::sipixelconstants::LINK_shift) & ::sipixelconstants::LINK_mask); + } + + ALPAKA_FN_ACC uint32_t getRoc(uint32_t ww) { + return ((ww >> ::sipixelconstants::ROC_shift) & ::sipixelconstants::ROC_mask); + } + + ALPAKA_FN_ACC uint32_t getADC(uint32_t ww) { + return ((ww >> ::sipixelconstants::ADC_shift) & ::sipixelconstants::ADC_mask); + } + + ALPAKA_FN_ACC bool isBarrel(uint32_t rawId) { return (1 == ((rawId >> 25) & 0x7)); } + + ALPAKA_FN_ACC ::pixelDetails::DetIdGPU getRawId(const SiPixelMappingSoAConstView &cablingMap, + uint8_t fed, + uint32_t link, + uint32_t roc) { + using namespace ::pixelDetails; + uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc; + ::pixelDetails::DetIdGPU detId = { + cablingMap.rawId()[index], cablingMap.rocInDet()[index], cablingMap.moduleId()[index]}; + return detId; + } + + //reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html + //http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071 + // Convert local pixel to pixelDetails::global pixel + ALPAKA_FN_ACC ::pixelDetails::Pixel frameConversion( + bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, ::pixelDetails::Pixel local) { + int slopeRow = 0, slopeCol = 0; + int rowOffset = 0, colOffset = 0; + + if (bpix) { + if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1 + if (rocIdInDetUnit < 8) { + slopeRow = 1; + slopeCol = -1; + rowOffset = 0; + colOffset = (8 
- rocIdInDetUnit) * ::pixelDetails::numColsInRoc - 1; + } else { + slopeRow = -1; + slopeCol = 1; + rowOffset = 2 * ::pixelDetails::numRowsInRoc - 1; + colOffset = (rocIdInDetUnit - 8) * ::pixelDetails::numColsInRoc; + } // if roc + } else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1 + if (rocIdInDetUnit < 8) { + slopeRow = -1; + slopeCol = 1; + rowOffset = 2 * ::pixelDetails::numRowsInRoc - 1; + colOffset = rocIdInDetUnit * ::pixelDetails::numColsInRoc; + } else { + slopeRow = 1; + slopeCol = -1; + rowOffset = 0; + colOffset = (16 - rocIdInDetUnit) * ::pixelDetails::numColsInRoc - 1; + } + } + + } else { // fpix + if (side == -1) { // pannel 1 + if (rocIdInDetUnit < 8) { + slopeRow = 1; + slopeCol = -1; + rowOffset = 0; + colOffset = (8 - rocIdInDetUnit) * ::pixelDetails::numColsInRoc - 1; + } else { + slopeRow = -1; + slopeCol = 1; + rowOffset = 2 * ::pixelDetails::numRowsInRoc - 1; + colOffset = (rocIdInDetUnit - 8) * ::pixelDetails::numColsInRoc; + } + } else { // pannel 2 + if (rocIdInDetUnit < 8) { + slopeRow = 1; + slopeCol = -1; + rowOffset = 0; + colOffset = (8 - rocIdInDetUnit) * ::pixelDetails::numColsInRoc - 1; + } else { + slopeRow = -1; + slopeCol = 1; + rowOffset = 2 * ::pixelDetails::numRowsInRoc - 1; + colOffset = (rocIdInDetUnit - 8) * ::pixelDetails::numColsInRoc; + } + + } // side + } + + uint32_t gRow = rowOffset + slopeRow * local.row; + uint32_t gCol = colOffset + slopeCol * local.col; + ::pixelDetails::Pixel global = {gRow, gCol}; + return global; + } + + ALPAKA_FN_ACC uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) { + uint8_t errorType = 0; + + switch (status) { + case 1: { + if (debug) + printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId); + errorType = 35; + break; + } + case 2: { + if (debug) + printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId); + errorType = 36; + break; + } + case 3: { + if (debug) + printf("Error in Fed: %i, invalid 
dcol/pixel value (errorType = 37)\n", fedId); + errorType = 37; + break; + } + case 4: { + if (debug) + printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId); + errorType = 38; + break; + } + default: + if (debug) + printf("Cabling check returned unexpected result, status = %i\n", status); + }; + + return errorType; + } + + ALPAKA_FN_ACC bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) { + uint32_t numRowsInRoc = 80; + uint32_t numColsInRoc = 52; + + /// row and collumn in ROC representation + return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc)); + } + + ALPAKA_FN_ACC bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); } + + ALPAKA_FN_ACC uint8_t checkROC(uint32_t errorWord, + uint8_t fedId, + uint32_t link, + const SiPixelMappingSoAConstView &cablingMap, + bool debug = false) { + uint8_t errorType = (errorWord >> ::pixelDetails::ROC_shift) & ::pixelDetails::ERROR_mask; + if (errorType < 25) + return 0; + bool errorFound = false; + + switch (errorType) { + case (25): { + errorFound = true; + uint32_t index = + fedId * ::pixelDetails::MAX_LINK * ::pixelDetails::MAX_ROC + (link - 1) * ::pixelDetails::MAX_ROC + 1; + if (index > 1 && index <= cablingMap.size()) { + if (!(link == cablingMap.link()[index] && 1 == cablingMap.roc()[index])) + errorFound = false; + } + if (debug and errorFound) + printf("Invalid ROC = 25 found (errorType = 25)\n"); + break; + } + case (26): { + if (debug) + printf("Gap word found (errorType = 26)\n"); + errorFound = true; + break; + } + case (27): { + if (debug) + printf("Dummy word found (errorType = 27)\n"); + errorFound = true; + break; + } + case (28): { + if (debug) + printf("Error fifo nearly full (errorType = 28)\n"); + errorFound = true; + break; + } + case (29): { + if (debug) + printf("Timeout on a channel (errorType = 29)\n"); + if ((errorWord >> ::pixelDetails::OMIT_ERR_shift) & ::pixelDetails::OMIT_ERR_mask) { + if (debug) + 
printf("...first errorType=29 error, this gets masked out\n"); + } + errorFound = true; + break; + } + case (30): { + if (debug) + printf("TBM error trailer (errorType = 30)\n"); + int StateMatch_bits = 4; + int StateMatch_shift = 8; + uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits); + int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask; + if (StateMatch != 1 && StateMatch != 8) { + if (debug) + printf("FED error 30 with unexpected State Bits (errorType = 30)\n"); + } + if (StateMatch == 1) + errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30 + errorFound = true; + break; + } + case (31): { + if (debug) + printf("Event number error (errorType = 31)\n"); + errorFound = true; + break; + } + default: + errorFound = false; + }; + + return errorFound ? errorType : 0; + } + + ALPAKA_FN_ACC uint32_t getErrRawID(uint8_t fedId, + uint32_t errWord, + uint32_t errorType, + const SiPixelMappingSoAConstView &cablingMap, + bool debug = false) { + uint32_t rID = 0xffffffff; + + switch (errorType) { + case 25: + case 30: + case 31: + case 36: + case 40: { + uint32_t roc = 1; + uint32_t link = (errWord >> ::pixelDetails::LINK_shift) & ::pixelDetails::LINK_mask; + uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; + if (rID_temp != 9999) + rID = rID_temp; + break; + } + case 29: { + int chanNmbr = 0; + const int DB0_shift = 0; + const int DB1_shift = DB0_shift + 1; + const int DB2_shift = DB1_shift + 1; + const int DB3_shift = DB2_shift + 1; + const int DB4_shift = DB3_shift + 1; + const uint32_t DataBit_mask = ~(~uint32_t(0) << 1); + + int CH1 = (errWord >> DB0_shift) & DataBit_mask; + int CH2 = (errWord >> DB1_shift) & DataBit_mask; + int CH3 = (errWord >> DB2_shift) & DataBit_mask; + int CH4 = (errWord >> DB3_shift) & DataBit_mask; + int CH5 = (errWord >> DB4_shift) & DataBit_mask; + int BLOCK_bits = 3; + int BLOCK_shift = 8; + uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits); + int BLOCK = (errWord >> BLOCK_shift) & 
BLOCK_mask; + int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5; + if (BLOCK % 2 == 0) + chanNmbr = (BLOCK / 2) * 9 + localCH; + else + chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH; + if ((chanNmbr < 1) || (chanNmbr > 36)) + break; // signifies unexpected result + + uint32_t roc = 1; + uint32_t link = chanNmbr; + uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; + if (rID_temp != 9999) + rID = rID_temp; + break; + } + case 37: + case 38: { + uint32_t roc = (errWord >> ::pixelDetails::ROC_shift) & ::pixelDetails::ROC_mask; + uint32_t link = (errWord >> ::pixelDetails::LINK_shift) & ::pixelDetails::LINK_mask; + uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; + if (rID_temp != 9999) + rID = rID_temp; + break; + } + default: + break; + }; + + return rID; + } + + // Kernel to perform Raw to Digi conversion + struct RawToDigi_kernel { + template + ALPAKA_FN_ACC void operator()(const TAcc &acc, + const SiPixelMappingSoAConstView &cablingMap, + const unsigned char *modToUnp, + const uint32_t wordCounter, + const uint32_t *word, + const uint8_t *fedIds, + SiPixelDigisSoAView digisView, + SiPixelDigiErrorsSoAView err, + bool useQualityInfo, + bool includeErrors, + bool debug) const { + cms::alpakatools::for_each_element_in_grid_strided(acc, wordCounter, [&](uint32_t iloop) { + auto gIndex = iloop; + auto dvgi = digisView[gIndex]; + dvgi.xx() = 0; + dvgi.yy() = 0; + dvgi.adc() = 0; + bool skipROC = false; + + if (gIndex == 0) + err[gIndex].size() = 0; + + err[gIndex].pixelErrors() = SiPixelErrorCompact{0, 0, 0, 0}; + + uint8_t fedId = fedIds[gIndex / 2]; // +1200; + + // initialize (too many coninue below) + dvgi.pdigi() = 0; + dvgi.rawIdArr() = 0; + constexpr uint16_t invalidModuleId = std::numeric_limits::max() - 1; + dvgi.moduleId() = invalidModuleId; + + uint32_t ww = word[gIndex]; // Array containing 32 bit raw data + if (ww == 0) { + // 0 is an indicator of a noise/dead channel, skip these pixels during clusterization + 
return; + } + + uint32_t link = getLink(ww); // Extract link + uint32_t roc = getRoc(ww); // Extract Roc in link + ::pixelDetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc); + + uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug); + skipROC = (roc < ::pixelDetails::maxROCIndex) ? false : (errorType != 0); + if (includeErrors and skipROC) { + uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug); + err[gIndex].pixelErrors() = SiPixelErrorCompact{rID, ww, errorType, fedId}; + alpaka::atomicInc(acc, &err.size(), 0xffffffff, alpaka::hierarchy::Threads{}); + return; + } + + uint32_t rawId = detId.RawId; + uint32_t rocIdInDetUnit = detId.rocInDet; + bool barrel = isBarrel(rawId); + + uint32_t index = + fedId * ::pixelDetails::MAX_LINK * ::pixelDetails::MAX_ROC + (link - 1) * ::pixelDetails::MAX_ROC + roc; + if (useQualityInfo) { + skipROC = cablingMap.badRocs()[index]; + if (skipROC) + return; + } + skipROC = modToUnp[index]; + if (skipROC) + return; + + uint32_t layer = 0; //, ladder =0; + int side = 0, panel = 0, module = 0; //disk = 0, blade = 0 + + if (barrel) { + layer = (rawId >> ::pixelDetails::layerStartBit) & ::pixelDetails::layerMask; + module = (rawId >> ::pixelDetails::moduleStartBit) & ::pixelDetails::moduleMask; + side = (module < 5) ? -1 : 1; + } else { + // endcap ids + layer = 0; + panel = (rawId >> ::pixelDetails::panelStartBit) & ::pixelDetails::panelMask; + //disk = (rawId >> diskStartBit_) & diskMask_; + side = (panel == 1) ? 
-1 : 1; + //blade = (rawId >> bladeStartBit_) & bladeMask_; + } + + // ***special case of layer to 1 be handled here + ::pixelDetails::Pixel localPix; + if (layer == 1) { + uint32_t col = (ww >> ::pixelDetails::COL_shift) & ::pixelDetails::COL_mask; + uint32_t row = (ww >> ::pixelDetails::ROW_shift) & ::pixelDetails::ROW_mask; + localPix.row = row; + localPix.col = col; + if (includeErrors) { + if (not rocRowColIsValid(row, col)) { + uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays + err[gIndex].pixelErrors() = SiPixelErrorCompact{rawId, ww, error, fedId}; + alpaka::atomicInc(acc, &err.size(), 0xffffffff, alpaka::hierarchy::Threads{}); + if (debug) + printf("BPIX1 Error status: %i\n", error); + return; + } + } + } else { + // ***conversion rules for dcol and pxid + uint32_t dcol = (ww >> ::pixelDetails::DCOL_shift) & ::pixelDetails::DCOL_mask; + uint32_t pxid = (ww >> ::pixelDetails::PXID_shift) & ::pixelDetails::PXID_mask; + uint32_t row = ::pixelDetails::numRowsInRoc - pxid / 2; + uint32_t col = dcol * 2 + pxid % 2; + localPix.row = row; + localPix.col = col; + if (includeErrors and not dcolIsValid(dcol, pxid)) { + uint8_t error = conversionError(fedId, 3, debug); + err[gIndex].pixelErrors() = SiPixelErrorCompact{rawId, ww, error, fedId}; + alpaka::atomicInc(acc, &err.size(), 0xffffffff, alpaka::hierarchy::Threads{}); + if (debug) + printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc); + return; + } + } + + ::pixelDetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix); + dvgi.xx() = globalPix.row; // origin shifting by 1 0-159 + dvgi.yy() = globalPix.col; // origin shifting by 1 0-415 + dvgi.adc() = getADC(ww); + dvgi.pdigi() = ::pixelDetails::pack(globalPix.row, globalPix.col, dvgi.adc()); + dvgi.moduleId() = detId.moduleId; + dvgi.rawIdArr() = rawId; + }); // end of stride on grid + + } // end of Raw to Digi kernel operator() + }; // end of Raw to Digi struct + 
+ template + struct FillHitsModuleStart { + template + ALPAKA_FN_ACC void operator()(const TAcc &acc, SiPixelClustersSoAView clus_view) const { + ALPAKA_ASSERT_OFFLOAD(TrackerTraits::numberOfModules < 2048); // easy to extend at least till 32*1024 + + constexpr int nMaxModules = TrackerTraits::numberOfModules; + constexpr uint32_t maxHitsInModule = TrackerTraits::maxHitsInModule; + +#ifndef NDEBUG + [[maybe_unused]] const uint32_t blockIdxLocal(alpaka::getIdx(acc)[0u]); + ALPAKA_ASSERT_OFFLOAD(0 == blockIdxLocal); + [[maybe_unused]] const uint32_t gridDimension(alpaka::getWorkDiv(acc)[0u]); + ALPAKA_ASSERT_OFFLOAD(1 == gridDimension); +#endif + + // limit to maxHitsInModule; + cms::alpakatools::for_each_element_in_block_strided(acc, nMaxModules, [&](uint32_t i) { + clus_view[i + 1].clusModuleStart() = std::min(maxHitsInModule, clus_view[i].clusInModule()); + }); + + constexpr bool isPhase2 = std::is_base_of::value; + constexpr auto leftModules = isPhase2 ? 1024 : nMaxModules - 1024; + + auto &&ws = alpaka::declareSharedVar(acc); + + cms::alpakatools::blockPrefixScan( + acc, clus_view.clusModuleStart() + 1, clus_view.clusModuleStart() + 1, 1024, ws); + + cms::alpakatools::blockPrefixScan( + acc, clus_view.clusModuleStart() + 1024 + 1, clus_view.clusModuleStart() + 1024 + 1, leftModules, ws); + + if constexpr (isPhase2) { + cms::alpakatools::blockPrefixScan( + acc, clus_view.clusModuleStart() + 2048 + 1, clus_view.clusModuleStart() + 2048 + 1, 1024, ws); + cms::alpakatools::blockPrefixScan(acc, + clus_view.clusModuleStart() + 3072 + 1, + clus_view.clusModuleStart() + 3072 + 1, + nMaxModules - 3072, + ws); + } + + constexpr auto lastModule = isPhase2 ? 
2049u : nMaxModules + 1; + cms::alpakatools::for_each_element_in_block_strided(acc, lastModule, 1025u, [&](uint32_t i) { + clus_view[i].clusModuleStart() += clus_view[1024].clusModuleStart(); + }); + alpaka::syncBlockThreads(acc); + + if constexpr (isPhase2) { + cms::alpakatools::for_each_element_in_block_strided(acc, 3073u, 2049u, [&](uint32_t i) { + clus_view[i].clusModuleStart() += clus_view[2048].clusModuleStart(); + }); + alpaka::syncBlockThreads(acc); + + cms::alpakatools::for_each_element_in_block_strided(acc, nMaxModules + 1, 3073u, [&](uint32_t i) { + clus_view[i].clusModuleStart() += clus_view[3072].clusModuleStart(); + }); + alpaka::syncBlockThreads(acc); + } +#ifdef GPU_DEBUG + ALPAKA_ASSERT_OFFLOAD(0 == clus_view[0].moduleStart()); + auto c0 = std::min(maxHitsInModule, clus_view[1].clusModuleStart()); + ALPAKA_ASSERT_OFFLOAD(c0 == clus_view[1].moduleStart()); + ALPAKA_ASSERT_OFFLOAD(clus_view[1024].moduleStart() >= clus_view[1023].moduleStart()); + ALPAKA_ASSERT_OFFLOAD(clus_view[1025].moduleStart() >= clus_view[1024].moduleStart()); + ALPAKA_ASSERT_OFFLOAD(clus_view[nMaxModules].moduleStart() >= clus_view[1025].moduleStart()); + + cms::alpakatools::for_each_element_in_block_strided(acc, nMaxModules + 1, [&](uint32_t i) { + if (0 != i) + ALPAKA_ASSERT_OFFLOAD(clus_view[i].moduleStart() >= clus_view[i - i].moduleStart()); + // Check BPX2 (1), FP1 (4) + constexpr auto bpix2 = TrackerTraits::layerStart[1]; + constexpr auto fpix1 = TrackerTraits::layerStart[4]; + if (i == bpix2 || i == fpix1) + printf("moduleStart %d %d\n", i, clus_view[i].moduleStart()); + }); +#endif + // avoid overflow + constexpr auto MAX_HITS = TrackerTraits::maxNumberOfHits; + cms::alpakatools::for_each_element_in_block_strided(acc, nMaxModules + 1, [&](uint32_t i) { + if (clus_view[i].clusModuleStart() > MAX_HITS) + clus_view[i].clusModuleStart() = MAX_HITS; + }); + + } // end of FillHitsModuleStart kernel operator() + }; // end of FillHitsModuleStart struct + + // Interface to 
outside + template + void SiPixelRawToClusterKernel::makePhase1ClustersAsync( + Queue &queue, + const SiPixelClusterThresholds clusterThresholds, + const SiPixelMappingSoAConstView &cablingMap, + const unsigned char *modToUnp, + const SiPixelGainCalibrationForHLTSoAConstView &gains, + const WordFedAppender &wordFed, + const uint32_t wordCounter, + const uint32_t fedCounter, + bool useQualityInfo, + bool includeErrors, + bool debug) { + nDigis = wordCounter; + +#ifdef GPU_DEBUG + std::cout << "decoding " << wordCounter << " digis." << std::endl; +#endif + constexpr int numberOfModules = TrackerTraits::numberOfModules; + digis_d = SiPixelDigisSoACollection(wordCounter, queue); + if (includeErrors) { + digiErrors_d = SiPixelDigiErrorsSoACollection(wordCounter, queue); + } + clusters_d = SiPixelClustersSoACollection(numberOfModules, queue); + // protect in case of empty event.... + if (wordCounter) { + const int threadsPerBlockOrElementsPerThread = + cms::alpakatools::requires_single_thread_per_block_v ? 32 : 512; + // fill it all + const uint32_t blocks = cms::alpakatools::divide_up_by(wordCounter, threadsPerBlockOrElementsPerThread); + const auto workDiv = cms::alpakatools::make_workdiv(blocks, threadsPerBlockOrElementsPerThread); + assert(0 == wordCounter % 2); + // wordCounter is the total no of words in each event to be trasfered on device + auto word_d = cms::alpakatools::make_device_buffer(queue, wordCounter); + // NB: IMPORTANT: fedId_d: In legacy, wordCounter elements are allocated. + // However, only the first half of elements end up eventually used: + // hence, here, only wordCounter/2 elements are allocated. 
+ auto fedId_d = cms::alpakatools::make_device_buffer(queue, wordCounter / 2); + alpaka::memcpy(queue, word_d, wordFed.word(), wordCounter); + alpaka::memcpy(queue, fedId_d, wordFed.fedId(), wordCounter / 2); + // Launch rawToDigi kernel + alpaka::exec(queue, + workDiv, + RawToDigi_kernel{}, + cablingMap, + modToUnp, + wordCounter, + word_d.data(), + fedId_d.data(), + digis_d->view(), + digiErrors_d->view(), + useQualityInfo, + includeErrors, + debug); + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "RawToDigi_kernel was run smoothly!" << std::endl; +#endif + } + // End of Raw2Digi and passing data for clustering + + { + // clusterizer + using namespace pixelClustering; + // calibrations + using namespace calibPixel; + const int threadsPerBlockOrElementsPerThread = []() { + if constexpr (std::is_same_v) { + // NB: MPORTANT: This could be tuned to benefit from innermost loop. + return 32; + } else { + return 256; + } + }(); + const auto blocks = cms::alpakatools::divide_up_by(std::max(wordCounter, numberOfModules), + threadsPerBlockOrElementsPerThread); + const auto workDiv = cms::alpakatools::make_workdiv(blocks, threadsPerBlockOrElementsPerThread); + + alpaka::exec( + queue, workDiv, CalibDigis{}, clusterThresholds, digis_d->view(), clusters_d->view(), gains, wordCounter); + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "CountModules kernel launch with " << blocks << " blocks of " << threadsPerBlockOrElementsPerThread + << " threadsPerBlockOrElementsPerThread\n"; +#endif + + alpaka::exec( + queue, workDiv, CountModules{}, digis_d->view(), clusters_d->view(), wordCounter); + + auto moduleStartFirstElement = + cms::alpakatools::make_device_view(alpaka::getDev(queue), clusters_d->view().moduleStart(), 1u); + alpaka::memcpy(queue, nModules_Clusters_h, moduleStartFirstElement); + + // TODO + // - we are fixing this here since it needs to be needed + // at compile time also in the kernel (for_each_element_in_block_strided) + // - put maxIter in the 
Geometry traits + constexpr auto threadsOrElementsFindClus = 256; + + const auto workDivMaxNumModules = + cms::alpakatools::make_workdiv(numberOfModules, threadsOrElementsFindClus); + // NB: With present FindClus() / chargeCut() algorithm, + // threadPerBlock (GPU) or elementsPerThread (CPU) = 256 show optimal performance. + // Though, it does not have to be the same number for CPU/GPU cases. + +#ifdef GPU_DEBUG + std::cout << " FindClus kernel launch with " << numberOfModules << " blocks of " << threadsOrElementsFindClus + << " threadsPerBlockOrElementsPerThread\n"; +#endif + + alpaka::exec( + queue, workDivMaxNumModules, FindClus{}, digis_d->view(), clusters_d->view(), wordCounter); + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + constexpr auto threadsPerBlockChargeCut = 256; + const auto workDivChargeCut = cms::alpakatools::make_workdiv(numberOfModules, threadsPerBlockChargeCut); + // apply charge cut + alpaka::exec(queue, + workDivChargeCut, + ::pixelClustering::ClusterChargeCut{}, + digis_d->view(), + clusters_d->view(), + clusterThresholds, + wordCounter); + // count the module start indices already here (instead of + // rechits) so that the number of clusters/hits can be made + // available in the rechit producer without additional points of + // synchronization/ExternalWork + + // MUST be ONE block + const auto workDivOneBlock = cms::alpakatools::make_workdiv(1u, 1024u); + alpaka::exec(queue, workDivOneBlock, FillHitsModuleStart{}, clusters_d->view()); + + // last element holds the number of all clusters + const auto clusModuleStartLastElement = cms::alpakatools::make_device_view( + alpaka::getDev(queue), clusters_d->const_view().clusModuleStart() + numberOfModules, 1u); + constexpr int startBPIX2 = TrackerTraits::layerStart[1]; + + // element startBPIX2 hold the number of clusters until BPIX2 + const auto bpix2ClusterStart = cms::alpakatools::make_device_view( + alpaka::getDev(queue), clusters_d->const_view().clusModuleStart() + startBPIX2, 1u); + 
auto nModules_Clusters_h_1 = cms::alpakatools::make_host_view(nModules_Clusters_h.data() + 1, 1u); + alpaka::memcpy(queue, nModules_Clusters_h_1, clusModuleStartLastElement); + + auto nModules_Clusters_h_2 = cms::alpakatools::make_host_view(nModules_Clusters_h.data() + 2, 1u); + alpaka::memcpy(queue, nModules_Clusters_h_2, bpix2ClusterStart); + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "SiPixelClusterizerAlpaka results:" << std::endl + << " > no. of digis: " << nDigis << std::endl + << " > no. of active modules: " << nModules_Clusters_h[0] << std::endl + << " > no. of clusters: " << nModules_Clusters_h[1] << std::endl + << " > bpix2 offset: " << nModules_Clusters_h[2] << std::endl; +#endif + + } // end clusterizer scope + } + + template + void SiPixelRawToClusterKernel::makePhase2ClustersAsync( + Queue &queue, + const SiPixelClusterThresholds clusterThresholds, + SiPixelDigisSoAView &digis_view, + const uint32_t numDigis) { + using namespace pixelClustering; + using pixelTopology::Phase2; + nDigis = numDigis; + constexpr int numberOfModules = pixelTopology::Phase2::numberOfModules; + clusters_d = SiPixelClustersSoACollection(numberOfModules, queue); + const auto threadsPerBlockOrElementsPerThread = 512; + const auto blocks = + cms::alpakatools::divide_up_by(std::max(numDigis, numberOfModules), threadsPerBlockOrElementsPerThread); + const auto workDiv = cms::alpakatools::make_workdiv(blocks, threadsPerBlockOrElementsPerThread); + + alpaka::exec( + queue, workDiv, calibPixel::CalibDigisPhase2{}, clusterThresholds, digis_view, clusters_d->view(), numDigis); + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "CountModules kernel launch with " << blocks << " blocks of " << threadsPerBlockOrElementsPerThread + << " threadsPerBlockOrElementsPerThread\n"; +#endif + alpaka::exec( + queue, workDiv, CountModules{}, digis_view, clusters_d->view(), numDigis); + + auto moduleStartFirstElement = + cms::alpakatools::make_device_view(alpaka::getDev(queue), 
clusters_d->view().moduleStart(), 1u); + alpaka::memcpy(queue, nModules_Clusters_h, moduleStartFirstElement); + + /// should be larger than maxPixInModule/16 aka (maxPixInModule/maxiter in the kernel) + + const auto threadsPerBlockFindClus = 256; + const auto workDivMaxNumModules = cms::alpakatools::make_workdiv(numberOfModules, threadsPerBlockFindClus); + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "FindClus kernel launch with " << numberOfModules << " blocks of " << threadsPerBlockFindClus + << " threadsPerBlockOrElementsPerThread\n"; +#endif + alpaka::exec( + queue, workDivMaxNumModules, FindClus{}, digis_view, clusters_d->view(), numDigis); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + // apply charge cut + alpaka::exec(queue, + workDivMaxNumModules, + ::pixelClustering::ClusterChargeCut{}, + digis_view, + clusters_d->view(), + clusterThresholds, + numDigis); + + // count the module start indices already here (instead of + // rechits) so that the number of clusters/hits can be made + // available in the rechit producer without additional points of + // synchronization/ExternalWork + + // MUST be ONE block + const auto workDivOneBlock = cms::alpakatools::make_workdiv(1u, 1024u); + alpaka::exec(queue, workDivOneBlock, FillHitsModuleStart{}, clusters_d->view()); + + // last element holds the number of all clusters + const auto clusModuleStartLastElement = cms::alpakatools::make_device_view( + alpaka::getDev(queue), clusters_d->const_view().clusModuleStart() + numberOfModules, 1u); + constexpr int startBPIX2 = pixelTopology::Phase2::layerStart[1]; + // element startBPIX2 hold the number of clusters until BPIX2 + const auto bpix2ClusterStart = cms::alpakatools::make_device_view( + alpaka::getDev(queue), clusters_d->const_view().clusModuleStart() + startBPIX2, 1u); + auto nModules_Clusters_h_1 = cms::alpakatools::make_host_view(nModules_Clusters_h.data() + 1, 1u); + alpaka::memcpy(queue, nModules_Clusters_h_1, clusModuleStartLastElement); + + auto 
nModules_Clusters_h_2 = cms::alpakatools::make_host_view(nModules_Clusters_h.data() + 2, 1u); + alpaka::memcpy(queue, nModules_Clusters_h_2, bpix2ClusterStart); + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "SiPixelPhase2DigiToCluster: results \n" + << " > no. of digis: " << numDigis << std::endl + << " > no. of active modules: " << nModules_Clusters_h[0] << std::endl + << " > no. of clusters: " << nModules_Clusters_h[1] << std::endl + << " > bpix2 offset: " << nModules_Clusters_h[2] << std::endl; +#endif + } // + + template class SiPixelRawToClusterKernel; + template class SiPixelRawToClusterKernel; + template class SiPixelRawToClusterKernel; + + } // namespace pixelDetails + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToClusterKernel.h b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToClusterKernel.h new file mode 100644 index 0000000000000..b7b9071506652 --- /dev/null +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/alpaka/SiPixelRawToClusterKernel.h @@ -0,0 +1,199 @@ +#ifndef RecoLocalTracker_SiPixelClusterizer_SiPixelRawToClusterKernel_h +#define RecoLocalTracker_SiPixelClusterizer_SiPixelRawToClusterKernel_h + +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigiErrorsSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigiErrorsDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" + +#include 
"CondFormats/SiPixelObjects/interface/SiPixelGainCalibrationForHLTLayout.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelGainCalibrationForHLTDevice.h" +#include "CondFormats/SiPixelObjects/interface/alpaka/SiPixelMappingDevice.h" + +#include "DataFormats/SiPixelRawData/interface/SiPixelErrorCompact.h" +#include "DataFormats/SiPixelRawData/interface/SiPixelFormatterErrors.h" +#include "DataFormats/SiPixelDetId/interface/PixelChannelIdentifier.h" + +namespace pixelDetails { + + constexpr auto MAX_LINK = pixelgpudetails::MAX_LINK; + constexpr auto MAX_SIZE = pixelgpudetails::MAX_SIZE; + constexpr auto MAX_ROC = pixelgpudetails::MAX_ROC; + // Phase 1 geometry constants + constexpr uint32_t layerStartBit = 20; + constexpr uint32_t ladderStartBit = 12; + constexpr uint32_t moduleStartBit = 2; + + constexpr uint32_t panelStartBit = 10; + constexpr uint32_t diskStartBit = 18; + constexpr uint32_t bladeStartBit = 12; + + constexpr uint32_t layerMask = 0xF; + constexpr uint32_t ladderMask = 0xFF; + constexpr uint32_t moduleMask = 0x3FF; + constexpr uint32_t panelMask = 0x3; + constexpr uint32_t diskMask = 0xF; + constexpr uint32_t bladeMask = 0x3F; + + constexpr uint32_t LINK_bits = 6; + constexpr uint32_t ROC_bits = 5; + constexpr uint32_t DCOL_bits = 5; + constexpr uint32_t PXID_bits = 8; + constexpr uint32_t ADC_bits = 8; + + // special for layer 1 + constexpr uint32_t LINK_bits_l1 = 6; + constexpr uint32_t ROC_bits_l1 = 5; + constexpr uint32_t COL_bits_l1 = 6; + constexpr uint32_t ROW_bits_l1 = 7; + constexpr uint32_t OMIT_ERR_bits = 1; + + constexpr uint32_t maxROCIndex = 8; + constexpr uint32_t numRowsInRoc = 80; + constexpr uint32_t numColsInRoc = 52; + + constexpr uint32_t MAX_WORD = 2000; + + constexpr uint32_t ADC_shift = 0; + constexpr uint32_t PXID_shift = ADC_shift + ADC_bits; + constexpr uint32_t DCOL_shift = PXID_shift + PXID_bits; + constexpr uint32_t ROC_shift = DCOL_shift + DCOL_bits; + constexpr uint32_t LINK_shift = ROC_shift + 
ROC_bits_l1; + // special for layer 1 ROC + constexpr uint32_t ROW_shift = ADC_shift + ADC_bits; + constexpr uint32_t COL_shift = ROW_shift + ROW_bits_l1; + constexpr uint32_t OMIT_ERR_shift = 20; + + constexpr uint32_t LINK_mask = ~(~uint32_t(0) << LINK_bits_l1); + constexpr uint32_t ROC_mask = ~(~uint32_t(0) << ROC_bits_l1); + constexpr uint32_t COL_mask = ~(~uint32_t(0) << COL_bits_l1); + constexpr uint32_t ROW_mask = ~(~uint32_t(0) << ROW_bits_l1); + constexpr uint32_t DCOL_mask = ~(~uint32_t(0) << DCOL_bits); + constexpr uint32_t PXID_mask = ~(~uint32_t(0) << PXID_bits); + constexpr uint32_t ADC_mask = ~(~uint32_t(0) << ADC_bits); + constexpr uint32_t ERROR_mask = ~(~uint32_t(0) << ROC_bits_l1); + constexpr uint32_t OMIT_ERR_mask = ~(~uint32_t(0) << OMIT_ERR_bits); + + struct DetIdGPU { + uint32_t RawId; + uint32_t rocInDet; + uint32_t moduleId; + }; + + struct Pixel { + uint32_t row; + uint32_t col; + }; + + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr pixelchannelidentifierimpl::Packing packing() { + return PixelChannelIdentifier::thePacking; + } + + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE constexpr uint32_t pack(uint32_t row, + uint32_t col, + uint32_t adc, + uint32_t flag = 0) { + constexpr pixelchannelidentifierimpl::Packing thePacking = packing(); + adc = std::min(adc, uint32_t(thePacking.max_adc)); + + return (row << thePacking.row_shift) | (col << thePacking.column_shift) | (adc << thePacking.adc_shift); + } + + constexpr uint32_t pixelToChannel(int row, int col) { + constexpr pixelchannelidentifierimpl::Packing thePacking = packing(); + return (row << thePacking.column_width) | col; + } + +} // namespace pixelDetails + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace pixelDetails { + + class WordFedAppender { + public: + WordFedAppender(); + ~WordFedAppender() = default; + + WordFedAppender(uint32_t words) + : word_{cms::alpakatools::make_host_buffer(words)}, + fedId_{cms::alpakatools::make_host_buffer(words)} {}; + + void initializeWordFed(int 
fedId, unsigned int wordCounterGPU, const uint32_t* src, unsigned int length) { + std::memcpy(word_.data() + wordCounterGPU, src, sizeof(uint32_t) * length); + std::memset(fedId_.data() + wordCounterGPU / 2, fedId - 1200, length / 2); + } + auto word() const { return word_; } + auto fedId() const { return fedId_; } + + private: + cms::alpakatools::host_buffer word_; + cms::alpakatools::host_buffer fedId_; + }; + + template + class SiPixelRawToClusterKernel { + public: + SiPixelRawToClusterKernel() : nModules_Clusters_h{cms::alpakatools::make_host_buffer(3u)} {} + + ~SiPixelRawToClusterKernel() = default; + + SiPixelRawToClusterKernel(const SiPixelRawToClusterKernel&) = delete; + SiPixelRawToClusterKernel(SiPixelRawToClusterKernel&&) = delete; + SiPixelRawToClusterKernel& operator=(const SiPixelRawToClusterKernel&) = delete; + SiPixelRawToClusterKernel& operator=(SiPixelRawToClusterKernel&&) = delete; + + void makePhase1ClustersAsync(Queue& queue, + const SiPixelClusterThresholds clusterThresholds, + const SiPixelMappingSoAConstView& cablingMap, + const unsigned char* modToUnp, + const SiPixelGainCalibrationForHLTSoAConstView& gains, + const WordFedAppender& wordFed, + const uint32_t wordCounter, + const uint32_t fedCounter, + bool useQualityInfo, + bool includeErrors, + bool debug); + + void makePhase2ClustersAsync(Queue& queue, + const SiPixelClusterThresholds clusterThresholds, + SiPixelDigisSoAView& digis_view, + const uint32_t numDigis); + + SiPixelDigisSoACollection getDigis() { + digis_d->setNModulesDigis(nModules_Clusters_h[0], nDigis); + return std::move(*digis_d); + } + + SiPixelClustersSoACollection getClusters() { + clusters_d->setNClusters(nModules_Clusters_h[1], nModules_Clusters_h[2]); + return std::move(*clusters_d); + } + + SiPixelDigiErrorsSoACollection getErrors() { return std::move(*digiErrors_d); } + + auto nModules() { return nModules_Clusters_h[0]; } + + private: + uint32_t nDigis = 0; + + // Data to be put in the event + 
cms::alpakatools::host_buffer nModules_Clusters_h; + std::optional digis_d; + std::optional clusters_d; + std::optional digiErrors_d; + }; + + } // namespace pixelDetails +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // plugin_SiPixelClusterizer_alpaka_SiPixelRawToClusterKernel_h diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h b/RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h index 180b356db2c88..869beb74564b8 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h @@ -8,11 +8,9 @@ #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" #include "CondFormats/SiPixelObjects/interface/SiPixelGainForHLTonGPU.h" -#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" #include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" - -// local include(s) -#include "SiPixelClusterThresholds.h" +#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" namespace gpuCalibPixel { @@ -52,6 +50,7 @@ namespace gpuCalibPixel { int row = x[i]; int col = y[i]; + auto ret = ped->getPedAndGain(id[i], col, row, isDeadColumn, isNoisyColumn); float pedestal = ret.first; float gain = ret.second; diff --git a/RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h b/RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h index a96cd0bcc5c15..1ff62ed1c6c57 100644 --- a/RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h +++ b/RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h @@ -8,9 +8,7 @@ #include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" #include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" #include "HeterogeneousCore/CUDAUtilities/interface/prefixScan.h" - -// local include(s) -#include "SiPixelClusterThresholds.h" +#include 
"RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" namespace gpuClustering { diff --git a/RecoLocalTracker/SiPixelClusterizer/python/siPixelClustersPreSplitting_cff.py b/RecoLocalTracker/SiPixelClusterizer/python/siPixelClustersPreSplitting_cff.py index 4460dd6ab0240..a6dd2bea80e2a 100644 --- a/RecoLocalTracker/SiPixelClusterizer/python/siPixelClustersPreSplitting_cff.py +++ b/RecoLocalTracker/SiPixelClusterizer/python/siPixelClustersPreSplitting_cff.py @@ -1,6 +1,8 @@ import FWCore.ParameterSet.Config as cms +from HeterogeneousCore.AlpakaCore.functions import * from Configuration.Eras.Modifier_run3_common_cff import run3_common from Configuration.ProcessModifiers.gpu_cff import gpu +from Configuration.ProcessModifiers.alpaka_cff import alpaka # conditions used *only* by the modules running on GPU from CalibTracker.SiPixelESProducers.siPixelROCsStatusAndMappingWrapperESProducer_cfi import siPixelROCsStatusAndMappingWrapperESProducer @@ -17,6 +19,7 @@ # reconstruct the pixel digis and clusters on the gpu from RecoLocalTracker.SiPixelClusterizer.siPixelRawToClusterCUDAPhase1_cfi import siPixelRawToClusterCUDAPhase1 as _siPixelRawToClusterCUDA from RecoLocalTracker.SiPixelClusterizer.siPixelRawToClusterCUDAHIonPhase1_cfi import siPixelRawToClusterCUDAHIonPhase1 as _siPixelRawToClusterCUDAHIonPhase1 + siPixelClustersPreSplittingCUDA = _siPixelRawToClusterCUDA.clone() # HIon Modifiers @@ -34,7 +37,6 @@ VCaltoElectronOffset = 0, VCaltoElectronOffset_L1 = 0) - from RecoLocalTracker.SiPixelClusterizer.siPixelDigisClustersFromSoAPhase1_cfi import siPixelDigisClustersFromSoAPhase1 as _siPixelDigisClustersFromSoAPhase1 from RecoLocalTracker.SiPixelClusterizer.siPixelDigisClustersFromSoAPhase2_cfi import siPixelDigisClustersFromSoAPhase2 as _siPixelDigisClustersFromSoAPhase2 @@ -93,3 +95,90 @@ siPixelDigisClustersPreSplitting, # SwitchProducer wrapping the legacy pixel cluster producer or an alias for the pixel clusters information converted from SoA 
siPixelClustersPreSplitting)) + +###################################################################### + +### Alpaka Pixel Clusters Reco + +#from CalibTracker.SiPixelESProducers.siPixelCablingSoAESProducer_cfi import siPixelCablingSoAESProducer +#from CalibTracker.SiPixelESProducers.siPixelGainCalibrationForHLTSoAESProducer_cfi import siPixelGainCalibrationForHLTSoAESProducer + +def _addProcessCalibTrackerAlpakaES(process): + process.load("CalibTracker.SiPixelESProducers.siPixelCablingSoAESProducer_cfi") + process.load("CalibTracker.SiPixelESProducers.siPixelGainCalibrationForHLTSoAESProducer_cfi") + +modifyConfigurationCalibTrackerAlpakaES_ = alpaka.makeProcessModifier(_addProcessCalibTrackerAlpakaES) + +# reconstruct the pixel digis and clusters with alpaka on the device +from RecoLocalTracker.SiPixelClusterizer.siPixelRawToClusterPhase1_cfi import siPixelRawToClusterPhase1 as _siPixelRawToClusterAlpaka +siPixelClustersPreSplittingAlpaka = _siPixelRawToClusterAlpaka.clone() + +(alpaka & run3_common).toModify(siPixelClustersPreSplittingAlpaka, + # use the pixel channel calibrations scheme for Run 3 + clusterThreshold_layer1 = 4000, + VCaltoElectronGain = 1, # all gains=1, pedestals=0 + VCaltoElectronGain_L1 = 1, + VCaltoElectronOffset = 0, + VCaltoElectronOffset_L1 = 0) + +from RecoLocalTracker.SiPixelClusterizer.siPixelPhase2DigiToCluster_cfi import siPixelPhase2DigiToCluster as _siPixelPhase2DigiToCluster + +(alpaka & phase2_tracker).toReplaceWith(siPixelClustersPreSplittingAlpaka, _siPixelPhase2DigiToCluster.clone( + Phase2ReadoutMode = PixelDigitizerAlgorithmCommon.Phase2ReadoutMode.value(), # flag to decide Readout Mode : linear TDR (-1), dual slope with slope parameters (+1,+2,+3,+4 ...) 
with threshold subtraction + Phase2DigiBaseline = int(PixelDigitizerAlgorithmCommon.ThresholdInElectrons_Barrel.value()), # same for barrel and endcap + Phase2KinkADC = 8, + ElectronPerADCGain = PixelDigitizerAlgorithmCommon.ElectronPerAdc.value() +)) + +# reconstruct the pixel digis and clusters with alpaka on the cpu, for validation +siPixelClustersPreSplittingAlpakaSerial = makeSerialClone(siPixelClustersPreSplittingAlpaka) + +from RecoLocalTracker.SiPixelClusterizer.siPixelDigisClustersFromSoAAlpakaPhase1_cfi import siPixelDigisClustersFromSoAAlpakaPhase1 as _siPixelDigisClustersFromSoAAlpakaPhase1 +from RecoLocalTracker.SiPixelClusterizer.siPixelDigisClustersFromSoAAlpakaPhase2_cfi import siPixelDigisClustersFromSoAAlpakaPhase2 as _siPixelDigisClustersFromSoAAlpakaPhase2 + +(alpaka & ~phase2_tracker).toReplaceWith(siPixelDigisClustersPreSplitting,_siPixelDigisClustersFromSoAAlpakaPhase1.clone( + src = "siPixelClustersPreSplittingAlpaka" +)) + +(alpaka & phase2_tracker).toReplaceWith(siPixelDigisClustersPreSplitting,_siPixelDigisClustersFromSoAAlpakaPhase2.clone( + clusterThreshold_layer1 = 4000, + clusterThreshold_otherLayers = 4000, + src = "siPixelClustersPreSplittingAlpaka", + storeDigis = False, + produceDigis = False +)) + +from RecoLocalTracker.SiPixelClusterizer.siPixelDigisClustersFromSoAAlpakaPhase1_cfi import siPixelDigisClustersFromSoAAlpakaPhase1 as _siPixelDigisClustersFromSoAAlpakaPhase1 +from RecoLocalTracker.SiPixelClusterizer.siPixelDigisClustersFromSoAAlpakaPhase2_cfi import siPixelDigisClustersFromSoAAlpakaPhase2 as _siPixelDigisClustersFromSoAAlpakaPhase2 + +alpaka.toModify(siPixelClustersPreSplitting, + cpu = cms.EDAlias( + siPixelDigisClustersPreSplitting = cms.VPSet( + cms.PSet(type = cms.string("SiPixelClusteredmNewDetSetVector")) + ) + ) +) + +# Run 3 +alpaka.toReplaceWith(siPixelClustersPreSplittingTask, cms.Task( + # reconstruct the pixel clusters with alpaka + siPixelClustersPreSplittingAlpaka, + # reconstruct the pixel clusters 
with alpaka on the cpu (if requested by the validation) + siPixelClustersPreSplittingAlpakaSerial, + # convert from host SoA to legacy formats (digis and clusters) + siPixelDigisClustersPreSplitting, + # EDAlias for the clusters + siPixelClustersPreSplitting) +) + +# Phase 2 +(alpaka & phase2_tracker).toReplaceWith(siPixelClustersPreSplittingTask, cms.Task( + # reconstruct the pixel clusters with alpaka from copied digis + siPixelClustersPreSplittingAlpaka, + # reconstruct the pixel clusters with alpaka from copied digis on the cpu (if requested by the validation) + siPixelClustersPreSplittingAlpakaSerial, + # convert the pixel digis (except errors) and clusters to the legacy format + siPixelDigisClustersPreSplitting, + # SwitchProducer wrapping the legacy pixel cluster producer or an alias for the pixel clusters information converted from SoA + siPixelClustersPreSplitting) +) diff --git a/RecoLocalTracker/SiPixelClusterizer/test/TestPixTracks.cc b/RecoLocalTracker/SiPixelClusterizer/test/TestPixTracks.cc deleted file mode 100644 index 9b98de741e709..0000000000000 --- a/RecoLocalTracker/SiPixelClusterizer/test/TestPixTracks.cc +++ /dev/null @@ -1,1004 +0,0 @@ -// File: ReadPixClusters.cc -// Description: TO test the pixel clusters with tracks (full) -// Author: Danek Kotlinski -// Creation Date: Initial version. 
3/06 -// -//-------------------------------------------- -#include -#include -#include - -#include "DataFormats/Common/interface/Handle.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Framework/interface/EventSetup.h" - -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/MakerMacros.h" -//#include "DataFormats/Common/interface/Handle.h" - -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/ServiceRegistry/interface/Service.h" -#include "FWCore/Utilities/interface/InputTag.h" - -//#include "DataFormats/SiPixelCluster/interface/SiPixelClusterCollection.h" -#include "DataFormats/SiPixelCluster/interface/SiPixelCluster.h" -#include "DataFormats/TrackerRecHit2D/interface/SiPixelRecHitCollection.h" -#include "DataFormats/Common/interface/DetSetVector.h" -#include "DataFormats/Common/interface/Ref.h" -#include "DataFormats/DetId/interface/DetId.h" - -#include "DataFormats/SiPixelDetId/interface/PXBDetId.h" -#include "DataFormats/SiPixelDetId/interface/PXFDetId.h" -#include "DataFormats/SiPixelDetId/interface/PixelSubdetector.h" - -#include "Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" -#include "Geometry/CommonDetUnit/interface/PixelGeomDetType.h" -#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" -#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" -#include "Geometry/CommonDetUnit/interface/GeomDetType.h" -#include "Geometry/CommonDetUnit/interface/GeomDet.h" -//#include "Geometry/CommonTopologies/interface/PixelTopology.h" - -// For L1 -#include "L1Trigger/GlobalTriggerAnalyzer/interface/L1GtUtils.h" -#include "DataFormats/L1GlobalTrigger/interface/L1GlobalTriggerReadoutSetupFwd.h" -#include "DataFormats/L1GlobalTrigger/interface/L1GlobalTriggerReadoutSetup.h" -#include "DataFormats/L1GlobalTrigger/interface/L1GlobalTriggerReadoutRecord.h" 
-#include "DataFormats/L1GlobalTrigger/interface/L1GlobalTriggerObjectMapRecord.h" - -// For HLT -#include "DataFormats/HLTReco/interface/TriggerEvent.h" -#include "DataFormats/HLTReco/interface/TriggerTypeDefs.h" -#include "DataFormats/Common/interface/TriggerResults.h" -#include "HLTrigger/HLTcore/interface/HLTConfigProvider.h" -#include "FWCore/Common/interface/TriggerNames.h" - -// For tracks -#include "DataFormats/TrackReco/interface/Track.h" -#include "TrackingTools/PatternTools/interface/Trajectory.h" - -//#include "TrackingTools/TrajectoryState/interface/TrajectoryStateTransform.h" - -#include "TrackingTools/TransientTrack/interface/TransientTrack.h" - -#include "TrackingTools/PatternTools/interface/TrajTrackAssociation.h" -//#include "TrackingTools/PatternTools/interface/TrajectoryFitter.h" - -#include "TrackingTools/TrackFitters/interface/TrajectoryStateCombiner.h" - -//#include "TrackingTools/TrackAssociator/interface/TrackDetectorAssociator.h" -//#include "TrackingTools/TrackAssociator/interface/TrackAssociatorParameters.h" - -//#include "Alignment/OfflineValidation/interface/TrackerValidationVariables.h" - -//#include "TrackingTools/TrackAssociator/interface/TrackDetectorAssociator.h" -//#include "TrackingTools/PatternTools/interface/Trajectory.h" -//#include "TrackingTools/TrajectoryState/interface/TrajectoryStateTransform.h" -//#include "TrackingTools/TransientTrack/interface/TransientTrack.h" -//#include "TrackingTools/PatternTools/interface/TrajTrackAssociation.h" - -#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" -#include - -// For luminisoty -#include "DataFormats/Luminosity/interface/LumiSummary.h" -#include "DataFormats/Common/interface/ConditionsInEdm.h" - -// To use root histos -#include "FWCore/ServiceRegistry/interface/Service.h" -#include "CommonTools/UtilAlgos/interface/TFileService.h" - -// For ROOT -#include -#include -#include -#include -#include -#include -#include -#include - -#define HISTOS -//#define L1 
-//#define HLT - -using namespace std; - -class TestPixTracks : public edm::EDAnalyzer { -public: - explicit TestPixTracks(const edm::ParameterSet &conf); - virtual ~TestPixTracks(); - virtual void analyze(const edm::Event &e, const edm::EventSetup &c) override; - virtual void beginRun(const edm::EventSetup &iSetup) override; - virtual void beginJob() override; - virtual void endJob() override; - -private: - edm::EDGetTokenT lumiToken_; - edm::EDGetTokenT condToken_; - edm::EDGetTokenT l1gtrrToken_; - edm::EDGetTokenT hltToken_; - edm::EDGetTokenT vtxToken_; - edm::EDGetTokenT srcToken_; - edm::EDGetTokenT trackAssocToken_; - edm::ESGetToken trackerGeomToken_; - //const static bool PRINT = false; - bool PRINT; - float countTracks, countGoodTracks, countTracksInPix, countPVs, countEvents, countLumi; - - //TFile* hFile; - //TH1D *hdetunit; - //TH1D *hpixid,*hpixsubid, - //*hlayerid, - //*hladder1id,*hladder2id,*hladder3id, - //*hz1id,*hz2id,*hz3id; - - TH1D *hcharge1, *hcharge2, *hcharge3, *hcharge; - TH1D *hpixcharge1, *hpixcharge2, *hpixcharge3, *hpixcharge; - TH1D *hcols1, *hcols2, *hcols3, *hrows1, *hrows2, *hrows3; - TH1D *hsize1, *hsize2, *hsize3, *hsizex1, *hsizex2, *hsizex3, *hsizey1, *hsizey2, *hsizey3; - - TH1D *hclusPerTrk1, *hclusPerTrk2, *hclusPerTrk3; - TH1D *hclusPerLay1, *hclusPerLay2, *hclusPerLay3; - TH1D *hpixPerLay1, *hpixPerLay2, *hpixPerLay3; - //TH1D *hdetsPerLay1,*hdetsPerLay2,*hdetsPerLay3; - - //TH1D *hdetr, *hdetz; - // TH1D *hcolsB, *hrowsB, *hcolsF, *hrowsF; - TH2F *hDetMap1, *hDetMap2, *hDetMap3; // clusters - //TH2F *hpixDetMap1, *hpixDetMap2, *hpixDetMap3; - TH2F *hcluDetMap1, *hcluDetMap2, *hcluDetMap3; - - TH2F *hpvxy, *hclusMap1, *hclusMap2, *hclusMap3; - - TH1D *hpvz, *hpvr, *hNumPv, *hNumPvClean; - TH1D *hPt, *hEta, *hDz, *hD0, *hzdiff; - - //TH1D *hncharge1,*hncharge2, *hncharge3; - //TH1D *hchargeMonoPix1,*hchargeMonoPix2, *hchargeMonoPix3; - // TH1D *hnpixcharge1,*hnpixcharge2, *hnpixcharge3; - //TH1D 
*htest1,*htest2,*htest3,*htest4,*htest5,*htest6,*htest7,*htest8,*htest9; - TH1D *hl1a, *hl1t, *hlt1; - - TH1D *hclusBpix, *hpixBpix; - TH1D *htracks, *htracksGood, *htracksGoodInPix; - - TProfile *hclumult1, *hclumult2, *hclumult3; - TProfile *hclumultx1, *hclumultx2, *hclumultx3; - TProfile *hclumulty1, *hclumulty2, *hclumulty3; - TProfile *hcluchar1, *hcluchar2, *hcluchar3; - TProfile *hpixchar1, *hpixchar2, *hpixchar3; - - TProfile *htracksls, *hpvsls, *htrackslsn, *hpvslsn, *hintgl, *hinstl, *hbeam1, *hbeam2; - - TH1D *hlumi, *hlumi0, *hbx, *hbx0; -}; -///////////////////////////////////////////////////////////////// -// Contructor, -TestPixTracks::TestPixTracks(edm::ParameterSet const &conf) { - PRINT = conf.getUntrackedParameter("Verbosity", false); - lumiToken_ = consumes(edm::InputTag("lumiProducer")); - condToken_ = consumes(edm::InputTag("conditionsInEdm")); - l1gtrrToken_ = consumes(edm::InputTag("gtDigis")); - hltToken_ = consumes(edm::InputTag("TriggerResults", "", "HLT")); - vtxToken_ = consumes(edm::InputTag("offlinePrimaryVertices")); - srcToken_ = consumes(conf.getParameter("src")); - trackAssocToken_ = - consumes(edm::InputTag(conf.getParameter("trajectoryInput"))); - trackerGeomToken_ = esConsumes(); - //if(PRINT) cout<<" Construct "< fs; - - // put here whatever you want to do at the beginning of the job - //hFile = new TFile ( "histo.root", "RECREATE" ); - - //hladder1id = fs->make( "hladder1id", "Ladder L1 id", 50, 0., 50.); - //hladder2id = fs->make( "hladder2id", "Ladder L2 id", 50, 0., 50.); - //hladder3id = fs->make( "hladder3id", "Ladder L3 id", 50, 0., 50.); - //hz1id = fs->make( "hz1id", "Z-index id L1", 10, 0., 10.); - //hz2id = fs->make( "hz2id", "Z-index id L2", 10, 0., 10.); - //hz3id = fs->make( "hz3id", "Z-index id L3", 10, 0., 10.); - - int sizeH = 20; - float lowH = -0.5; - float highH = 19.5; - - hclusPerTrk1 = fs->make("hclusPerTrk1", "Clus per track l1", sizeH, lowH, highH); - hclusPerTrk2 = fs->make("hclusPerTrk2", "Clus per 
track l2", sizeH, lowH, highH); - hclusPerTrk3 = fs->make("hclusPerTrk3", "Clus per track l3", sizeH, lowH, highH); - - sizeH = 2000; - highH = 1999.5; - hclusPerLay1 = fs->make("hclusPerLay1", "Clus per layer l1", sizeH, lowH, highH); - hclusPerLay2 = fs->make("hclusPerLay2", "Clus per layer l2", sizeH, lowH, highH); - hclusPerLay3 = fs->make("hclusPerLay3", "Clus per layer l3", sizeH, lowH, highH); - - highH = 9999.5; - hpixPerLay1 = fs->make("hpixPerLay1", "Pix per layer l1", sizeH, lowH, highH); - hpixPerLay2 = fs->make("hpixPerLay2", "Pix per layer l2", sizeH, lowH, highH); - hpixPerLay3 = fs->make("hpixPerLay3", "Pix per layer l3", sizeH, lowH, highH); - - //hdetsPerLay1 = fs->make( "hdetsPerLay1", "Full dets per layer l1", - // 161, -0.5, 160.5); - //hdetsPerLay3 = fs->make( "hdetsPerLay3", "Full dets per layer l3", - // 353, -0.5, 352.5); - //hdetsPerLay2 = fs->make( "hdetsPerLay2", "Full dets per layer l2", - // 257, -0.5, 256.5); - - hcharge1 = fs->make("hcharge1", "Clu charge l1", 400, 0., 200.); //in ke - hcharge2 = fs->make("hcharge2", "Clu charge l2", 400, 0., 200.); - hcharge3 = fs->make("hcharge3", "Clu charge l3", 400, 0., 200.); - - //hchargeMonoPix1 = fs->make( "hchargeMonoPix1", "Clu charge l1 MonPix", 200, 0.,100.); //in ke - //hchargeMonoPix2 = fs->make( "hchargeMonoPix2", "Clu charge l2 MonPix", 200, 0.,100.); - //hchargeMonoPix3 = fs->make( "hchargeMonoPix3", "Clu charge l3 MonPix", 200, 0.,100.); - - //hncharge1 = fs->make( "hncharge1", "Noise charge l1", 200, 0.,100.); //in ke - //hncharge2 = fs->make( "hncharge2", "Noise charge l2", 200, 0.,100.); - //hncharge3 = fs->make( "hncharge3", "Noise charge l3", 200, 0.,100.); - - //hpixcharge1 = fs->make( "hpixcharge1", "Pix charge l1", 100, 0.,50.); //in ke - //hpixcharge2 = fs->make( "hpixcharge2", "Pix charge l2", 100, 0.,50.); - //hpixcharge3 = fs->make( "hpixcharge3", "Pix charge l3", 100, 0.,50.); - - //hnpixcharge1 = fs->make( "hnpixcharge1", "Noise pix charge l1", 100, 0.,50.); //in ke - 
//hnpixcharge2 = fs->make( "hnpixcharge2", "Noise pix charge l2", 100, 0.,50.); - //hnpixcharge3 = fs->make( "hnpixcharge3", "Noise pix charge l3", 100, 0.,50.); - - //hpixcharge = fs->make( "hpixcharge", "Clu charge", 100, 0.,50.); - //hcharge = fs->make( "hcharge", "Pix charge", 100, 0.,50.); - - hcols1 = fs->make("hcols1", "Layer 1 cols", 500, -0.5, 499.5); - hcols2 = fs->make("hcols2", "Layer 2 cols", 500, -0.5, 499.5); - hcols3 = fs->make("hcols3", "Layer 3 cols", 500, -0.5, 499.5); - - hrows1 = fs->make("hrows1", "Layer 1 rows", 200, -0.5, 199.5); - hrows2 = fs->make("hrows2", "Layer 2 rows", 200, -0.5, 199.5); - hrows3 = fs->make("hrows3", "layer 3 rows", 200, -0.5, 199.5); - - hsize1 = fs->make("hsize1", "layer 1 clu size", 100, -0.5, 99.5); - hsize2 = fs->make("hsize2", "layer 2 clu size", 100, -0.5, 99.5); - hsize3 = fs->make("hsize3", "layer 3 clu size", 100, -0.5, 99.5); - hsizex1 = fs->make("hsizex1", "lay1 clu size in x", 20, -0.5, 19.5); - hsizex2 = fs->make("hsizex2", "lay2 clu size in x", 20, -0.5, 19.5); - hsizex3 = fs->make("hsizex3", "lay3 clu size in x", 20, -0.5, 19.5); - hsizey1 = fs->make("hsizey1", "lay1 clu size in y", 30, -0.5, 29.5); - hsizey2 = fs->make("hsizey2", "lay2 clu size in y", 30, -0.5, 29.5); - hsizey3 = fs->make("hsizey3", "lay3 clu size in y", 30, -0.5, 29.5); - - hDetMap1 = fs->make("hDetMap1", "layer 1 clus map", 9, 0., 9., 23, 0., 23.); - hDetMap2 = fs->make("hDetMap2", "layer 2 clus map", 9, 0., 9., 33, 0., 33.); - hDetMap3 = fs->make("hDetMap3", "layer 3 clus map", 9, 0., 9., 45, 0., 45.); - //hpixDetMap1 = fs->make( "hpixDetMap1", "pix det layer 1", - // 416,0.,416.,160,0.,160.); - //hpixDetMap2 = fs->make( "hpixDetMap2", "pix det layer 2", - // 416,0.,416.,160,0.,160.); - //hpixDetMap3 = fs->make( "hpixDetMap3", "pix det layer 3", - // 416,0.,416.,160,0.,160.); - hcluDetMap1 = fs->make("hcluDetMap1", "clu det layer 1", 416, 0., 416., 160, 0., 160.); - hcluDetMap2 = fs->make("hcluDetMap2", "clu det layer 1", 416, 0., 
416., 160, 0., 160.); - hcluDetMap3 = fs->make("hcluDetMap3", "clu det layer 1", 416, 0., 416., 160, 0., 160.); - - htracksGoodInPix = fs->make("htracksGoodInPix", "count good tracks in pix", 2000, -0.5, 1999.5); - htracksGood = fs->make("htracksGood", "count good tracks", 2000, -0.5, 1999.5); - htracks = fs->make("htracks", "count tracks", 2000, -0.5, 1999.5); - hclusBpix = fs->make("hclusBpix", "count clus in bpix", 200, -0.5, 1999.5); - hpixBpix = fs->make("hpixBpix", "count pixels", 200, -0.5, 1999.5); - - hpvxy = fs->make("hpvxy", "pv xy", 100, -1., 1., 100, -1., 1.); - hpvz = fs->make("hpvz", "pv z", 1000, -50., 50.); - hpvr = fs->make("hpvr", "pv r", 100, 0., 1.); - hNumPv = fs->make("hNumPv", "num of pv", 100, 0., 100.); - hNumPvClean = fs->make("hNumPvClean", "num of pv clean", 100, 0., 100.); - - hPt = fs->make("hPt", "pt", 100, 0., 100.); - hEta = fs->make("hEta", "eta", 50, -2.5, 2.5); - hD0 = fs->make("hD0", "d0", 500, 0., 5.); - hDz = fs->make("hDz", "pt", 250, -25., 25.); - hzdiff = fs->make("hzdiff", "PVz-Trackz", 200, -10., 10.); - - hl1a = fs->make("hl1a", "l1a", 128, -0.5, 127.5); - hl1t = fs->make("hl1t", "l1t", 128, -0.5, 127.5); - hlt1 = fs->make("hlt1", "hlt1", 256, -0.5, 255.5); - - hclumult1 = fs->make("hclumult1", "cluster size layer 1", 60, -3., 3., 0.0, 100.); - hclumult2 = fs->make("hclumult2", "cluster size layer 2", 60, -3., 3., 0.0, 100.); - hclumult3 = fs->make("hclumult3", "cluster size layer 3", 60, -3., 3., 0.0, 100.); - - hclumultx1 = fs->make("hclumultx1", "cluster x-size layer 1", 60, -3., 3., 0.0, 100.); - hclumultx2 = fs->make("hclumultx2", "cluster x-size layer 2", 60, -3., 3., 0.0, 100.); - hclumultx3 = fs->make("hclumultx3", "cluster x-size layer 3", 60, -3., 3., 0.0, 100.); - - hclumulty1 = fs->make("hclumulty1", "cluster y-size layer 1", 60, -3., 3., 0.0, 100.); - hclumulty2 = fs->make("hclumulty2", "cluster y-size layer 2", 60, -3., 3., 0.0, 100.); - hclumulty3 = fs->make("hclumulty3", "cluster y-size layer 3", 60, 
-3., 3., 0.0, 100.); - - hcluchar1 = fs->make("hcluchar1", "cluster char layer 1", 60, -3., 3., 0.0, 1000.); - hcluchar2 = fs->make("hcluchar2", "cluster char layer 2", 60, -3., 3., 0.0, 1000.); - hcluchar3 = fs->make("hcluchar3", "cluster char layer 3", 60, -3., 3., 0.0, 1000.); - - hpixchar1 = fs->make("hpixchar1", "pix char layer 1", 60, -3., 3., 0.0, 1000.); - hpixchar2 = fs->make("hpixchar2", "pix char layer 2", 60, -3., 3., 0.0, 1000.); - hpixchar3 = fs->make("hpixchar3", "pix char layer 3", 60, -3., 3., 0.0, 1000.); - - hintgl = fs->make("hintgl", "inst lumi vs ls ", 1000, 0., 3000., 0.0, 10000.); - hinstl = fs->make("hinstl", "intg lumi vs ls ", 1000, 0., 3000., 0.0, 100.); - hbeam1 = fs->make("hbeam1", "beam1 vs ls ", 1000, 0., 3000., 0.0, 1000.); - hbeam2 = fs->make("hbeam2", "beam2 vs ls ", 1000, 0., 3000., 0.0, 1000.); - - htracksls = fs->make("htracksls", "tracks with pix hits vs ls", 1000, 0., 3000., 0.0, 10000.); - hpvsls = fs->make("hpvsls", "pvs vs ls", 1000, 0., 3000., 0.0, 1000.); - htrackslsn = fs->make("htrackslsn", "tracks with pix hits/lumi vs ls", 1000, 0., 3000., 0.0, 10000.); - hpvslsn = fs->make("hpvslsn", "pvs/lumi vs ls", 1000, 0., 3000., 0.0, 1000.); - - hlumi0 = fs->make("hlumi0", "lumi", 2000, 0, 2000.); - hlumi = fs->make("hlumi", "lumi", 2000, 0, 2000.); - hbx0 = fs->make("hbx0", "bx", 4000, 0, 4000.); - hbx = fs->make("hbx", "bx", 4000, 0, 4000.); - - hclusMap1 = fs->make("hclusMap1", "clus - lay1", 260, -26., 26., 350, -3.5, 3.5); - hclusMap2 = fs->make("hclusMap2", "clus - lay2", 260, -26., 26., 350, -3.5, 3.5); - hclusMap3 = fs->make("hclusMap3", "clus - lay3", 260, -26., 26., 350, -3.5, 3.5); - -#endif -} -// ------------ method called to at the end of the job ------------ -void TestPixTracks::endJob() { - cout << " End PixelTracksTest " << endl; - - if (countEvents > 0.) 
{ - countTracks /= countEvents; - countGoodTracks /= countEvents; - countTracksInPix /= countEvents; - countPVs /= countEvents; - countLumi /= 1000.; - cout << " Average tracks/event " << countTracks << " good " << countGoodTracks << " in pix " << countTracksInPix - << " PVs " << countPVs << " events " << countEvents << " lumi pb-1 " << countLumi << "/10, bug!" << endl; - } -} -////////////////////////////////////////////////////////////////// -// Functions that gets called by framework every event -void TestPixTracks::analyze(const edm::Event &e, const edm::EventSetup &es) { - using namespace edm; - using namespace reco; - static int lumiBlockOld = -9999; - - const float CLU_SIZE_PT_CUT = 1.; - - int trackNumber = 0; - int countNiceTracks = 0; - int countPixTracks = 0; - - int numberOfDetUnits = 0; - int numberOfClusters = 0; - int numberOfPixels = 0; - - int numberOfDetUnits1 = 0; - int numOfClustersPerDet1 = 0; - int numOfClustersPerLay1 = 0; - int numOfPixelsPerLay1 = 0; - - int numberOfDetUnits2 = 0; - int numOfClustersPerDet2 = 0; - int numOfClustersPerLay2 = 0; - int numOfPixelsPerLay2 = 0; - - int numberOfDetUnits3 = 0; - int numOfClustersPerDet3 = 0; - int numOfClustersPerLay3 = 0; - int numOfPixelsPerLay3 = 0; - - int run = e.id().run(); - int event = e.id().event(); - int lumiBlock = e.luminosityBlock(); - int bx = e.bunchCrossing(); - int orbit = e.orbitNumber(); - - if (PRINT) - cout << "Run " << run << " Event " << event << " LS " << lumiBlock << endl; - - hbx0->Fill(float(bx)); - hlumi0->Fill(float(lumiBlock)); - - edm::LuminosityBlock const &iLumi = e.getLuminosityBlock(); - edm::Handle lumi; - iLumi.getByToken(lumiToken_, lumi); - edm::Handle cond; - float intlumi = 0, instlumi = 0; - int beamint1 = 0, beamint2 = 0; - iLumi.getByToken(condToken_, cond); - // This will only work when running on RECO until (if) they fix it in the FW - // When running on RAW and reconstructing, the LumiSummary will not appear - // in the event before reaching 
endLuminosityBlock(). Therefore, it is not - // possible to get this info in the event - if (lumi.isValid()) { - intlumi = (lumi->intgRecLumi()) / 1000.; // 10^30 -> 10^33/cm2/sec -> 1/nb/sec - instlumi = (lumi->avgInsDelLumi()) / 1000.; - beamint1 = (cond->totalIntensityBeam1) / 1000; - beamint2 = (cond->totalIntensityBeam2) / 1000; - } else { - //std::cout << "** ERROR: Event does not get lumi info\n"; - } - - hinstl->Fill(float(lumiBlock), float(instlumi)); - hintgl->Fill(float(lumiBlock), float(intlumi)); - hbeam1->Fill(float(lumiBlock), float(beamint1)); - hbeam2->Fill(float(lumiBlock), float(beamint2)); - -#ifdef L1 - // Get L1 - Handle L1GTRR; - e.getByToken(l1gtrrToken_, L1GTRR); - - if (L1GTRR.isValid()) { - //bool l1a = L1GTRR->decision(); // global decission? - //cout<<" L1 status :"<decisionWord().size(); ++i) { - int l1flag = L1GTRR->decisionWord()[i]; - int t1flag = L1GTRR->technicalTriggerWord()[i]; - - if (l1flag > 0) - hl1a->Fill(float(i)); - if (t1flag > 0 && i < 64) - hl1t->Fill(float(i)); - } // for loop - } // if l1a -#endif - -#ifdef HLT - - bool hlt[256]; - for (int i = 0; i < 256; ++i) - hlt[i] = false; - - edm::TriggerNames TrigNames; - edm::Handle HLTResults; - - // Extract the HLT results - e.getByToken(hltToken_, HLTResults); - if ((HLTResults.isValid() == true) && (HLTResults->size() > 0)) { - //TrigNames.init(*HLTResults); - const edm::TriggerNames &TrigNames = e.triggerNames(*HLTResults); - - //cout<wasrun(TrigNames.triggerIndex(TrigNames.triggerName(i))) == true) && - (HLTResults->accept(TrigNames.triggerIndex(TrigNames.triggerName(i))) == true) && - (HLTResults->error(TrigNames.triggerIndex(TrigNames.triggerName(i))) == false)) { - hlt[i] = true; - hlt1->Fill(float(i)); - - } // if hlt - - } // loop - } // if valid -#endif - - // Get event setup - edm::ESHandle geom = es.getHandle(trackerGeomToken_); - const TrackerGeometry &theTracker(*geom); - - // -- Primary vertices - // 
---------------------------------------------------------------------- - edm::Handle vertices; - e.getByToken(vtxToken_, vertices); - - if (PRINT) - cout << " PV list " << vertices->size() << endl; - int pvNotFake = 0, pvsTrue = 0; - vector pvzVector; - for (reco::VertexCollection::const_iterator iv = vertices->begin(); iv != vertices->end(); ++iv) { - if ((iv->isFake()) == 1) - continue; - pvNotFake++; - float pvx = iv->x(); - float pvy = iv->y(); - float pvz = iv->z(); - int numTracksPerPV = iv->tracksSize(); - //int numTracksPerPV = iv->nTracks(); - - //float xe = iv->xError(); - //float ye = iv->yError(); - //float ze = iv->zError(); - //int chi2 = iv->chi2(); - //int dof = iv->ndof(); - - if (PRINT) - cout << " PV " << pvNotFake << " pos = " << pvx << "/" << pvy << "/" << pvz << ", Num of tracks " - << numTracksPerPV << endl; - - hpvz->Fill(pvz); - if (pvz > -22. && pvz < 22.) { - float pvr = sqrt(pvx * pvx + pvy * pvy); - hpvxy->Fill(pvx, pvy); - hpvr->Fill(pvr); - if (pvr < 0.3) { - pvsTrue++; - pvzVector.push_back(pvz); - //if(PRINT) cout<<"PV "<Fill(float(pvNotFake)); - hNumPvClean->Fill(float(pvsTrue)); - - if (PRINT) - cout << " Not fake PVs = " << pvNotFake << " good position " << pvsTrue << endl; - - Handle recTracks; - e.getByToken(srcToken_, recTracks); - - if (PRINT) - cout << " Tracks " << recTracks->size() << endl; - for (reco::TrackCollection::const_iterator t = recTracks->begin(); t != recTracks->end(); ++t) { - trackNumber++; - numOfClustersPerDet1 = 0; - numOfClustersPerDet2 = 0; - numOfClustersPerDet3 = 0; - int pixelHits = 0; - - int size = t->recHitsSize(); - float pt = t->pt(); - float eta = t->eta(); - float phi = t->phi(); - float trackCharge = t->charge(); - float d0 = t->d0(); - float dz = t->dz(); - float tkvx = t->vx(); - float tkvy = t->vy(); - float tkvz = t->vz(); - - if (PRINT) - cout << "Track " << trackNumber << " Pt " << pt << " Eta " << eta << " d0/dz " << d0 << " " << dz << " Hits " - << size << endl; - - hEta->Fill(eta); - 
hDz->Fill(dz); - if (abs(eta) > 2.8 || abs(dz) > 25.) - continue; // skip - - hD0->Fill(d0); - if (d0 > 1.0) - continue; // skip - - bool goodTrack = false; - for (vector::iterator m = pvzVector.begin(); m != pvzVector.end(); ++m) { - float z = *m; - float tmp = abs(z - dz); - hzdiff->Fill(tmp); - if (tmp < 1.) - goodTrack = true; - } - - if (!goodTrack) - continue; - countNiceTracks++; - hPt->Fill(pt); - - // Loop over rechits - for (trackingRecHit_iterator recHit = t->recHitsBegin(); recHit != t->recHitsEnd(); ++recHit) { - if (!((*recHit)->isValid())) - continue; - - if ((*recHit)->geographicalId().det() != DetId::Tracker) - continue; - - const DetId &hit_detId = (*recHit)->geographicalId(); - uint IntSubDetID = (hit_detId.subdetId()); - - // Select pixel rechits - if (IntSubDetID == 0) - continue; // Select ?? - if (IntSubDetID != PixelSubdetector::PixelBarrel) - continue; // look only at bpix || IntSubDetID == PixelSubdetector::PixelEndcap) { - - // Pixel detector - PXBDetId pdetId = PXBDetId(hit_detId); - //unsigned int detTypeP=pdetId.det(); - //unsigned int subidP=pdetId.subdetId(); - // Barell layer = 1,2,3 - int layer = pdetId.layer(); - // Barrel ladder id 1-20,32,44. 
- int ladder = pdetId.ladder(); - // Barrel Z-index=1,8 - int zindex = pdetId.module(); - - if (PRINT) - cout << "barrel layer/ladder/module: " << layer << "/" << ladder << "/" << zindex << endl; - - // Get the geom-detector - const PixelGeomDetUnit *theGeomDet = dynamic_cast(theTracker.idToDet(hit_detId)); - double detZ = theGeomDet->surface().position().z(); - double detR = theGeomDet->surface().position().perp(); - const PixelTopology *topol = &(theGeomDet->specificTopology()); // pixel topology - - //std::vector output = getRecHitComponents((*recHit).get()); - //std::vector TrkComparison::getRecHitComponents(const TrackingRecHit* rechit){ - - const SiPixelRecHit *hit = dynamic_cast((*recHit).get()); - //edm::Ref ,SiStripCluster> cluster = hit->cluster(); - // get the edm::Ref to the cluster - - if (hit) { - // RecHit (recthits are transient, so not available without ttrack refit) - //double xloc = hit->localPosition().x();// 1st meas coord - //double yloc = hit->localPosition().y();// 2nd meas coord or zero - //double zloc = hit->localPosition().z();// up, always zero - //cout<<" rechit loc "<, SiPixelCluster> const &clust = hit->cluster(); - // check if the ref is not null - if (!clust.isNonnull()) - continue; - - numberOfClusters++; - pixelHits++; - float charge = (clust->charge()) / 1000.0; // convert electrons to kilo-electrons - int size = clust->size(); - int sizeX = clust->sizeX(); - int sizeY = clust->sizeY(); - float row = clust->x(); - float col = clust->y(); - numberOfPixels += size; - - //cout<<" clus loc "<surface().toGlobal(lp); - double gX = clustgp.x(); - double gY = clustgp.y(); - double gZ = clustgp.z(); - - //cout<<" CLU GLOBAL "<maxPixelCol(); - //int maxPixelRow = clust->maxPixelRow(); - //int minPixelCol = clust->minPixelCol(); - //int minPixelRow = clust->minPixelRow(); - //int geoId = PixGeom->geographicalId().rawId(); - // Replace with the topology methods - // edge method moved to topologi class - //int edgeHitX = (int) ( 
topol->isItEdgePixelInX( minPixelRow ) || topol->isItEdgePixelInX( maxPixelRow ) ); - //int edgeHitY = (int) ( topol->isItEdgePixelInY( minPixelCol ) || topol->isItEdgePixelInY( maxPixelCol ) ); - - // calculate alpha and beta from cluster position - //LocalTrajectoryParameters ltp = tsos.localParameters(); - //LocalVector localDir = ltp.momentum()/ltp.momentum().mag(); - //float locx = localDir.x(); - //float locy = localDir.y(); - //float locz = localDir.z(); - //float loctheta = localDir.theta(); // currently unused - //float alpha = atan2( locz, locx ); - //float beta = atan2( locz, locy ); - - if (layer == 1) { - hDetMap1->Fill(float(zindex), float(ladder)); - hcluDetMap1->Fill(col, row); - hcharge1->Fill(charge); - hcols1->Fill(col); - hrows1->Fill(row); - - hclusMap1->Fill(gZ, phi); - - if (pt > CLU_SIZE_PT_CUT) { - hsize1->Fill(float(size)); - hsizex1->Fill(float(sizeX)); - hsizey1->Fill(float(sizeY)); - - hclumult1->Fill(eta, float(size)); - hclumultx1->Fill(eta, float(sizeX)); - hclumulty1->Fill(eta, float(sizeY)); - hcluchar1->Fill(eta, float(charge)); - } - - numOfClustersPerDet1++; - numOfClustersPerLay1++; - numOfPixelsPerLay1 += size; - - } else if (layer == 2) { - hDetMap2->Fill(float(zindex), float(ladder)); - hcluDetMap2->Fill(col, row); - hcharge2->Fill(charge); - hcols2->Fill(col); - hrows2->Fill(row); - - hclusMap2->Fill(gZ, phi); - - if (pt > CLU_SIZE_PT_CUT) { - hsize2->Fill(float(size)); - hsizex2->Fill(float(sizeX)); - hsizey2->Fill(float(sizeY)); - - hclumult2->Fill(eta, float(size)); - hclumultx2->Fill(eta, float(sizeX)); - hclumulty2->Fill(eta, float(sizeY)); - hcluchar2->Fill(eta, float(charge)); - } - - numOfClustersPerDet2++; - numOfClustersPerLay2++; - numOfPixelsPerLay2 += size; - - } else if (layer == 3) { - hDetMap3->Fill(float(zindex), float(ladder)); - hcluDetMap3->Fill(col, row); - hcharge3->Fill(charge); - hcols3->Fill(col); - hrows3->Fill(row); - - hclusMap3->Fill(gZ, phi); - - if (pt > CLU_SIZE_PT_CUT) { - 
hsize3->Fill(float(size)); - hsizex3->Fill(float(sizeX)); - hsizey3->Fill(float(sizeY)); - hclumult3->Fill(eta, float(size)); - hclumultx3->Fill(eta, float(sizeX)); - hclumulty3->Fill(eta, float(sizeY)); - hcluchar3->Fill(eta, float(charge)); - } - - numOfClustersPerDet3++; - numOfClustersPerLay3++; - numOfPixelsPerLay3 += size; - } // if layer - - } // if valid - - } // clusters - - if (pixelHits > 0) - countPixTracks++; - - if (PRINT) - cout << " Clusters for track " << trackNumber << " num of clusters " << numberOfClusters << " num of pixels " - << pixelHits << endl; - -#ifdef HISTOS - if (numberOfClusters > 0) { - hclusPerTrk1->Fill(float(numOfClustersPerDet1)); - if (PRINT) - cout << "Lay1: number of clusters per track = " << numOfClustersPerDet1 << endl; - hclusPerTrk2->Fill(float(numOfClustersPerDet2)); - if (PRINT) - cout << "Lay2: number of clusters per track = " << numOfClustersPerDet1 << endl; - hclusPerTrk3->Fill(float(numOfClustersPerDet3)); - if (PRINT) - cout << "Lay3: number of clusters per track = " << numOfClustersPerDet1 << endl; - } -#endif // HISTOS - - } // tracks - -#ifdef HISTOS - if (numberOfClusters > 0) { - hclusPerLay1->Fill(float(numOfClustersPerLay1)); - hclusPerLay2->Fill(float(numOfClustersPerLay2)); - hclusPerLay3->Fill(float(numOfClustersPerLay3)); - //hdetsPerLay1->Fill(float(numberOfDetUnits1)); - //hdetsPerLay2->Fill(float(numberOfDetUnits2)); - //hdetsPerLay3->Fill(float(numberOfDetUnits3)); - //int tmp = numberOfDetUnits1+numberOfDetUnits2+numberOfDetUnits3; - hpixPerLay1->Fill(float(numOfPixelsPerLay1)); - hpixPerLay2->Fill(float(numOfPixelsPerLay2)); - hpixPerLay3->Fill(float(numOfPixelsPerLay3)); - //htest7->Fill(float(tmp)); - hclusBpix->Fill(float(numberOfClusters)); - hpixBpix->Fill(float(numberOfPixels)); - } - htracksGood->Fill(float(countNiceTracks)); - htracksGoodInPix->Fill(float(countPixTracks)); - htracks->Fill(float(trackNumber)); - - hbx->Fill(float(bx)); - hlumi->Fill(float(lumiBlock)); - - 
htracksls->Fill(float(lumiBlock), float(countPixTracks)); - hpvsls->Fill(float(lumiBlock), float(pvsTrue)); - if (instlumi > 0.) { - float tmp = float(countPixTracks) / instlumi; - htrackslsn->Fill(float(lumiBlock), tmp); - tmp = float(pvsTrue) / instlumi; - hpvslsn->Fill(float(lumiBlock), tmp); - } - -#endif // HISTOS - - // - countTracks += float(trackNumber); - countGoodTracks += float(countNiceTracks); - countTracksInPix += float(countPixTracks); - countPVs += float(pvsTrue); - countEvents++; - if (lumiBlock != lumiBlockOld) { - countLumi += intlumi; - lumiBlockOld = lumiBlock; - } - - if (PRINT) - cout << " event with tracks = " << trackNumber << " " << countNiceTracks << endl; - - return; - -#ifdef USE_TRAJ - - //------------------------------------------------------------------------------------ - // Use Trajectories - - edm::Handle trajTrackCollectionHandle; - e.getByToken(trackAssocToken_, trajTrackCollectionHandle); - - TrajectoryStateCombiner tsoscomb; - - int NbrTracks = trajTrackCollectionHandle->size(); - std::cout << " track measurements " << trajTrackCollectionHandle->size() << std::endl; - - int trackNumber = 0; - int numberOfClusters = 0; - - for (TrajTrackAssociationCollection::const_iterator it = trajTrackCollectionHandle->begin(), - itEnd = trajTrackCollectionHandle->end(); - it != itEnd; - ++it) { - int pixelHits = 0; - int stripHits = 0; - const Track &track = *it->val; - const Trajectory &traj = *it->key; - - std::vector checkColl = traj.measurements(); - for (std::vector::const_iterator checkTraj = checkColl.begin(); checkTraj != checkColl.end(); - ++checkTraj) { - if (!checkTraj->updatedState().isValid()) - continue; - TransientTrackingRecHit::ConstRecHitPointer testhit = checkTraj->recHit(); - if (!testhit->isValid() || testhit->geographicalId().det() != DetId::Tracker) - continue; - uint testSubDetID = (testhit->geographicalId().subdetId()); - if (testSubDetID == PixelSubdetector::PixelBarrel || testSubDetID == 
PixelSubdetector::PixelEndcap) - pixelHits++; - else if (testSubDetID == StripSubdetector::TIB || testSubDetID == StripSubdetector::TOB || - testSubDetID == StripSubdetector::TID || testSubDetID == StripSubdetector::TEC) - stripHits++; - } - - if (pixelHits == 0) - continue; - - trackNumber++; - std::cout << " track " << trackNumber << " has pixelhits " << pixelHits << std::endl; - pixelHits = 0; - - //std::vector tmColl = traj.measurements(); - for (std::vector::const_iterator itTraj = checkColl.begin(); itTraj != checkColl.end(); - ++itTraj) { - if (!itTraj->updatedState().isValid()) - continue; - - TrajectoryStateOnSurface tsos = tsoscomb(itTraj->forwardPredictedState(), itTraj->backwardPredictedState()); - TransientTrackingRecHit::ConstRecHitPointer hit = itTraj->recHit(); - if (!hit->isValid() || hit->geographicalId().det() != DetId::Tracker) - continue; - - const DetId &hit_detId = hit->geographicalId(); - uint IntSubDetID = (hit_detId.subdetId()); - - if (IntSubDetID == 0) - continue; // Select ?? 
- if (IntSubDetID != PixelSubdetector::PixelBarrel) - continue; // look only at bpix || IntSubDetID == PixelSubdetector::PixelEndcap) { - - // const GeomDetUnit* detUnit = hit->detUnit(); - // if(detUnit) { - // const Surface& surface = hit->detUnit()->surface(); - // const TrackerGeometry& theTracker(*tkGeom_); - // const PixelGeomDetUnit* theGeomDet = dynamic_cast (theTracker.idToDet(hit_detId) ); - // const RectangularPixelTopology * topol = dynamic_cast(&(theGeomDet->specificTopology())); - // } - - // get the enclosed persistent hit - const TrackingRecHit *persistentHit = hit->hit(); - // check if it's not null, and if it's a valid pixel hit - if ((persistentHit != 0) && (typeid(*persistentHit) == typeid(SiPixelRecHit))) { - // tell the C++ compiler that the hit is a pixel hit - const SiPixelRecHit *pixhit = dynamic_cast(hit->hit()); - // get the edm::Ref to the cluster - edm::Ref, SiPixelCluster> const &clust = (*pixhit).cluster(); - // check if the ref is not null - if (clust.isNonnull()) { - numberOfClusters++; - pixelHits++; - float charge = (clust->charge()) / 1000.0; // convert electrons to kilo-electrons - int size = clust->size(); - int size_x = clust->sizeX(); - int size_y = clust->sizeY(); - float row = clust->x(); - float col = clust->y(); - - //LocalPoint lp = topol->localPosition(MeasurementPoint(clust_.row,clust_.col)); - //float x = lp.x(); - //float y = lp.y(); - - int maxPixelCol = clust->maxPixelCol(); - int maxPixelRow = clust->maxPixelRow(); - int minPixelCol = clust->minPixelCol(); - int minPixelRow = clust->minPixelRow(); - - //int geoId = PixGeom->geographicalId().rawId(); - - // Replace with the topology methods - // edge method moved to topologi class - //int edgeHitX = (int) ( topol->isItEdgePixelInX( minPixelRow ) || topol->isItEdgePixelInX( maxPixelRow ) ); - //int edgeHitY = (int) ( topol->isItEdgePixelInY( minPixelCol ) || topol->isItEdgePixelInY( maxPixelCol ) ); - - // calculate alpha and beta from cluster position - 
//LocalTrajectoryParameters ltp = tsos.localParameters(); - //LocalVector localDir = ltp.momentum()/ltp.momentum().mag(); - - //float locx = localDir.x(); - //float locy = localDir.y(); - //float locz = localDir.z(); - //float loctheta = localDir.theta(); // currently unused - - //float alpha = atan2( locz, locx ); - //float beta = atan2( locz, locy ); - - //clust_.normalized_charge = clust_.charge*sqrt(1.0/(1.0/pow(tan(clust_.clust_alpha),2)+1.0/pow(tan(clust_.clust_beta),2)+1.0)); - } // valid cluster - } // valid peristant hit - - } // loop over trajectory meas. - - if (PRINT) - cout << " Cluster for track " << trackNumber << " cluaters " << numberOfClusters << " " << pixelHits << endl; - - } // loop over tracks - -#endif // USE_TRAJ - - cout << " event with tracks = " << trackNumber << " " << countGoodTracks << endl; - -} // end - -//define this as a plug-in -DEFINE_FWK_MODULE(TestPixTracks); diff --git a/RecoLocalTracker/SiPixelClusterizer/test/gpuClustering_t.h b/RecoLocalTracker/SiPixelClusterizer/test/gpuClustering_t.h index c0291ed9f32f8..5b70ded261ddf 100644 --- a/RecoLocalTracker/SiPixelClusterizer/test/gpuClustering_t.h +++ b/RecoLocalTracker/SiPixelClusterizer/test/gpuClustering_t.h @@ -16,12 +16,13 @@ #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" #endif // __CUDACC__ -#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h" -#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h" -#include "RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelClusterThresholds.h" #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" - #include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "RecoLocalTracker/SiPixelClusterizer/interface/SiPixelClusterThresholds.h" + +// local includes, for testing only +#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h" +#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h" int main(void) { 
#ifdef __CUDACC__ diff --git a/RecoLocalTracker/SiPixelRecHits/BuildFile.xml b/RecoLocalTracker/SiPixelRecHits/BuildFile.xml index 70a2970420c51..62787f4c989c1 100644 --- a/RecoLocalTracker/SiPixelRecHits/BuildFile.xml +++ b/RecoLocalTracker/SiPixelRecHits/BuildFile.xml @@ -1,15 +1,20 @@ + + + + + diff --git a/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsDevice.h b/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsDevice.h new file mode 100644 index 0000000000000..9a2139ab2e355 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsDevice.h @@ -0,0 +1,43 @@ +#ifndef RecoLocalTracker_SiPixelRecHits_interface_PixelCPEFastParamsDevice_h +#define RecoLocalTracker_SiPixelRecHits_interface_PixelCPEFastParamsDevice_h + +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +template +class PixelCPEFastParamsDevice { +public: + using Buffer = cms::alpakatools::device_buffer>; + using ConstBuffer = cms::alpakatools::const_device_buffer>; + + template + PixelCPEFastParamsDevice(TQueue queue) + : buffer_(cms::alpakatools::make_device_buffer>(queue)) {} + + // non-copyable + PixelCPEFastParamsDevice(PixelCPEFastParamsDevice const&) = delete; + PixelCPEFastParamsDevice& operator=(PixelCPEFastParamsDevice const&) = delete; + + // movable + PixelCPEFastParamsDevice(PixelCPEFastParamsDevice&&) = default; + PixelCPEFastParamsDevice& operator=(PixelCPEFastParamsDevice&&) = default; + + // default destructor + ~PixelCPEFastParamsDevice() = default; + + // access the buffer + Buffer buffer() { return buffer_; } + ConstBuffer buffer() const { return buffer_; } + ConstBuffer const_buffer() const { return buffer_; } + + auto size() const { return alpaka::getExtentProduct(buffer_); } + + pixelCPEforDevice::ParamsOnDeviceT const* data() const { return buffer_.data(); } + +private: + Buffer buffer_; +}; + +#endif // 
RecoLocalTracker_SiPixelRecHits_interface_PixelCPEFastParamsDevice_h diff --git a/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h b/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h new file mode 100644 index 0000000000000..7d57c46dd7a13 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h @@ -0,0 +1,66 @@ +#ifndef RecoLocalTracker_SiPixelRecHits_interface_PixelCPEFastParamsHost_h +#define RecoLocalTracker_SiPixelRecHits_interface_PixelCPEFastParamsHost_h + +#include + +#include "CondFormats/SiPixelTransient/interface/SiPixelGenError.h" +#include "DataFormats/GeometrySurface/interface/SOARotation.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEGenericBase.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +template +class PixelCPEFastParamsHost : public PixelCPEGenericBase { +public: + using Buffer = cms::alpakatools::host_buffer>; + using ConstBuffer = cms::alpakatools::const_host_buffer>; + + PixelCPEFastParamsHost(edm::ParameterSet const& conf, + const MagneticField* mag, + const TrackerGeometry& geom, + const TrackerTopology& ttopo, + const SiPixelLorentzAngle* lorentzAngle, + const SiPixelGenErrorDBObject* genErrorDBObject, + const SiPixelLorentzAngle* lorentzAngleWidth); + + // non-copyable + PixelCPEFastParamsHost(PixelCPEFastParamsHost const&) = delete; + PixelCPEFastParamsHost& operator=(PixelCPEFastParamsHost const&) = delete; + + // movable + PixelCPEFastParamsHost(PixelCPEFastParamsHost&&) = default; + PixelCPEFastParamsHost& 
operator=(PixelCPEFastParamsHost&&) = default; + + // default destructor + ~PixelCPEFastParamsHost() override = default; + + // access the buffer + Buffer buffer() { return buffer_; } + ConstBuffer buffer() const { return buffer_; } + ConstBuffer const_buffer() const { return buffer_; } + + auto size() const { return alpaka::getExtentProduct(buffer_); } + + pixelCPEforDevice::ParamsOnDeviceT const* data() const { return buffer_.data(); } + + static void fillPSetDescription(edm::ParameterSetDescription& desc); + +private: + LocalPoint localPosition(DetParam const& theDetParam, ClusterParam& theClusterParam) const override; + LocalError localError(DetParam const& theDetParam, ClusterParam& theClusterParam) const override; + + void errorFromTemplates(DetParam const& theDetParam, ClusterParamGeneric& theClusterParam, float qclus) const; + + std::vector thePixelGenError_; + + void fillParamsForDevice(); + + Buffer buffer_; +}; + +#endif // RecoLocalTracker_SiPixelRecHits_interface_PixelCPEFastParamsHost_h diff --git a/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEGenericBase.h b/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEGenericBase.h index 1c7b9646d037f..2f18d86a39944 100644 --- a/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEGenericBase.h +++ b/RecoLocalTracker/SiPixelRecHits/interface/PixelCPEGenericBase.h @@ -1,9 +1,10 @@ #ifndef RecoLocalTracker_SiPixelRecHits_PixelCPEGenericBase_H #define RecoLocalTracker_SiPixelRecHits_PixelCPEGenericBase_H -#include "PixelCPEBase.h" #include +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEBase.h" + class PixelCPEGenericBase : public PixelCPEBase { public: struct ClusterParamGeneric : ClusterParam { diff --git a/RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h b/RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h new file mode 100644 index 0000000000000..4e66d24604aec --- /dev/null +++ 
b/RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h @@ -0,0 +1,40 @@ +#ifndef RecoLocalTracker_SiPixelRecHits_interface_alpaka_PixelCPEFastParamsCollection_h +#define RecoLocalTracker_SiPixelRecHits_interface_alpaka_PixelCPEFastParamsCollection_h + +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsDevice.h" +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" + +// TODO: The class is created via inheritance of the PortableCollection. +// This is generally discouraged, and should be done via composition. +// See: https://github.com/cms-sw/cmssw/pull/40465#discussion_r1067364306 +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + template + using PixelCPEFastParams = std::conditional_t, + PixelCPEFastParamsHost, + PixelCPEFastParamsDevice>; + + using PixelCPEFastParamsPhase1 = PixelCPEFastParams; + using PixelCPEFastParamsPhase2 = PixelCPEFastParams; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace cms::alpakatools { + template + struct CopyToDevice> { + template + static auto copyAsync(TQueue& queue, PixelCPEFastParamsHost const& srcData) { + using TDevice = typename alpaka::trait::DevType::type; + PixelCPEFastParamsDevice dstData(queue); + alpaka::memcpy(queue, dstData.buffer(), srcData.buffer()); + return dstData; + } + }; +} // namespace cms::alpakatools + +#endif // DataFormats_PixelCPEFastParamsoA_interface_alpaka_PixelCPEFastParamsCollection_h diff --git a/RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h b/RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h new file mode 100644 index 0000000000000..ac99af3146904 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h @@ -0,0 +1,433 @@ +#ifndef 
RecoLocalTracker_SiPixelRecHits_interface_pixelCPEforDevice_h +#define RecoLocalTracker_SiPixelRecHits_interface_pixelCPEforDevice_h + +#include +#include +#include +#include +#include + +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/GeometrySurface/interface/SOARotation.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +namespace pixelCPEforDevice { + + // From https://cmssdt.cern.ch/dxr/CMSSW/source/CondFormats/SiPixelTransient/src/SiPixelGenError.cc#485-486 + // qbin: int (0-4) describing the charge of the cluster + // [0: 1.5; + using Rotation = SOARotation; + + // SOA (on device) + + template + struct ClusParamsT { + uint32_t minRow[N]; + uint32_t maxRow[N]; + uint32_t minCol[N]; + uint32_t maxCol[N]; + + int32_t q_f_X[N]; + int32_t q_l_X[N]; + int32_t q_f_Y[N]; + int32_t q_l_Y[N]; + + int32_t charge[N]; + + float xpos[N]; + float ypos[N]; + + float xerr[N]; + float yerr[N]; + + int16_t xsize[N]; // (*8) clipped at 127 if negative is edge.... + int16_t ysize[N]; + + Status status[N]; + }; + + // all modules are identical! 
+ struct CommonParams { + float theThicknessB; + float theThicknessE; + float thePitchX; + float thePitchY; + + uint16_t maxModuleStride; + uint8_t numberOfLaddersInBarrel; + }; + + struct DetParams { + bool isBarrel; + bool isPosZ; + uint16_t layer; + uint16_t index; + uint32_t rawId; + + float shiftX; + float shiftY; + float chargeWidthX; + float chargeWidthY; + uint16_t pixmx; // max pix charge + + uint16_t nRowsRoc; //we don't need 2^16 columns, is worth to use 15 + 1 for sign + uint16_t nColsRoc; + uint16_t nRows; + uint16_t nCols; + + uint32_t numPixsInModule; + + float x0, y0, z0; // the vertex in the local coord of the detector + + float apeXX, apeYY; // ape^2 + uint8_t sx2, sy1, sy2; + uint8_t sigmax[kNumErrorBins], sigmax1[kNumErrorBins], + sigmay[kNumErrorBins]; // in micron + float xfact[kGenErrorQBins], yfact[kGenErrorQBins]; + int minCh[kGenErrorQBins]; + + Frame frame; + }; + + template + struct LayerGeometryT { + uint32_t layerStart[TrackerTopology::numberOfLayers + 1]; + uint8_t layer[pixelTopology::layerIndexSize]; + uint16_t maxModuleStride; + }; + + constexpr int32_t MaxHitsInIter = pixelClustering::maxHitsInIter(); + using ClusParams = ClusParamsT; + + constexpr inline void computeAnglesFromDet( + DetParams const& __restrict__ detParams, float const x, float const y, float& cotalpha, float& cotbeta) { + // x,y local position on det + auto gvx = x - detParams.x0; + auto gvy = y - detParams.y0; + auto gvz = -1.f / detParams.z0; + // normalization not required as only ratio used... + // calculate angles + cotalpha = gvx * gvz; + cotbeta = gvy * gvz; + } + + constexpr inline float correction(int sizeM1, + int q_f, //!< Charge in the first pixel. + int q_l, //!< Charge in the last pixel. + uint16_t upper_edge_first_pix, //!< As the name says. + uint16_t lower_edge_last_pix, //!< As the name says. 
+ float lorentz_shift, //!< L-shift at half thickness + float theThickness, //detector thickness + float cot_angle, //!< cot of alpha_ or beta_ + float pitch, //!< thePitchX or thePitchY + bool first_is_big, //!< true if the first is big + bool last_is_big) //!< true if the last is big + { + if (0 == sizeM1) // size 1 + return 0; + + float w_eff = 0; + bool simple = true; + if (1 == sizeM1) { // size 2 + //--- Width of the clusters minus the edge (first and last) pixels. + //--- In the note, they are denoted x_F and x_L (and y_F and y_L) + // assert(lower_edge_last_pix >= upper_edge_first_pix); + auto w_inner = pitch * float(lower_edge_last_pix - upper_edge_first_pix); // in cm + + //--- Predicted charge width from geometry + auto w_pred = theThickness * cot_angle // geometric correction (in cm) + - lorentz_shift; // (in cm) &&& check fpix! + + w_eff = std::abs(w_pred) - w_inner; + + //--- If the observed charge width is inconsistent with the expectations + //--- based on the track, do *not* use w_pred-w_inner. Instead, replace + //--- it with an *average* effective charge width, which is the average + //--- length of the edge pixels. + + // this can produce "large" regressions for very small numeric differences + simple = (w_eff < 0.0f) | (w_eff > pitch); + } + + if (simple) { + //--- Total length of the two edge pixels (first+last) + float sum_of_edge = 2.0f; + if (first_is_big) + sum_of_edge += 1.0f; + if (last_is_big) + sum_of_edge += 1.0f; + w_eff = pitch * 0.5f * sum_of_edge; // ave. 
length of edge pixels (first+last) (cm) + } + + //--- Finally, compute the position in this projection + float qdiff = q_l - q_f; + float qsum = q_l + q_f; + + //--- Temporary fix for clusters with both first and last pixel with charge = 0 + if (qsum == 0) + qsum = 1.0f; + + return 0.5f * (qdiff / qsum) * w_eff; + } + + template + constexpr inline void position(CommonParams const& __restrict__ comParams, + DetParams const& __restrict__ detParams, + ClusParams& cp, + uint32_t ic) { + constexpr int maxSize = TrackerTraits::maxSizeCluster; + //--- Upper Right corner of Lower Left pixel -- in measurement frame + uint16_t llx = cp.minRow[ic] + 1; + uint16_t lly = cp.minCol[ic] + 1; + + //--- Lower Left corner of Upper Right pixel -- in measurement frame + uint16_t urx = cp.maxRow[ic]; + uint16_t ury = cp.maxCol[ic]; + + uint16_t llxl = llx, llyl = lly, urxl = urx, uryl = ury; + + llxl = TrackerTraits::localX(llx); + llyl = TrackerTraits::localY(lly); + urxl = TrackerTraits::localX(urx); + uryl = TrackerTraits::localY(ury); + + auto mx = llxl + urxl; + auto my = llyl + uryl; + + int xsize = int(urxl) + 2 - int(llxl); + int ysize = int(uryl) + 2 - int(llyl); + assert(xsize >= 0); // 0 if bixpix... 
+ assert(ysize >= 0); + + if (TrackerTraits::isBigPixX(cp.minRow[ic])) + ++xsize; + if (TrackerTraits::isBigPixX(cp.maxRow[ic])) + ++xsize; + if (TrackerTraits::isBigPixY(cp.minCol[ic])) + ++ysize; + if (TrackerTraits::isBigPixY(cp.maxCol[ic])) + ++ysize; + + int unbalanceX = 8.f * std::abs(float(cp.q_f_X[ic] - cp.q_l_X[ic])) / float(cp.q_f_X[ic] + cp.q_l_X[ic]); + int unbalanceY = 8.f * std::abs(float(cp.q_f_Y[ic] - cp.q_l_Y[ic])) / float(cp.q_f_Y[ic] + cp.q_l_Y[ic]); + + xsize = 8 * xsize - unbalanceX; + ysize = 8 * ysize - unbalanceY; + + cp.xsize[ic] = std::min(xsize, maxSize); + cp.ysize[ic] = std::min(ysize, maxSize); + + if (cp.minRow[ic] == 0 || cp.maxRow[ic] == uint32_t(detParams.nRows - 1)) + cp.xsize[ic] = -cp.xsize[ic]; + + if (cp.minCol[ic] == 0 || cp.maxCol[ic] == uint32_t(detParams.nCols - 1)) + cp.ysize[ic] = -cp.ysize[ic]; + + // apply the lorentz offset correction + float xoff = 0.5f * float(detParams.nRows) * comParams.thePitchX; + float yoff = 0.5f * float(detParams.nCols) * comParams.thePitchY; + + //correction for bigpixels for phase1 + xoff = xoff + TrackerTraits::bigPixXCorrection * comParams.thePitchX; + yoff = yoff + TrackerTraits::bigPixYCorrection * comParams.thePitchY; + + // apply the lorentz offset correction + auto xPos = detParams.shiftX + (comParams.thePitchX * 0.5f * float(mx)) - xoff; + auto yPos = detParams.shiftY + (comParams.thePitchY * 0.5f * float(my)) - yoff; + + float cotalpha = 0, cotbeta = 0; + + computeAnglesFromDet(detParams, xPos, yPos, cotalpha, cotbeta); + + auto thickness = detParams.isBarrel ? 
comParams.theThicknessB : comParams.theThicknessE; + + auto xcorr = correction(cp.maxRow[ic] - cp.minRow[ic], + cp.q_f_X[ic], + cp.q_l_X[ic], + llxl, + urxl, + detParams.chargeWidthX, // lorentz shift in cm + thickness, + cotalpha, + comParams.thePitchX, + TrackerTraits::isBigPixX(cp.minRow[ic]), + TrackerTraits::isBigPixX(cp.maxRow[ic])); + + auto ycorr = correction(cp.maxCol[ic] - cp.minCol[ic], + cp.q_f_Y[ic], + cp.q_l_Y[ic], + llyl, + uryl, + detParams.chargeWidthY, // lorentz shift in cm + thickness, + cotbeta, + comParams.thePitchY, + TrackerTraits::isBigPixY(cp.minCol[ic]), + TrackerTraits::isBigPixY(cp.maxCol[ic])); + + cp.xpos[ic] = xPos + xcorr; + cp.ypos[ic] = yPos + ycorr; + } + + template + constexpr inline void errorFromSize(CommonParams const& __restrict__ comParams, + DetParams const& __restrict__ detParams, + ClusParams& cp, + uint32_t ic) { + // Edge cluster errors + cp.xerr[ic] = 0.0050; + cp.yerr[ic] = 0.0085; + + // FIXME these are errors form Run1 + float xerr_barrel_l1_def = TrackerTraits::xerr_barrel_l1_def; + float yerr_barrel_l1_def = TrackerTraits::yerr_barrel_l1_def; + float xerr_barrel_ln_def = TrackerTraits::xerr_barrel_ln_def; + float yerr_barrel_ln_def = TrackerTraits::yerr_barrel_ln_def; + float xerr_endcap_def = TrackerTraits::xerr_endcap_def; + float yerr_endcap_def = TrackerTraits::yerr_endcap_def; + + constexpr float xerr_barrel_l1[] = {0.00115, 0.00120, 0.00088}; //TODO MOVE THESE SOMEWHERE ELSE + constexpr float yerr_barrel_l1[] = { + 0.00375, 0.00230, 0.00250, 0.00250, 0.00230, 0.00230, 0.00210, 0.00210, 0.00240}; + constexpr float xerr_barrel_ln[] = {0.00115, 0.00120, 0.00088}; + constexpr float yerr_barrel_ln[] = { + 0.00375, 0.00230, 0.00250, 0.00250, 0.00230, 0.00230, 0.00210, 0.00210, 0.00240}; + constexpr float xerr_endcap[] = {0.0020, 0.0020}; + constexpr float yerr_endcap[] = {0.00210}; + + auto sx = cp.maxRow[ic] - cp.minRow[ic]; + auto sy = cp.maxCol[ic] - cp.minCol[ic]; + + // is edgy ? 
+ bool isEdgeX = cp.xsize[ic] < 1; + bool isEdgeY = cp.ysize[ic] < 1; + + // is one and big? + bool isBig1X = ((0 == sx) && TrackerTraits::isBigPixX(cp.minRow[ic])); + bool isBig1Y = ((0 == sy) && TrackerTraits::isBigPixY(cp.minCol[ic])); + + if (!isEdgeX && !isBig1X) { + if (not detParams.isBarrel) { + cp.xerr[ic] = sx < std::size(xerr_endcap) ? xerr_endcap[sx] : xerr_endcap_def; + } else if (detParams.layer == 1) { + cp.xerr[ic] = sx < std::size(xerr_barrel_l1) ? xerr_barrel_l1[sx] : xerr_barrel_l1_def; + } else { + cp.xerr[ic] = sx < std::size(xerr_barrel_ln) ? xerr_barrel_ln[sx] : xerr_barrel_ln_def; + } + } + + if (!isEdgeY && !isBig1Y) { + if (not detParams.isBarrel) { + cp.yerr[ic] = sy < std::size(yerr_endcap) ? yerr_endcap[sy] : yerr_endcap_def; + } else if (detParams.layer == 1) { + cp.yerr[ic] = sy < std::size(yerr_barrel_l1) ? yerr_barrel_l1[sy] : yerr_barrel_l1_def; + } else { + cp.yerr[ic] = sy < std::size(yerr_barrel_ln) ? yerr_barrel_ln[sy] : yerr_barrel_ln_def; + } + } + } + + template + constexpr inline void errorFromDB(CommonParams const& __restrict__ comParams, + DetParams const& __restrict__ detParams, + ClusParams& cp, + uint32_t ic) { + // Edge cluster errors + cp.xerr[ic] = 0.0050f; + cp.yerr[ic] = 0.0085f; + + auto sx = cp.maxRow[ic] - cp.minRow[ic]; + auto sy = cp.maxCol[ic] - cp.minCol[ic]; + + // is edgy ? (size is set negative: see above) + bool isEdgeX = cp.xsize[ic] < 1; + bool isEdgeY = cp.ysize[ic] < 1; + // is one and big? 
+ bool isOneX = (0 == sx); + bool isOneY = (0 == sy); + bool isBigX = TrackerTraits::isBigPixX(cp.minRow[ic]); + bool isBigY = TrackerTraits::isBigPixY(cp.minCol[ic]); + + auto ch = cp.charge[ic]; + auto bin = 0; + for (; bin < kGenErrorQBins - 1; ++bin) + // find first bin which minimum charge exceeds cluster charge + if (ch < detParams.minCh[bin + 1]) + break; + + // in detParams qBins are reversed bin0 -> smallest charge, bin4-> largest charge + // whereas in CondFormats/SiPixelTransient/src/SiPixelGenError.cc it is the opposite + // so we reverse the bin here -> kGenErrorQBins - 1 - bin + cp.status[ic].qBin = kGenErrorQBins - 1 - bin; + cp.status[ic].isOneX = isOneX; + cp.status[ic].isBigX = (isOneX & isBigX) | isEdgeX; + cp.status[ic].isOneY = isOneY; + cp.status[ic].isBigY = (isOneY & isBigY) | isEdgeY; + + auto xoff = -float(TrackerTraits::xOffset) * comParams.thePitchX; + int low_value = 0; + int high_value = kNumErrorBins - 1; + int bin_value = float(kNumErrorBins) * (cp.xpos[ic] + xoff) / (2 * xoff); + // return estimated bin value truncated to [0, 15] + int jx = std::clamp(bin_value, low_value, high_value); + + auto toCM = [](uint8_t x) { return float(x) * 1.e-4f; }; + + if (not isEdgeX) { + cp.xerr[ic] = isOneX ? toCM(isBigX ? detParams.sx2 : detParams.sigmax1[jx]) + : detParams.xfact[bin] * toCM(detParams.sigmax[jx]); + } + + auto ey = cp.ysize[ic] > 8 ? detParams.sigmay[std::min(cp.ysize[ic] - 9, 15)] : detParams.sy1; + if (not isEdgeY) { + cp.yerr[ic] = isOneY ? toCM(isBigY ? 
detParams.sy2 : detParams.sy1) : detParams.yfact[bin] * toCM(ey); + } + } + + //for Phase2 -> fallback to error from size + template <> + constexpr inline void errorFromDB(CommonParams const& __restrict__ comParams, + DetParams const& __restrict__ detParams, + ClusParams& cp, + uint32_t ic) { + errorFromSize(comParams, detParams, cp, ic); + } + + template + struct ParamsOnDeviceT { + using LayerGeometry = LayerGeometryT; + using AverageGeometry = pixelTopology::AverageGeometryT; + + CommonParams m_commonParams; + // Will contain an array of DetParams instances + DetParams m_detParams[TrackerTopology::numberOfModules]; + LayerGeometry m_layerGeometry; + AverageGeometry m_averageGeometry; + + constexpr CommonParams const& __restrict__ commonParams() const { return m_commonParams; } + constexpr DetParams const& __restrict__ detParams(int i) const { return m_detParams[i]; } + constexpr LayerGeometry const& __restrict__ layerGeometry() const { return m_layerGeometry; } + constexpr AverageGeometry const& __restrict__ averageGeometry() const { return m_averageGeometry; } + + CommonParams& commonParams() { return m_commonParams; } + DetParams& detParams(int i) { return m_detParams[i]; } + LayerGeometry& layerGeometry() { return m_layerGeometry; } + AverageGeometry& averageGeometry() { return m_averageGeometry; } + + constexpr uint8_t layer(uint16_t id) const { return m_layerGeometry.layer[id / TrackerTopology::maxModuleStride]; }; + }; + +} // namespace pixelCPEforDevice + +#endif // RecoLocalTracker_SiPixelRecHits_interface_pixelCPEforDevice_h diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/BuildFile.xml b/RecoLocalTracker/SiPixelRecHits/plugins/BuildFile.xml index 00c88eadd4b51..35a973120e9fd 100644 --- a/RecoLocalTracker/SiPixelRecHits/plugins/BuildFile.xml +++ b/RecoLocalTracker/SiPixelRecHits/plugins/BuildFile.xml @@ -1,12 +1,10 @@ - - - - + + + + - - @@ -14,5 +12,16 @@ + + + + + + + + + + + diff --git 
a/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.cu b/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.cu index 61442ea9d2b8c..b1e5e1c3c90e9 100644 --- a/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.cu +++ b/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.cu @@ -12,7 +12,8 @@ #include "PixelRecHitGPUKernel.h" #include "gpuPixelRecHits.h" -// #define GPU_DEBUG + +//#define GPU_DEBUG namespace { template diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.h b/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.h index 25cc724cd4c4a..407a18be04fa9 100644 --- a/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.h +++ b/RecoLocalTracker/SiPixelRecHits/plugins/PixelRecHitGPUKernel.h @@ -10,7 +10,9 @@ #include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" #include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitSoADevice.h" #include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + //#define GPU_DEBUG + namespace pixelgpudetails { template diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitFromSoAAlpaka.cc b/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitFromSoAAlpaka.cc new file mode 100644 index 0000000000000..a76ff6af49ac9 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitFromSoAAlpaka.cc @@ -0,0 +1,189 @@ +#include +#include + +#include "DataFormats/Common/interface/DetSetVectorNew.h" +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/SiPixelCluster/interface/SiPixelCluster.h" +#include "DataFormats/TrackerRecHit2D/interface/SiPixelRecHitCollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsHost.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include 
"FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/CommonDetUnit/interface/PixelGeomDetUnit.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +template +class SiPixelRecHitFromSoAAlpaka : public edm::global::EDProducer<> { + using HitModuleStartArray = typename TrackingRecHitSoA::HitModuleStartArray; + using hindex_type = typename TrackerTraits::hindex_type; + using HMSstorage = typename std::vector; + +public: + explicit SiPixelRecHitFromSoAAlpaka(const edm::ParameterSet& iConfig); + ~SiPixelRecHitFromSoAAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + // Data has been implicitly copied from Device to Host by the framework + using HitsOnHost = TrackingRecHitHost; + +private: + void produce(edm::StreamID streamID, edm::Event& iEvent, const edm::EventSetup& iSetup) const override; + + const edm::ESGetToken geomToken_; + const edm::EDGetTokenT hitsToken_; // Alpaka hits + const edm::EDGetTokenT clusterToken_; // legacy clusters + const edm::EDPutTokenT rechitsPutToken_; // legacy rechits + const edm::EDPutTokenT hostPutToken_; +}; + +template +SiPixelRecHitFromSoAAlpaka::SiPixelRecHitFromSoAAlpaka(const edm::ParameterSet& iConfig) + : geomToken_(esConsumes()), + hitsToken_(consumes(iConfig.getParameter("pixelRecHitSrc"))), + clusterToken_(consumes(iConfig.getParameter("src"))), + rechitsPutToken_(produces()), + hostPutToken_(produces()) {} + +template +void 
SiPixelRecHitFromSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("pixelRecHitSrc", edm::InputTag("siPixelRecHitsPreSplittingAlpaka")); + desc.add("src", edm::InputTag("siPixelClustersPreSplitting")); + descriptions.addWithDefaultLabel(desc); +} + +template +void SiPixelRecHitFromSoAAlpaka::produce(edm::StreamID streamID, + edm::Event& iEvent, + const edm::EventSetup& iSetup) const { + auto const& hits = iEvent.get(hitsToken_); + auto nHits = hits.view().metadata().size(); + LogDebug("SiPixelRecHitFromSoAAlpaka") << "converting " << nHits << " Hits"; + + // allocate a buffer for the indices of the clusters + constexpr auto nMaxModules = TrackerTraits::numberOfModules; + + SiPixelRecHitCollection output; + output.reserve(nMaxModules, nHits); + + HMSstorage hmsp(nMaxModules + 1); + + if (0 == nHits) { + hmsp.clear(); + iEvent.emplace(rechitsPutToken_, std::move(output)); + iEvent.emplace(hostPutToken_, std::move(hmsp)); + return; + } + + // fill content of HMSstorage product, and put it into the Event + for (unsigned int idx = 0; idx < hmsp.size(); ++idx) { + hmsp[idx] = hits.view().hitsModuleStart()[idx]; + } + iEvent.emplace(hostPutToken_, std::move(hmsp)); + + auto xl = hits.view().xLocal(); + auto yl = hits.view().yLocal(); + auto xe = hits.view().xerrLocal(); + auto ye = hits.view().yerrLocal(); + + TrackerGeometry const& geom = iSetup.getData(geomToken_); + + auto const hclusters = iEvent.getHandle(clusterToken_); + + constexpr uint32_t maxHitsInModule = pixelClustering::maxHitsInModule(); + + int numberOfDetUnits = 0; + int numberOfClusters = 0; + for (auto const& dsv : *hclusters) { + numberOfDetUnits++; + unsigned int detid = dsv.detId(); + DetId detIdObject(detid); + const GeomDetUnit* genericDet = geom.idToDetUnit(detIdObject); + auto gind = genericDet->index(); + const PixelGeomDetUnit* pixDet = dynamic_cast(genericDet); + assert(pixDet); + SiPixelRecHitCollection::FastFiller 
recHitsOnDetUnit(output, detid); + auto fc = hits.view().hitsModuleStart()[gind]; + auto lc = hits.view().hitsModuleStart()[gind + 1]; + auto nhits = lc - fc; + + assert(lc > fc); + LogDebug("SiPixelRecHitFromSoAAlpaka") << "in det " << gind << ": conv " << nhits << " hits from " << dsv.size() + << " legacy clusters" << ' ' << fc << ',' << lc << "\n"; + if (nhits > maxHitsInModule) + edm::LogWarning("SiPixelRecHitFromSoAAlpaka") + .format("Too many clusters {} in module {}. Only the first {} hits will be converted", + nhits, + gind, + maxHitsInModule); + + nhits = std::min(nhits, maxHitsInModule); + + LogDebug("SiPixelRecHitFromSoAAlpaka") << "in det " << gind << "conv " << nhits << " hits from " << dsv.size() + << " legacy clusters" << ' ' << lc << ',' << fc; + + if (0 == nhits) + continue; + auto jnd = [&](int k) { return fc + k; }; + assert(nhits <= dsv.size()); + if (nhits != dsv.size()) { + edm::LogWarning("GPUHits2CPU") << "nhits!= nclus " << nhits << ' ' << dsv.size(); + } + for (auto const& clust : dsv) { + assert(clust.originalId() >= 0); + assert(clust.originalId() < dsv.size()); + if (clust.originalId() >= nhits) + continue; + auto ij = jnd(clust.originalId()); + LocalPoint lp(xl[ij], yl[ij]); + LocalError le(xe[ij], 0, ye[ij]); + SiPixelRecHitQuality::QualWordType rqw = 0; + + numberOfClusters++; + + /* cpu version.... 
(for reference) + std::tuple tuple = cpe_->getParameters( clust, *genericDet ); + LocalPoint lp( std::get<0>(tuple) ); + LocalError le( std::get<1>(tuple) ); + SiPixelRecHitQuality::QualWordType rqw( std::get<2>(tuple) ); + */ + + // Create a persistent edm::Ref to the cluster + edm::Ref, SiPixelCluster> cluster = edmNew::makeRefTo(hclusters, &clust); + // Make a RecHit and add it to the DetSet + recHitsOnDetUnit.emplace_back(lp, le, rqw, *genericDet, cluster); + // ============================= + + LogDebug("SiPixelRecHitFromSoAAlpaka") << "cluster " << numberOfClusters << " at " << lp << ' ' << le; + + } // <-- End loop on Clusters + + // LogDebug("SiPixelRecHitGPU") + LogDebug("SiPixelRecHitFromSoAAlpaka") << "found " << recHitsOnDetUnit.size() << " RecHits on " << detid; + + } // <-- End loop on DetUnits + + LogDebug("SiPixelRecHitFromSoAAlpaka") << "found " << numberOfDetUnits << " dets, " << numberOfClusters + << " clusters"; + + iEvent.emplace(rechitsPutToken_, std::move(output)); +} + +using SiPixelRecHitFromSoAAlpakaPhase1 = SiPixelRecHitFromSoAAlpaka; +using SiPixelRecHitFromSoAAlpakaPhase2 = SiPixelRecHitFromSoAAlpaka; +using SiPixelRecHitFromSoAAlpakaHIonPhase1 = SiPixelRecHitFromSoAAlpaka; + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(SiPixelRecHitFromSoAAlpakaPhase1); +DEFINE_FWK_MODULE(SiPixelRecHitFromSoAAlpakaPhase2); +DEFINE_FWK_MODULE(SiPixelRecHitFromSoAAlpakaHIonPhase1); diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitSoAFromLegacy.cc b/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitSoAFromLegacy.cc index 8dc6ae93018ea..21da864c1c348 100644 --- a/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitSoAFromLegacy.cc +++ b/RecoLocalTracker/SiPixelRecHits/plugins/SiPixelRecHitSoAFromLegacy.cc @@ -198,7 +198,7 @@ void SiPixelRecHitSoAFromLegacyT::produce(edm::StreamID streamID, ndigi += clust.size(); } - cms::cuda::PortableHostCollection> digis_h(ndigi); + cms::cuda::PortableHostCollection 
digis_h(ndigi); clusterRef.clear(); clusters_h.view()[0].moduleId() = gind; diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelCPEFastParamsESProducerAlpaka.cc b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelCPEFastParamsESProducerAlpaka.cc new file mode 100644 index 0000000000000..73059a13dc636 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelCPEFastParamsESProducerAlpaka.cc @@ -0,0 +1,120 @@ +#include +#include +#include +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EventSetup.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "RecoLocalTracker/Records/interface/TkPixelCPERecord.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h" + +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "MagneticField/Engine/interface/MagneticField.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "RecoLocalTracker/ClusterParameterEstimator/interface/PixelClusterParameterEstimator.h" + +#include "CondFormats/DataRecord/interface/SiPixelGenErrorDBObjectRcd.h" +#include "RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + template + class PixelCPEFastParamsESProducerAlpaka : public ESProducer { + public: + PixelCPEFastParamsESProducerAlpaka(edm::ParameterSet const& iConfig); + std::unique_ptr> 
produce(const PixelCPEFastParamsRecord& iRecord); + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + edm::ESGetToken magfieldToken_; + edm::ESGetToken pDDToken_; + edm::ESGetToken hTTToken_; + edm::ESGetToken lorentzAngleToken_; + edm::ESGetToken lorentzAngleWidthToken_; + edm::ESGetToken genErrorDBObjectToken_; + + edm::ParameterSet pset_; + bool useErrorsFromTemplates_; + }; + + using namespace edm; + + template + PixelCPEFastParamsESProducerAlpaka::PixelCPEFastParamsESProducerAlpaka(const edm::ParameterSet& p) + : ESProducer(p), pset_(p) { + auto const& myname = p.getParameter("ComponentName"); + auto const& magname = p.getParameter("MagneticFieldRecord"); + useErrorsFromTemplates_ = p.getParameter("UseErrorsFromTemplates"); + + auto cc = setWhatProduced(this, myname); + magfieldToken_ = cc.consumes(magname); + pDDToken_ = cc.consumes(); + hTTToken_ = cc.consumes(); + lorentzAngleToken_ = cc.consumes(edm::ESInputTag("")); + lorentzAngleWidthToken_ = cc.consumes(edm::ESInputTag("", "forWidth")); + if (useErrorsFromTemplates_) { + genErrorDBObjectToken_ = cc.consumes(); + } + } + + template + std::unique_ptr> PixelCPEFastParamsESProducerAlpaka::produce( + const PixelCPEFastParamsRecord& iRecord) { + // add the new la width object + const SiPixelLorentzAngle* lorentzAngleWidthProduct = &iRecord.get(lorentzAngleWidthToken_); + + const SiPixelGenErrorDBObject* genErrorDBObjectProduct = nullptr; + + // Errors take only from new GenError + if (useErrorsFromTemplates_) { // do only when generrors are needed + genErrorDBObjectProduct = &iRecord.get(genErrorDBObjectToken_); + //} else { + //std::cout<<" pass an empty GenError pointer"<>(pset_, + &iRecord.get(magfieldToken_), + iRecord.get(pDDToken_), + iRecord.get(hTTToken_), + &iRecord.get(lorentzAngleToken_), + genErrorDBObjectProduct, + lorentzAngleWidthProduct); + } + + template + void PixelCPEFastParamsESProducerAlpaka::fillDescriptions( + edm::ConfigurationDescriptions& 
descriptions) { + edm::ParameterSetDescription desc; + + // from PixelCPEBase + PixelCPEBase::fillPSetDescription(desc); + + // from PixelCPEFast + PixelCPEFastParamsHost::fillPSetDescription(desc); + + // used by PixelCPEFast + desc.add("EdgeClusterErrorX", 50.0); + desc.add("EdgeClusterErrorY", 85.0); + desc.add("UseErrorsFromTemplates", true); + desc.add("TruncatePixelCharge", true); + + std::string name = "PixelCPEFastParams"; + name += TrackerTraits::nameModifier; + desc.add("ComponentName", name); + desc.add("MagneticFieldRecord", edm::ESInputTag()); + + descriptions.addWithDefaultLabel(desc); + } + + using PixelCPEFastParamsESProducerAlpakaPhase1 = PixelCPEFastParamsESProducerAlpaka; + using PixelCPEFastParamsESProducerAlpakaPhase2 = PixelCPEFastParamsESProducerAlpaka; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(PixelCPEFastParamsESProducerAlpakaPhase1); +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(PixelCPEFastParamsESProducerAlpakaPhase2); diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHitKernel.h b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHitKernel.h new file mode 100644 index 0000000000000..2fc1404a03bb7 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHitKernel.h @@ -0,0 +1,45 @@ +#ifndef RecoLocalTracker_SiPixelRecHits_PixelRecHitKernel_h +#define RecoLocalTracker_SiPixelRecHits_PixelRecHitKernel_h + +#include + +#include + +#include "DataFormats/BeamSpot/interface/BeamSpotPOD.h" +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" 
+#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace pixelgpudetails { + using namespace cms::alpakatools; + + template + class PixelRecHitKernel { + public: + PixelRecHitKernel() = default; + ~PixelRecHitKernel() = default; + + PixelRecHitKernel(const PixelRecHitKernel&) = delete; + PixelRecHitKernel(PixelRecHitKernel&&) = delete; + PixelRecHitKernel& operator=(const PixelRecHitKernel&) = delete; + PixelRecHitKernel& operator=(PixelRecHitKernel&&) = delete; + + using ParamsOnDevice = pixelCPEforDevice::ParamsOnDeviceT; + + TrackingRecHitsSoACollection makeHitsAsync(SiPixelDigisSoACollection const& digis_d, + SiPixelClustersSoACollection const& clusters_d, + BeamSpotPOD const* bs_d, + ParamsOnDevice const* cpeParams, + Queue queue) const; + }; + } // namespace pixelgpudetails +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // RecoLocalTracker_SiPixelRecHits_PixelRecHitKernel_h diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHitKernels.dev.cc b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHitKernels.dev.cc new file mode 100644 index 0000000000000..f0d61a646c0ce --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHitKernels.dev.cc @@ -0,0 +1,143 @@ +// C++ headers +#include +#include + +// Alpaka headers +#include + +// CMSSW headers +#include "DataFormats/BeamSpot/interface/BeamSpotPOD.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +#include "PixelRecHitKernel.h" +#include "PixelRecHits.h" + +//#define GPU_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using namespace cms::alpakatools; + template + class setHitsLayerStart { + 
public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + uint32_t const* __restrict__ hitsModuleStart, + pixelCPEforDevice::ParamsOnDeviceT const* __restrict__ cpeParams, + uint32_t* __restrict__ hitsLayerStart) const { + assert(0 == hitsModuleStart[0]); + + for (int32_t i : cms::alpakatools::elements_with_stride(acc, TrackerTraits::numberOfLayers + 1)) { + hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]]; +#ifdef GPU_DEBUG + int old = i == 0 ? 0 : hitsModuleStart[cpeParams->layerGeometry().layerStart[i - 1]]; + printf("LayerStart %d/%d at module %d: %d - %d\n", + i, + TrackerTraits::numberOfLayers, + cpeParams->layerGeometry().layerStart[i], + hitsLayerStart[i], + hitsLayerStart[i] - old); +#endif + } + } + }; + + namespace pixelgpudetails { + + template + TrackingRecHitsSoACollection PixelRecHitKernel::makeHitsAsync( + SiPixelDigisSoACollection const& digis_d, + SiPixelClustersSoACollection const& clusters_d, + BeamSpotPOD const* bs_d, + pixelCPEforDevice::ParamsOnDeviceT const* cpeParams, + Queue queue) const { + using namespace pixelRecHits; + auto nHits = clusters_d.nClusters(); + auto offsetBPIX2 = clusters_d.offsetBPIX2(); + + TrackingRecHitsSoACollection hits_d(nHits, offsetBPIX2, clusters_d->clusModuleStart(), queue); + + int activeModulesWithDigis = digis_d.nModules(); + + // protect from empty events + if (activeModulesWithDigis) { + int threadsPerBlock = 128; + int blocks = activeModulesWithDigis; + const auto workDiv1D = cms::alpakatools::make_workdiv(blocks, threadsPerBlock); + +#ifdef GPU_DEBUG + std::cout << "launching GetHits kernel on " << alpaka::core::demangled << " with " << blocks << " blocks" + << std::endl; +#endif + alpaka::exec(queue, + workDiv1D, + GetHits{}, + cpeParams, + bs_d, + digis_d.view(), + digis_d.nDigis(), + clusters_d.view(), + hits_d.view()); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + // assuming full warp of threads is better than a smaller number... 
+ if (nHits) { + const auto workDiv1D = cms::alpakatools::make_workdiv(1, 32); + alpaka::exec(queue, + workDiv1D, + setHitsLayerStart{}, + clusters_d->clusModuleStart(), + cpeParams, + hits_d.view().hitsLayerStart().data()); + constexpr auto nLayers = TrackerTraits::numberOfLayers; + + // Use a view since it's runtime sized and can't use the implicit definition + // see HeterogeneousCore/AlpakaInterface/interface/OneToManyAssoc.h:100 + typename TrackingRecHitSoA::PhiBinnerView hrv_d; + hrv_d.assoc = &(hits_d.view().phiBinner()); + hrv_d.offSize = -1; + hrv_d.offStorage = nullptr; + hrv_d.contentSize = nHits; + hrv_d.contentStorage = hits_d.view().phiBinnerStorage(); + + // fillManyFromVector(h_d.data(), nParts, v_d.data(), offsets_d.data(), offsets[10], 256, queue); + /* cms::alpakatools::fillManyFromVector(&(hits_d.view().phiBinner()), + nLayers, + hits_d.view().iphi(), + hits_d.view().hitsLayerStart().data(), + nHits, + (uint32_t)256, + queue); +*/ + cms::alpakatools::fillManyFromVector(&(hits_d.view().phiBinner()), + hrv_d, + nLayers, + hits_d.view().iphi(), + hits_d.view().hitsLayerStart().data(), + nHits, + (uint32_t)256, + queue); + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + } + } + +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "PixelRecHitKernel -> DONE!" 
<< std::endl; +#endif + + return hits_d; + } + + template class PixelRecHitKernel; + template class PixelRecHitKernel; + template class PixelRecHitKernel; + + } // namespace pixelgpudetails +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHits.h b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHits.h new file mode 100644 index 0000000000000..45587034b572b --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/PixelRecHits.h @@ -0,0 +1,241 @@ +#ifndef RecoLocalTracker_SiPixelRecHits_alpaka_PixelRecHits_h +#define RecoLocalTracker_SiPixelRecHits_alpaka_PixelRecHits_h + +#include +#include +#include +#include + +#include + +#include "DataFormats/BeamSpot/interface/BeamSpotPOD.h" +#include "DataFormats/Math/interface/approx_atan2.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +//#define GPU_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace pixelRecHits { + + template + class GetHits { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + pixelCPEforDevice::ParamsOnDeviceT const* __restrict__ cpeParams, + BeamSpotPOD const* __restrict__ bs, + SiPixelDigisSoAConstView digis, + uint32_t numElements, + SiPixelClustersSoAConstView clusters, + TrackingRecHitSoAView hits) const { + // FIXME + // the compiler seems NOT to optimize loads from views (even in a simple test case) + // The whole gimnastic here of copying or not is a pure 
heuristic exercise that seems to produce the fastest code with the above signature + // not using views (passing a gazzilion of array pointers) seems to produce the fastest code (but it is harder to mantain) + + ALPAKA_ASSERT_OFFLOAD(cpeParams); + + const uint32_t blockIdx(alpaka::getIdx(acc)[0u]); + + // copy average geometry corrected by beamspot . FIXME (move it somewhere else???) + if (0 == blockIdx) { + auto& agc = hits.averageGeometry(); + auto const& ag = cpeParams->averageGeometry(); + auto nLadders = TrackerTraits::numberOfLaddersInBarrel; + + cms::alpakatools::for_each_element_in_block_strided(acc, nLadders, [&](uint32_t il) { + agc.ladderZ[il] = ag.ladderZ[il] - bs->z; + agc.ladderX[il] = ag.ladderX[il] - bs->x; + agc.ladderY[il] = ag.ladderY[il] - bs->y; + agc.ladderR[il] = sqrt(agc.ladderX[il] * agc.ladderX[il] + agc.ladderY[il] * agc.ladderY[il]); + agc.ladderMinZ[il] = ag.ladderMinZ[il] - bs->z; + agc.ladderMaxZ[il] = ag.ladderMaxZ[il] - bs->z; + }); + + if (cms::alpakatools::once_per_block(acc)) { + agc.endCapZ[0] = ag.endCapZ[0] - bs->z; + agc.endCapZ[1] = ag.endCapZ[1] - bs->z; + } + } + + // to be moved in common namespace... + using pixelClustering::invalidModuleId; + constexpr int32_t MaxHitsInIter = pixelCPEforDevice::MaxHitsInIter; + + using ClusParams = pixelCPEforDevice::ClusParams; + + // as usual one block per module + auto& clusParams = alpaka::declareSharedVar(acc); + + auto me = clusters[blockIdx].moduleId(); + int nclus = clusters[me].clusInModule(); + + if (0 == nclus) + return; +#ifdef GPU_DEBUG + if (cms::alpakatools::once_per_block(acc)) { + auto k = clusters[1 + blockIdx].moduleStart(); + while (digis[k].moduleId() == invalidModuleId) + ++k; + ALPAKA_ASSERT_OFFLOAD(digis[k].moduleId() == me); + } + + if (me % 100 == 1) + if (cms::alpakatools::once_per_block(acc)) + printf( + "hitbuilder: %d clusters in module %d. 
will write at %d\n", nclus, me, clusters[me].clusModuleStart()); +#endif + + for (int startClus = 0, endClus = nclus; startClus < endClus; startClus += MaxHitsInIter) { + auto first = clusters[1 + blockIdx].moduleStart(); + + int nClusInIter = alpaka::math::min(acc, MaxHitsInIter, endClus - startClus); + int lastClus = startClus + nClusInIter; + assert(nClusInIter <= nclus); + assert(nClusInIter > 0); + assert(lastClus <= nclus); + + assert(nclus > MaxHitsInIter || (0 == startClus && nClusInIter == nclus && lastClus == nclus)); + + // init + cms::alpakatools::for_each_element_in_block_strided(acc, nClusInIter, [&](uint32_t ic) { + clusParams.minRow[ic] = std::numeric_limits::max(); + clusParams.maxRow[ic] = 0; + clusParams.minCol[ic] = std::numeric_limits::max(); + clusParams.maxCol[ic] = 0; + clusParams.charge[ic] = 0; + clusParams.q_f_X[ic] = 0; + clusParams.q_l_X[ic] = 0; + clusParams.q_f_Y[ic] = 0; + clusParams.q_l_Y[ic] = 0; + }); + + alpaka::syncBlockThreads(acc); + + // one thread per "digi" + const uint32_t blockDimension(alpaka::getWorkDiv(acc)[0u]); + const auto& [firstElementIdxNoStride, endElementIdxNoStride] = + cms::alpakatools::element_index_range_in_block(acc, first); + uint32_t rowsColsFirstElementIdx = firstElementIdxNoStride; + uint32_t rowsColsEndElementIdx = endElementIdxNoStride; + for (uint32_t i = rowsColsFirstElementIdx; i < numElements; ++i) { + if (not cms::alpakatools::next_valid_element_index_strided( + i, rowsColsFirstElementIdx, rowsColsEndElementIdx, blockDimension, numElements)) + break; + auto id = digis[i].moduleId(); + if (id == invalidModuleId) + continue; // not valid + if (id != me) + break; // end of module + auto cl = digis[i].clus(); + if (cl < startClus || cl >= lastClus) + continue; + cl -= startClus; + ALPAKA_ASSERT_OFFLOAD(cl >= 0); + ALPAKA_ASSERT_OFFLOAD(cl < MaxHitsInIter); + auto x = digis[i].xx(); + auto y = digis[i].yy(); + alpaka::atomicMin(acc, &clusParams.minRow[cl], (uint32_t)x, alpaka::hierarchy::Threads{}); 
+ alpaka::atomicMax(acc, &clusParams.maxRow[cl], (uint32_t)x, alpaka::hierarchy::Threads{}); + alpaka::atomicMin(acc, &clusParams.minCol[cl], (uint32_t)y, alpaka::hierarchy::Threads{}); + alpaka::atomicMax(acc, &clusParams.maxCol[cl], (uint32_t)y, alpaka::hierarchy::Threads{}); + } + + alpaka::syncBlockThreads(acc); + + auto pixmx = cpeParams->detParams(me).pixmx; + uint32_t chargeFirstElementIdx = firstElementIdxNoStride; + uint32_t chargeEndElementIdx = endElementIdxNoStride; + for (uint32_t i = chargeFirstElementIdx; i < numElements; ++i) { + if (not cms::alpakatools::next_valid_element_index_strided( + i, chargeFirstElementIdx, chargeEndElementIdx, blockDimension, numElements)) + break; + auto id = digis[i].moduleId(); + if (id == invalidModuleId) + continue; // not valid + if (id != me) + break; // end of module + auto cl = digis[i].clus(); + if (cl < startClus || cl >= lastClus) + continue; + cl -= startClus; + ALPAKA_ASSERT_OFFLOAD(cl >= 0); + ALPAKA_ASSERT_OFFLOAD(cl < MaxHitsInIter); + auto x = digis[i].xx(); + auto y = digis[i].yy(); + auto ch = digis[i].adc(); + alpaka::atomicAdd(acc, &clusParams.charge[cl], (int32_t)ch, alpaka::hierarchy::Threads{}); + ch = alpaka::math::min(acc, ch, pixmx); + if (clusParams.minRow[cl] == x) + alpaka::atomicAdd(acc, &clusParams.q_f_X[cl], (int32_t)ch, alpaka::hierarchy::Threads{}); + if (clusParams.maxRow[cl] == x) + alpaka::atomicAdd(acc, &clusParams.q_l_X[cl], (int32_t)ch, alpaka::hierarchy::Threads{}); + if (clusParams.minCol[cl] == y) + alpaka::atomicAdd(acc, &clusParams.q_f_Y[cl], (int32_t)ch, alpaka::hierarchy::Threads{}); + if (clusParams.maxCol[cl] == y) + alpaka::atomicAdd(acc, &clusParams.q_l_Y[cl], (int32_t)ch, alpaka::hierarchy::Threads{}); + } + + alpaka::syncBlockThreads(acc); + + // next one cluster per thread... 
+ first = clusters[me].clusModuleStart() + startClus; + cms::alpakatools::for_each_element_in_block_strided(acc, nClusInIter, [&](uint32_t ic) { + auto h = first + ic; // output index in global memory + + assert(h < (uint32_t)hits.metadata().size()); + assert(h < clusters[me + 1].clusModuleStart()); + + pixelCPEforDevice::position( + cpeParams->commonParams(), cpeParams->detParams(me), clusParams, ic); + + pixelCPEforDevice::errorFromDB( + cpeParams->commonParams(), cpeParams->detParams(me), clusParams, ic); + + // store it + hits[h].chargeAndStatus().charge = clusParams.charge[ic]; + hits[h].chargeAndStatus().status = clusParams.status[ic]; + hits[h].detectorIndex() = me; + + float xl, yl; + hits[h].xLocal() = xl = clusParams.xpos[ic]; + hits[h].yLocal() = yl = clusParams.ypos[ic]; + + hits[h].clusterSizeX() = clusParams.xsize[ic]; + hits[h].clusterSizeY() = clusParams.ysize[ic]; + + hits[h].xerrLocal() = clusParams.xerr[ic] * clusParams.xerr[ic] + cpeParams->detParams(me).apeXX; + hits[h].yerrLocal() = clusParams.yerr[ic] * clusParams.yerr[ic] + cpeParams->detParams(me).apeYY; + + // keep it local for computations + float xg, yg, zg; + // to global and compute phi... + cpeParams->detParams(me).frame.toGlobal(xl, yl, xg, yg, zg); + // here correct for the beamspot... 
+ xg -= bs->x; + yg -= bs->y; + zg -= bs->z; + + hits[h].xGlobal() = xg; + hits[h].yGlobal() = yg; + hits[h].zGlobal() = zg; + + hits[h].rGlobal() = alpaka::math::sqrt(acc, xg * xg + yg * yg); + hits[h].iphi() = unsafe_atan2s<7>(yg, xg); + }); + alpaka::syncBlockThreads(acc); + } // end loop on batches + } + }; + + } // namespace pixelRecHits +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // RecoLocalTracker_SiPixelRecHits_plugins_alpaka_PixelRecHits_h diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/SiPixelRecHitAlpaka.cc b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/SiPixelRecHitAlpaka.cc new file mode 100644 index 0000000000000..46fd8a6b8c2ca --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/plugins/alpaka/SiPixelRecHitAlpaka.cc @@ -0,0 +1,100 @@ +#include "DataFormats/BeamSpot/interface/BeamSpotPOD.h" +#include "DataFormats/BeamSpot/interface/alpaka/BeamSpotDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/SiPixelClustersDevice.h" +#include "DataFormats/SiPixelClusterSoA/interface/alpaka/SiPixelClustersSoACollection.h" +#include "DataFormats/SiPixelDigiSoA/interface/SiPixelDigisDevice.h" +#include "DataFormats/SiPixelDigiSoA/interface/alpaka/SiPixelDigisSoACollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsDevice.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EventSetup.h" +#include 
"HeterogeneousCore/AlpakaCore/interface/alpaka/global/EDProducer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/Records/interface/TrackerDigiGeometryRecord.h" +#include "Geometry/TrackerGeometryBuilder/interface/TrackerGeometry.h" +#include "RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h" + +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEBase.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h" + +#include "PixelRecHitKernel.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + template + class SiPixelRecHitAlpaka : public global::EDProducer<> { + public: + explicit SiPixelRecHitAlpaka(const edm::ParameterSet& iConfig); + ~SiPixelRecHitAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + void produce(edm::StreamID streamID, device::Event& iEvent, const device::EventSetup& iSetup) const override; + + const device::ESGetToken, PixelCPEFastParamsRecord> cpeToken_; + const device::EDGetToken tBeamSpot; + const device::EDGetToken tokenClusters_; + const device::EDGetToken tokenDigi_; + const device::EDPutToken> tokenHit_; + + const pixelgpudetails::PixelRecHitKernel Algo_; + }; + + template + SiPixelRecHitAlpaka::SiPixelRecHitAlpaka(const edm::ParameterSet& iConfig) + : cpeToken_(esConsumes(edm::ESInputTag("", iConfig.getParameter("CPE")))), + tBeamSpot(consumes(iConfig.getParameter("beamSpot"))), + tokenClusters_(consumes(iConfig.getParameter("src"))), + tokenDigi_(consumes(iConfig.getParameter("src"))), + 
tokenHit_(produces()) {} + + template + void SiPixelRecHitAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + + desc.add("beamSpot", edm::InputTag("offlineBeamSpotDevice")); + desc.add("src", edm::InputTag("siPixelClustersPreSplittingAlpaka")); + + std::string cpe = "PixelCPEFastParams"; + cpe += TrackerTraits::nameModifier; + desc.add("CPE", cpe); + + descriptions.addWithDefaultLabel(desc); + } + + template + void SiPixelRecHitAlpaka::produce(edm::StreamID streamID, + device::Event& iEvent, + const device::EventSetup& es) const { + auto& fcpe = es.getData(cpeToken_); + + auto const& clusters = iEvent.get(tokenClusters_); + + auto const& digis = iEvent.get(tokenDigi_); + + auto const& bs = iEvent.get(tBeamSpot); + + iEvent.emplace(tokenHit_, + Algo_.makeHitsAsync(digis, clusters, bs.data(), fcpe.const_buffer().data(), iEvent.queue())); + } + using SiPixelRecHitAlpakaPhase1 = SiPixelRecHitAlpaka; + using SiPixelRecHitAlpakaPhase2 = SiPixelRecHitAlpaka; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(SiPixelRecHitAlpakaPhase1); +DEFINE_FWK_ALPAKA_MODULE(SiPixelRecHitAlpakaPhase2); diff --git a/RecoLocalTracker/SiPixelRecHits/plugins/gpuPixelRecHits.h b/RecoLocalTracker/SiPixelRecHits/plugins/gpuPixelRecHits.h index 09d0b55030d9c..55c556bd63048 100644 --- a/RecoLocalTracker/SiPixelRecHits/plugins/gpuPixelRecHits.h +++ b/RecoLocalTracker/SiPixelRecHits/plugins/gpuPixelRecHits.h @@ -7,19 +7,20 @@ #include "CUDADataFormats/BeamSpot/interface/BeamSpotCUDA.h" #include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h" +#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" #include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h" #include "DataFormats/Math/interface/approx_atan2.h" #include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" #include 
"RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h" -#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h" -//#define GPU_DEBUG 1 +//#define GPU_DEBUG + namespace gpuPixelRecHits { template __global__ void getHits(pixelCPEforGPU::ParamsOnGPUT const* __restrict__ cpeParams, BeamSpotPOD const* __restrict__ bs, - SiPixelDigisCUDASOAConstView digis, + SiPixelDigisSoA::ConstView digis, int numElements, SiPixelClustersCUDASOAConstView clusters, TrackingRecHitSoAView hits) { diff --git a/RecoLocalTracker/SiPixelRecHits/python/PixelCPEESProducers_cff.py b/RecoLocalTracker/SiPixelRecHits/python/PixelCPEESProducers_cff.py index 686b0afc335c4..52efaece5e4df 100644 --- a/RecoLocalTracker/SiPixelRecHits/python/PixelCPEESProducers_cff.py +++ b/RecoLocalTracker/SiPixelRecHits/python/PixelCPEESProducers_cff.py @@ -1,4 +1,5 @@ import FWCore.ParameterSet.Config as cms +from Configuration.ProcessModifiers.alpaka_cff import alpaka # # Load all Pixel Cluster Position Estimator ESProducers @@ -18,3 +19,10 @@ # from CalibTracker.SiPixelESProducers.SiPixelTemplateDBObjectESProducer_cfi import * from CalibTracker.SiPixelESProducers.SiPixel2DTemplateDBObjectESProducer_cfi import * + +def _addProcessCPEsAlpaka(process): + process.load("RecoLocalTracker.SiPixelRecHits.pixelCPEFastParamsESProducerAlpakaPhase1_cfi") + process.load("RecoLocalTracker.SiPixelRecHits.pixelCPEFastParamsESProducerAlpakaPhase2_cfi") + +modifyConfigurationForAlpakaCPEs_ = alpaka.makeProcessModifier(_addProcessCPEsAlpaka) + diff --git a/RecoLocalTracker/SiPixelRecHits/python/SiPixelRecHits_cfi.py b/RecoLocalTracker/SiPixelRecHits/python/SiPixelRecHits_cfi.py index f45b41861995d..7e8910a8e0918 100644 --- a/RecoLocalTracker/SiPixelRecHits/python/SiPixelRecHits_cfi.py +++ b/RecoLocalTracker/SiPixelRecHits/python/SiPixelRecHits_cfi.py @@ -1,6 +1,8 @@ import FWCore.ParameterSet.Config as cms +from HeterogeneousCore.AlpakaCore.functions import * from HeterogeneousCore.CUDACore.SwitchProducerCUDA import 
SwitchProducerCUDA from Configuration.ProcessModifiers.gpu_cff import gpu +from Configuration.ProcessModifiers.alpaka_cff import alpaka # legacy pixel rechit producer siPixelRecHits = cms.EDProducer("SiPixelRecHitConverter", @@ -112,9 +114,6 @@ ) ) - -#(gpu & pixelNtupletFit & phase2_tracker).toReplaceWith(siPixelRecHitsPreSplitting , cuda = _siPixelRecHitFromCUDAPhase2.clone()) - (gpu & pixelNtupletFit).toReplaceWith(siPixelRecHitsPreSplittingTask, cms.Task( # reconstruct the pixel rechits on the gpu or on the cpu # (normally only one of the two is run because only one is consumed from later stages) @@ -125,3 +124,46 @@ # producing and converting on cpu (if needed) siPixelRecHitsPreSplittingSoA )) + +###################################################################### + +### Alpaka Pixel Hits Reco +from RecoLocalTracker.SiPixelRecHits.siPixelRecHitAlpakaPhase1_cfi import siPixelRecHitAlpakaPhase1 as _siPixelRecHitAlpakaPhase1 +from RecoLocalTracker.SiPixelRecHits.siPixelRecHitAlpakaPhase2_cfi import siPixelRecHitAlpakaPhase2 as _siPixelRecHitAlpakaPhase2 + +# Hit SoA producer on the device +siPixelRecHitsPreSplittingAlpaka = _siPixelRecHitAlpakaPhase1.clone( + src = "siPixelClustersPreSplittingAlpaka" +) +phase2_tracker.toReplaceWith(siPixelRecHitsPreSplittingAlpaka,_siPixelRecHitAlpakaPhase2.clone( + src = "siPixelClustersPreSplittingAlpaka" +)) + +# Hit SoA producer on the cpu, for validation +siPixelRecHitsPreSplittingAlpakaSerial = makeSerialClone(siPixelRecHitsPreSplittingAlpaka, + src = "siPixelClustersPreSplittingAlpakaSerial" +) + +from RecoLocalTracker.SiPixelRecHits.siPixelRecHitFromSoAAlpakaPhase1_cfi import siPixelRecHitFromSoAAlpakaPhase1 as _siPixelRecHitFromSoAAlpakaPhase1 +from RecoLocalTracker.SiPixelRecHits.siPixelRecHitFromSoAAlpakaPhase2_cfi import siPixelRecHitFromSoAAlpakaPhase2 as _siPixelRecHitFromSoAAlpakaPhase2 + +(alpaka & ~phase2_tracker).toModify(siPixelRecHitsPreSplitting, + cpu = _siPixelRecHitFromSoAAlpakaPhase1.clone( + 
pixelRecHitSrc = cms.InputTag('siPixelRecHitsPreSplittingAlpaka'), + src = cms.InputTag('siPixelClustersPreSplitting')) +) + +(alpaka & phase2_tracker).toModify(siPixelRecHitsPreSplitting, + cpu = _siPixelRecHitFromSoAAlpakaPhase2.clone( + pixelRecHitSrc = cms.InputTag('siPixelRecHitsPreSplittingAlpaka'), + src = cms.InputTag('siPixelClustersPreSplitting')) +) + + +alpaka.toReplaceWith(siPixelRecHitsPreSplittingTask, cms.Task( + # Reconstruct the pixel hits with alpaka on the device + siPixelRecHitsPreSplittingAlpaka, + # Reconstruct the pixel hits with alpaka on the cpu (if requested by the validation) + siPixelRecHitsPreSplittingAlpakaSerial, + # Convert hit soa on host to legacy formats + siPixelRecHitsPreSplitting)) diff --git a/RecoLocalTracker/SiPixelRecHits/src/ES_PixelCPEFastParams.cc b/RecoLocalTracker/SiPixelRecHits/src/ES_PixelCPEFastParams.cc new file mode 100644 index 0000000000000..804f817bdb6e0 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/src/ES_PixelCPEFastParams.cc @@ -0,0 +1,9 @@ +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h" +#include "FWCore/Utilities/interface/typelookup.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +using PixelCPEFastParamsHostPhase1 = PixelCPEFastParamsHost; +using PixelCPEFastParamsHostPhase2 = PixelCPEFastParamsHost; + +TYPELOOKUP_DATA_REG(PixelCPEFastParamsHostPhase1); +TYPELOOKUP_DATA_REG(PixelCPEFastParamsHostPhase2); diff --git a/RecoLocalTracker/SiPixelRecHits/src/PixelCPEFastParams.cc b/RecoLocalTracker/SiPixelRecHits/src/PixelCPEFastParams.cc new file mode 100644 index 0000000000000..d98c84e5860f4 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/src/PixelCPEFastParams.cc @@ -0,0 +1,9 @@ +#include "FWCore/Utilities/interface/typelookup.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsDevice.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" + +using PixelCPEFastParamsPhase1 = PixelCPEFastParamsDevice; 
+using PixelCPEFastParamsPhase2 = PixelCPEFastParamsDevice; + +TYPELOOKUP_DATA_REG(PixelCPEFastParamsPhase1); +TYPELOOKUP_DATA_REG(PixelCPEFastParamsPhase2); \ No newline at end of file diff --git a/RecoLocalTracker/SiPixelRecHits/src/PixelCPEFastParamsHost.cc b/RecoLocalTracker/SiPixelRecHits/src/PixelCPEFastParamsHost.cc new file mode 100644 index 0000000000000..36c127259a383 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/src/PixelCPEFastParamsHost.cc @@ -0,0 +1,482 @@ +#include + +#include "CondFormats/SiPixelTransient/interface/SiPixelGenError.h" +#include "DataFormats/GeometrySurface/interface/SOARotation.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/TrackingRecHitSoA/interface/SiPixelHitStatus.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/CopyToDevice.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/PixelCPEFastParamsHost.h" + +//----------------------------------------------------------------------------- +//! The constructor. 
+//----------------------------------------------------------------------------- +template +PixelCPEFastParamsHost::PixelCPEFastParamsHost(edm::ParameterSet const& conf, + const MagneticField* mag, + const TrackerGeometry& geom, + const TrackerTopology& ttopo, + const SiPixelLorentzAngle* lorentzAngle, + const SiPixelGenErrorDBObject* genErrorDBObject, + const SiPixelLorentzAngle* lorentzAngleWidth) + : PixelCPEGenericBase(conf, mag, geom, ttopo, lorentzAngle, genErrorDBObject, lorentzAngleWidth), + buffer_(cms::alpakatools::make_host_buffer>()) { + // Use errors from templates or from GenError + if (useErrorsFromTemplates_) { + if (!SiPixelGenError::pushfile(*genErrorDBObject_, this->thePixelGenError_)) + throw cms::Exception("InvalidCalibrationLoaded") + << "ERROR: GenErrors not filled correctly. Check the sqlite file. Using SiPixelTemplateDBObject version " + << (*genErrorDBObject_).version(); + } + + fillParamsForDevice(); +} + +template +void PixelCPEFastParamsHost::fillParamsForDevice() { + // this code executes only once per job, computation inefficiency is not an issue + // many code blocks are repeated: better keep the computation local and self consistent as blocks may in future move around, be deleted ... 
+ // It is valid only for Phase1 and the version of GenError in DB used in late 2018 and in 2021 + + buffer_->commonParams().theThicknessB = m_DetParams.front().theThickness; + buffer_->commonParams().theThicknessE = m_DetParams.back().theThickness; + buffer_->commonParams().thePitchX = m_DetParams[0].thePitchX; + buffer_->commonParams().thePitchY = m_DetParams[0].thePitchY; + + buffer_->commonParams().numberOfLaddersInBarrel = TrackerTraits::numberOfLaddersInBarrel; + + LogDebug("PixelCPEFastParamsHost") << "pitch & thickness " << buffer_->commonParams().thePitchX << ' ' + << buffer_->commonParams().thePitchY << " " + << buffer_->commonParams().theThicknessB << ' ' + << buffer_->commonParams().theThicknessE; + + // zero average geometry + memset(&buffer_->averageGeometry(), 0, sizeof(pixelTopology::AverageGeometryT)); + + uint32_t oldLayer = 0; + uint32_t oldLadder = 0; + float rl = 0; + float zl = 0; + float miz = 500, mxz = 0; + float pl = 0; + int nl = 0; + + assert(m_DetParams.size() <= TrackerTraits::numberOfModules); + for (auto i = 0U; i < m_DetParams.size(); ++i) { + auto& p = m_DetParams[i]; + auto& g = buffer_->detParams(i); + + g.nRowsRoc = p.theDet->specificTopology().rowsperroc(); + g.nColsRoc = p.theDet->specificTopology().colsperroc(); + g.nRows = p.theDet->specificTopology().rocsX() * g.nRowsRoc; + g.nCols = p.theDet->specificTopology().rocsY() * g.nColsRoc; + + g.numPixsInModule = g.nRows * g.nCols; + + assert(p.theDet->index() == int(i)); + assert(buffer_->commonParams().thePitchY == p.thePitchY); + assert(buffer_->commonParams().thePitchX == p.thePitchX); + + g.isBarrel = GeomDetEnumerators::isBarrel(p.thePart); + g.isPosZ = p.theDet->surface().position().z() > 0; + g.layer = ttopo_.layer(p.theDet->geographicalId()); + g.index = i; // better be! + g.rawId = p.theDet->geographicalId(); + auto thickness = g.isBarrel ? 
buffer_->commonParams().theThicknessB : buffer_->commonParams().theThicknessE; + assert(thickness == p.theThickness); + + auto ladder = ttopo_.pxbLadder(p.theDet->geographicalId()); + if (oldLayer != g.layer) { + oldLayer = g.layer; + LogDebug("PixelCPEFastParamsHost") << "new layer at " << i << (g.isBarrel ? " B " : (g.isPosZ ? " E+ " : " E- ")) + << g.layer << " starting at " << g.rawId << '\n' + << "old layer had " << nl << " ladders"; + nl = 0; + } + if (oldLadder != ladder) { + oldLadder = ladder; + LogDebug("PixelCPEFastParamsHost") << "new ladder at " << i + << (g.isBarrel ? " B " : (g.isPosZ ? " E+ " : " E- ")) << ladder + << " starting at " << g.rawId << '\n' + << "old ladder ave z,r,p mz " << zl / 8.f << " " << rl / 8.f << " " << pl / 8.f + << ' ' << miz << ' ' << mxz; + rl = 0; + zl = 0; + pl = 0; + miz = 500; + mxz = 0; + nl++; + } + + g.shiftX = 0.5f * p.lorentzShiftInCmX; + g.shiftY = 0.5f * p.lorentzShiftInCmY; + g.chargeWidthX = p.lorentzShiftInCmX * p.widthLAFractionX; + g.chargeWidthY = p.lorentzShiftInCmY * p.widthLAFractionY; + + g.x0 = p.theOrigin.x(); + g.y0 = p.theOrigin.y(); + g.z0 = p.theOrigin.z(); + + auto vv = p.theDet->surface().position(); + auto rr = pixelCPEforDevice::Rotation(p.theDet->surface().rotation()); + g.frame = pixelCPEforDevice::Frame(vv.x(), vv.y(), vv.z(), rr); + + zl += vv.z(); + miz = std::min(miz, std::abs(vv.z())); + mxz = std::max(mxz, std::abs(vv.z())); + rl += vv.perp(); + pl += vv.phi(); // (not obvious) + + // errors ..... + ClusterParamGeneric cp; + + cp.with_track_angle = false; + + auto lape = p.theDet->localAlignmentError(); + if (lape.invalid()) + lape = LocalError(); // zero.... 
+ + g.apeXX = lape.xx(); + g.apeYY = lape.yy(); + + auto toMicron = [&](float x) { return std::min(511, int(x * 1.e4f + 0.5f)); }; + + // average angle + auto gvx = p.theOrigin.x() + 40.f * buffer_->commonParams().thePitchX; + auto gvy = p.theOrigin.y(); + auto gvz = 1.f / p.theOrigin.z(); + //--- Note that the normalization is not required as only the ratio used + + { + // calculate angles (fed into errorFromTemplates) + cp.cotalpha = gvx * gvz; + cp.cotbeta = gvy * gvz; + + errorFromTemplates(p, cp, 20000.); + } + +#ifdef EDM_ML_DEBUG + auto m = 10000.f; + for (float qclus = 15000; qclus < 35000; qclus += 15000) { + errorFromTemplates(p, cp, qclus); + LogDebug("PixelCPEFastParamsHost") << i << ' ' << qclus << ' ' << cp.pixmx << ' ' << m * cp.sigmax << ' ' + << m * cp.sx1 << ' ' << m * cp.sx2 << ' ' << m * cp.sigmay << ' ' << m * cp.sy1 + << ' ' << m * cp.sy2; + } + LogDebug("PixelCPEFastParamsHost") << i << ' ' << m * std::sqrt(lape.xx()) << ' ' << m * std::sqrt(lape.yy()); +#endif // EDM_ML_DEBUG + + g.pixmx = std::max(0, cp.pixmx); + g.sx2 = toMicron(cp.sx2); + g.sy1 = std::max(21, toMicron(cp.sy1)); // for some angles sy1 is very small + g.sy2 = std::max(55, toMicron(cp.sy2)); // sometimes sy2 is smaller than others (due to angle?) 
+ + //sample xerr as function of position + // moduleOffsetX is the definition of TrackerTraits::xOffset, + // needs to be calculated because for Phase2 the modules are not uniform + float moduleOffsetX = -(0.5f * float(g.nRows) + TrackerTraits::bigPixXCorrection); + auto const xoff = moduleOffsetX * buffer_->commonParams().thePitchX; + + for (int ix = 0; ix < pixelCPEforDevice::kNumErrorBins; ++ix) { + auto x = xoff * (1.f - (0.5f + float(ix)) / 8.f); + auto gvx = p.theOrigin.x() - x; + auto gvy = p.theOrigin.y(); + auto gvz = 1.f / p.theOrigin.z(); + cp.cotbeta = gvy * gvz; + cp.cotalpha = gvx * gvz; + errorFromTemplates(p, cp, 20000.f); + g.sigmax[ix] = toMicron(cp.sigmax); + g.sigmax1[ix] = toMicron(cp.sx1); + LogDebug("PixelCPEFastParamsHost") << "sigmax vs x " << i << ' ' << x << ' ' << cp.cotalpha << ' ' + << int(g.sigmax[ix]) << ' ' << int(g.sigmax1[ix]) << ' ' << 10000.f * cp.sigmay + << std::endl; + } +#ifdef EDM_ML_DEBUG + // sample yerr as function of position + // moduleOffsetY is the definition of TrackerTraits::yOffset (removed) + float moduleOffsetY = 0.5f * float(g.nCols) + TrackerTraits::bigPixYCorrection; + auto const yoff = -moduleOffsetY * buffer_->commonParams().thePitchY; + + for (int ix = 0; ix < pixelCPEforDevice::kNumErrorBins; ++ix) { + auto y = yoff * (1.f - (0.5f + float(ix)) / 8.f); + auto gvx = p.theOrigin.x() + 40.f * buffer_->commonParams().thePitchY; + auto gvy = p.theOrigin.y() - y; + auto gvz = 1.f / p.theOrigin.z(); + cp.cotbeta = gvy * gvz; + cp.cotalpha = gvx * gvz; + errorFromTemplates(p, cp, 20000.f); + LogDebug("PixelCPEFastParamsHost") << "sigmay vs y " << i << ' ' << y << ' ' << cp.cotbeta << ' ' + << 10000.f * cp.sigmay << std::endl; + } +#endif // EDM_ML_DEBUG + + // calculate angles (repeated) + cp.cotalpha = gvx * gvz; + cp.cotbeta = gvy * gvz; + auto aveCB = cp.cotbeta; + + // sample x by charge + int qbin = pixelCPEforDevice::kGenErrorQBins; // low charge + int k = 0; + for (int qclus = 1000; qclus < 200000; qclus 
+= 1000) { + errorFromTemplates(p, cp, qclus); + if (cp.qBin_ == qbin) + continue; + qbin = cp.qBin_; + g.xfact[k] = cp.sigmax; + g.yfact[k] = cp.sigmay; + g.minCh[k++] = qclus; +#ifdef EDM_ML_DEBUG + LogDebug("PixelCPEFastParamsHost") << i << ' ' << g.rawId << ' ' << cp.cotalpha << ' ' << qclus << ' ' << cp.qBin_ + << ' ' << cp.pixmx << ' ' << m * cp.sigmax << ' ' << m * cp.sx1 << ' ' + << m * cp.sx2 << ' ' << m * cp.sigmay << ' ' << m * cp.sy1 << ' ' << m * cp.sy2 + << std::endl; +#endif // EDM_ML_DEBUG + } + + assert(k <= pixelCPEforDevice::kGenErrorQBins); + + // fill the rest (sometimes bin 4 is missing) + for (int kk = k; kk < pixelCPEforDevice::kGenErrorQBins; ++kk) { + g.xfact[kk] = g.xfact[k - 1]; + g.yfact[kk] = g.yfact[k - 1]; + g.minCh[kk] = g.minCh[k - 1]; + } + auto detx = 1.f / g.xfact[0]; + auto dety = 1.f / g.yfact[0]; + for (int kk = 0; kk < pixelCPEforDevice::kGenErrorQBins; ++kk) { + g.xfact[kk] *= detx; + g.yfact[kk] *= dety; + } + // sample y in "angle" (estimated from cluster size) + float ys = 8.f - 4.f; // apperent bias of half pixel (see plot) + // plot: https://indico.cern.ch/event/934821/contributions/3974619/attachments/2091853/3515041/DigilessReco.pdf page 25 + // sample yerr as function of "size" + for (int iy = 0; iy < pixelCPEforDevice::kNumErrorBins; ++iy) { + ys += 1.f; // first bin 0 is for size 9 (and size is in fixed point 2^3) + if (pixelCPEforDevice::kNumErrorBins - 1 == iy) + ys += 8.f; // last bin for "overflow" + // cp.cotalpha = ys*(buffer_->commonParams().thePitchX/(8.f*thickness)); // use this to print sampling in "x" (and comment the line below) + cp.cotbeta = std::copysign(ys * (buffer_->commonParams().thePitchY / (8.f * thickness)), aveCB); + errorFromTemplates(p, cp, 20000.f); + g.sigmay[iy] = toMicron(cp.sigmay); + LogDebug("PixelCPEFastParamsHost") << "sigmax/sigmay " << i << ' ' << (ys + 4.f) / 8.f << ' ' << cp.cotalpha + << '/' << cp.cotbeta << ' ' << 10000.f * cp.sigmax << '/' << int(g.sigmay[iy]) + << 
std::endl; + } + } // loop over det + + constexpr int numberOfModulesInLadder = TrackerTraits::numberOfModulesInLadder; + constexpr int numberOfLaddersInBarrel = TrackerTraits::numberOfLaddersInBarrel; + constexpr int numberOfModulesInBarrel = TrackerTraits::numberOfModulesInBarrel; + + constexpr float ladderFactor = 1.f / float(numberOfModulesInLadder); + + constexpr int firstEndcapPos = TrackerTraits::firstEndcapPos; + constexpr int firstEndcapNeg = TrackerTraits::firstEndcapNeg; + + // compute ladder baricenter (only in global z) for the barrel + // + auto& aveGeom = buffer_->averageGeometry(); + int il = 0; + for (int im = 0, nm = numberOfModulesInBarrel; im < nm; ++im) { + auto const& g = buffer_->detParams(im); + il = im / numberOfModulesInLadder; + assert(il < int(numberOfLaddersInBarrel)); + auto z = g.frame.z(); + aveGeom.ladderZ[il] += ladderFactor * z; + aveGeom.ladderMinZ[il] = std::min(aveGeom.ladderMinZ[il], z); + aveGeom.ladderMaxZ[il] = std::max(aveGeom.ladderMaxZ[il], z); + aveGeom.ladderX[il] += ladderFactor * g.frame.x(); + aveGeom.ladderY[il] += ladderFactor * g.frame.y(); + aveGeom.ladderR[il] += ladderFactor * sqrt(g.frame.x() * g.frame.x() + g.frame.y() * g.frame.y()); + } + assert(il + 1 == int(numberOfLaddersInBarrel)); + // add half_module and tollerance + constexpr float moduleLength = TrackerTraits::moduleLength; + constexpr float module_tolerance = 0.2f; + for (int il = 0, nl = numberOfLaddersInBarrel; il < nl; ++il) { + aveGeom.ladderMinZ[il] -= (0.5f * moduleLength - module_tolerance); + aveGeom.ladderMaxZ[il] += (0.5f * moduleLength - module_tolerance); + } + + // compute "max z" for first layer in endcap (should we restrict to the outermost ring?) 
+ for (auto im = TrackerTraits::layerStart[firstEndcapPos]; im < TrackerTraits::layerStart[firstEndcapPos + 1]; ++im) { + auto const& g = buffer_->detParams(im); + aveGeom.endCapZ[0] = std::max(aveGeom.endCapZ[0], g.frame.z()); + } + for (auto im = TrackerTraits::layerStart[firstEndcapNeg]; im < TrackerTraits::layerStart[firstEndcapNeg + 1]; ++im) { + auto const& g = buffer_->detParams(im); + aveGeom.endCapZ[1] = std::min(aveGeom.endCapZ[1], g.frame.z()); + } + // correct for outer ring being closer + aveGeom.endCapZ[0] -= TrackerTraits::endcapCorrection; + aveGeom.endCapZ[1] += TrackerTraits::endcapCorrection; +#ifdef EDM_ML_DEBUG + for (int jl = 0, nl = numberOfLaddersInBarrel; jl < nl; ++jl) { + LogDebug("PixelCPEFastParamsHost") << jl << ':' << aveGeom.ladderR[jl] << '/' + << std::sqrt(aveGeom.ladderX[jl] * aveGeom.ladderX[jl] + + aveGeom.ladderY[jl] * aveGeom.ladderY[jl]) + << ',' << aveGeom.ladderZ[jl] << ',' << aveGeom.ladderMinZ[jl] << ',' + << aveGeom.ladderMaxZ[jl] << '\n'; + } + LogDebug("PixelCPEFastParamsHost") << aveGeom.endCapZ[0] << ' ' << aveGeom.endCapZ[1]; +#endif // EDM_ML_DEBUG + + // fill Layer and ladders geometry + memset(&buffer_->layerGeometry(), 0, sizeof(pixelCPEforDevice::LayerGeometryT)); + memcpy(buffer_->layerGeometry().layerStart, + TrackerTraits::layerStart, + sizeof(pixelCPEforDevice::LayerGeometryT::layerStart)); + memcpy(buffer_->layerGeometry().layer, + pixelTopology::layer.data(), + pixelTopology::layer.size()); + buffer_->layerGeometry().maxModuleStride = pixelTopology::maxModuleStride; +} + +template +void PixelCPEFastParamsHost::errorFromTemplates(DetParam const& theDetParam, + ClusterParamGeneric& theClusterParam, + float qclus) const { + float locBz = theDetParam.bz; + float locBx = theDetParam.bx; + LogDebug("PixelCPEFastParamsHost") << "PixelCPEFastParamsHost::localPosition(...) 
: locBz = " << locBz; + + theClusterParam.pixmx = std::numeric_limits::max(); // max pixel charge for truncation of 2-D cluster + + theClusterParam.sigmay = -999.9; // CPE Generic y-error for multi-pixel cluster + theClusterParam.sigmax = -999.9; // CPE Generic x-error for multi-pixel cluster + theClusterParam.sy1 = -999.9; // CPE Generic y-error for single single-pixel + theClusterParam.sy2 = -999.9; // CPE Generic y-error for single double-pixel cluster + theClusterParam.sx1 = -999.9; // CPE Generic x-error for single single-pixel cluster + theClusterParam.sx2 = -999.9; // CPE Generic x-error for single double-pixel cluster + + float dummy; + + SiPixelGenError gtempl(this->thePixelGenError_); + int gtemplID = theDetParam.detTemplateId; + + theClusterParam.qBin_ = gtempl.qbin(gtemplID, + theClusterParam.cotalpha, + theClusterParam.cotbeta, + locBz, + locBx, + qclus, + false, + theClusterParam.pixmx, + theClusterParam.sigmay, + dummy, + theClusterParam.sigmax, + dummy, + theClusterParam.sy1, + dummy, + theClusterParam.sy2, + dummy, + theClusterParam.sx1, + dummy, + theClusterParam.sx2, + dummy); + + theClusterParam.sigmax = theClusterParam.sigmax * pixelCPEforDevice::micronsToCm; + theClusterParam.sx1 = theClusterParam.sx1 * pixelCPEforDevice::micronsToCm; + theClusterParam.sx2 = theClusterParam.sx2 * pixelCPEforDevice::micronsToCm; + + theClusterParam.sigmay = theClusterParam.sigmay * pixelCPEforDevice::micronsToCm; + theClusterParam.sy1 = theClusterParam.sy1 * pixelCPEforDevice::micronsToCm; + theClusterParam.sy2 = theClusterParam.sy2 * pixelCPEforDevice::micronsToCm; +} + +template <> +void PixelCPEFastParamsHost::errorFromTemplates(DetParam const& theDetParam, + ClusterParamGeneric& theClusterParam, + float qclus) const { + theClusterParam.qBin_ = 0.0f; +} + +//----------------------------------------------------------------------------- +//! Hit position in the local frame (in cm). Unlike other CPE's, this +//! 
one converts everything from the measurement frame (in channel numbers) +//! into the local frame (in centimeters). +//----------------------------------------------------------------------------- +template +LocalPoint PixelCPEFastParamsHost::localPosition(DetParam const& theDetParam, + ClusterParam& theClusterParamBase) const { + ClusterParamGeneric& theClusterParam = static_cast(theClusterParamBase); + + if (useErrorsFromTemplates_) { + errorFromTemplates(theDetParam, theClusterParam, theClusterParam.theCluster->charge()); + } else { + theClusterParam.qBin_ = 0; + } + + int q_f_X; //!< Q of the first pixel in X + int q_l_X; //!< Q of the last pixel in X + int q_f_Y; //!< Q of the first pixel in Y + int q_l_Y; //!< Q of the last pixel in Y + collect_edge_charges(theClusterParam, q_f_X, q_l_X, q_f_Y, q_l_Y, useErrorsFromTemplates_ && truncatePixelCharge_); + + // do GPU like ... + pixelCPEforDevice::ClusParams cp; + + cp.minRow[0] = theClusterParam.theCluster->minPixelRow(); + cp.maxRow[0] = theClusterParam.theCluster->maxPixelRow(); + cp.minCol[0] = theClusterParam.theCluster->minPixelCol(); + cp.maxCol[0] = theClusterParam.theCluster->maxPixelCol(); + + cp.q_f_X[0] = q_f_X; + cp.q_l_X[0] = q_l_X; + cp.q_f_Y[0] = q_f_Y; + cp.q_l_Y[0] = q_l_Y; + + cp.charge[0] = theClusterParam.theCluster->charge(); + + auto ind = theDetParam.theDet->index(); + pixelCPEforDevice::position(buffer_->commonParams(), buffer_->detParams(ind), cp, 0); + auto xPos = cp.xpos[0]; + auto yPos = cp.ypos[0]; + + // set the error (mind ape....) 
+ pixelCPEforDevice::errorFromDB(buffer_->commonParams(), buffer_->detParams(ind), cp, 0); + theClusterParam.sigmax = cp.xerr[0]; + theClusterParam.sigmay = cp.yerr[0]; + + LogDebug("PixelCPEFastParamsHost") << " in PixelCPEFastParamsHost:localPosition - pos = " << xPos << " " << yPos + << " size " << cp.maxRow[0] - cp.minRow[0] << ' ' << cp.maxCol[0] - cp.minCol[0]; + + //--- Now put the two together + LocalPoint pos_in_local(xPos, yPos); + return pos_in_local; +} + +//============== INFLATED ERROR AND ERRORS FROM DB BELOW ================ + +//------------------------------------------------------------------------- +// Hit error in the local frame +//------------------------------------------------------------------------- +template +LocalError PixelCPEFastParamsHost::localError(DetParam const& theDetParam, + ClusterParam& theClusterParamBase) const { + ClusterParamGeneric& theClusterParam = static_cast(theClusterParamBase); + + auto xerr = theClusterParam.sigmax; + auto yerr = theClusterParam.sigmay; + + LogDebug("PixelCPEFastParamsHost") << " errors " << xerr << " " << yerr; + + auto xerr_sq = xerr * xerr; + auto yerr_sq = yerr * yerr; + + return LocalError(xerr_sq, 0, yerr_sq); +} + +template +void PixelCPEFastParamsHost::fillPSetDescription(edm::ParameterSetDescription& desc) { + // call PixelCPEGenericBase fillPSetDescription to add common rechit errors + PixelCPEGenericBase::fillPSetDescription(desc); +} + +template class PixelCPEFastParamsHost; +template class PixelCPEFastParamsHost; +template class PixelCPEFastParamsHost; diff --git a/RecoLocalTracker/SiPixelRecHits/src/alpaka/ES_PixelCPEFastParams.cc b/RecoLocalTracker/SiPixelRecHits/src/alpaka/ES_PixelCPEFastParams.cc new file mode 100644 index 0000000000000..3b4a2f74a8869 --- /dev/null +++ b/RecoLocalTracker/SiPixelRecHits/src/alpaka/ES_PixelCPEFastParams.cc @@ -0,0 +1,5 @@ +#include "RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h" +#include 
"HeterogeneousCore/AlpakaCore/interface/alpaka/typelookup.h" + +TYPELOOKUP_ALPAKA_DATA_REG(PixelCPEFastParamsPhase1); +TYPELOOKUP_ALPAKA_DATA_REG(PixelCPEFastParamsPhase2); diff --git a/RecoMTD/DetLayers/src/BTLDetLayerGeometryBuilder.cc b/RecoMTD/DetLayers/src/BTLDetLayerGeometryBuilder.cc index efb2a1ae8e61b..a814be8b2ad48 100644 --- a/RecoMTD/DetLayers/src/BTLDetLayerGeometryBuilder.cc +++ b/RecoMTD/DetLayers/src/BTLDetLayerGeometryBuilder.cc @@ -30,7 +30,7 @@ vector BTLDetLayerGeometryBuilder::buildLayers(const MTDGeometry& geo const int mtdTopologyMode = topo.getMTDTopologyMode(); BTLDetId::CrysLayout btlL = MTDTopologyMode::crysLayoutFromTopoMode(mtdTopologyMode); - if (btlL != BTLDetId::CrysLayout::v2) { + if (btlL != BTLDetId::CrysLayout::v2 && btlL != BTLDetId::CrysLayout::v3) { for (unsigned tray = 1; tray <= BTLDetId::HALF_ROD; ++tray) { vector geomDets; for (unsigned module = 1; module <= BTLDetId::kModulesPerRODBarPhiFlat; ++module) { diff --git a/RecoMTD/DetLayers/src/ETLDetLayerGeometryBuilder.cc b/RecoMTD/DetLayers/src/ETLDetLayerGeometryBuilder.cc index aec6258a1063b..d207aa6f212b6 100644 --- a/RecoMTD/DetLayers/src/ETLDetLayerGeometryBuilder.cc +++ b/RecoMTD/DetLayers/src/ETLDetLayerGeometryBuilder.cc @@ -29,6 +29,8 @@ pair, vector > ETLDetLayerGeometryBuilder::buildLay nSector *= ETLDetId::kETLv4maxSector; } else if (etlL == ETLDetId::EtlLayout::v5) { nSector *= ETLDetId::kETLv5maxSector; + } else if (etlL == ETLDetId::EtlLayout::v8) { + nSector *= ETLDetId::kETLv5maxSector; } else { throw cms::Exception("MTDDetLayers") << "Not implemented scenario " << mtdTopologyMode; } diff --git a/RecoMTD/DetLayers/src/MTDDetLayerGeometry.cc b/RecoMTD/DetLayers/src/MTDDetLayerGeometry.cc index 8fa5b760bb57c..5472945566382 100644 --- a/RecoMTD/DetLayers/src/MTDDetLayerGeometry.cc +++ b/RecoMTD/DetLayers/src/MTDDetLayerGeometry.cc @@ -75,7 +75,7 @@ void MTDDetLayerGeometry::addBTLLayers(const vector& dtlayers) { DetId MTDDetLayerGeometry::makeDetLayerId(const 
DetLayer* detLayer) const { if (detLayer->subDetector() == GeomDetEnumerators::TimingEndcap) { ETLDetId id(detLayer->basicComponents().front()->geographicalId().rawId()); - return ETLDetId(id.mtdSide(), 0, 0, 0); + return ETLDetId(id.mtdSide(), 0, 0, 0, 0); // Constructor of new geometry is compatible with prev8 } else if (detLayer->subDetector() == GeomDetEnumerators::TimingBarrel) { BTLDetId id(detLayer->basicComponents().front()->geographicalId().rawId()); return BTLDetId(id.mtdSide(), 0, 0, 0, 0); @@ -105,7 +105,7 @@ const DetLayer* MTDDetLayerGeometry::idToLayer(const DetId& id) const { if (detId.mtdSubDetector() == 2) { // 2 is ETL ETLDetId etlId(detId.rawId()); - idout = ETLDetId(etlId.mtdSide(), 0, 0, 0); + idout = ETLDetId(etlId.mtdSide(), 0, 0, 0, 0); } else if (detId.mtdSubDetector() == 1) { // 1 is BTL BTLDetId btlId(detId.rawId()); idout = BTLDetId(btlId.mtdSide(), 0, 0, 0, 0); diff --git a/RecoMTD/TimingIDTools/plugins/TOFPIDProducer.cc b/RecoMTD/TimingIDTools/plugins/TOFPIDProducer.cc index 1ae9f060d8fa6..d1144ca7c2ec1 100644 --- a/RecoMTD/TimingIDTools/plugins/TOFPIDProducer.cc +++ b/RecoMTD/TimingIDTools/plugins/TOFPIDProducer.cc @@ -49,11 +49,18 @@ class TOFPIDProducer : public edm::stream::EDProducer<> { edm::EDGetTokenT> tofkToken_; edm::EDGetTokenT> tofpToken_; edm::EDGetTokenT vtxsToken_; - double vtxMaxSigmaT_; - double maxDz_; - double maxDtSignificance_; - double minProbHeavy_; - double fixedT0Error_; + edm::EDGetTokenT> trackMTDTimeQualityToken_; + const double vtxMaxSigmaT_; + const double maxDz_; + const double maxDtSignificance_; + const double minProbHeavy_; + const double fixedT0Error_; + const double probPion_; + const double probKaon_; + const double probProton_; + const double minTrackTimeQuality_; + const bool MVASel_; + const bool vertexReassignment_; }; TOFPIDProducer::TOFPIDProducer(const ParameterSet& iConfig) @@ -65,11 +72,19 @@ TOFPIDProducer::TOFPIDProducer(const ParameterSet& iConfig) 
tofkToken_(consumes>(iConfig.getParameter("tofkSrc"))), tofpToken_(consumes>(iConfig.getParameter("tofpSrc"))), vtxsToken_(consumes(iConfig.getParameter("vtxsSrc"))), + trackMTDTimeQualityToken_( + consumes>(iConfig.getParameter("trackMTDTimeQualityVMapTag"))), vtxMaxSigmaT_(iConfig.getParameter("vtxMaxSigmaT")), maxDz_(iConfig.getParameter("maxDz")), maxDtSignificance_(iConfig.getParameter("maxDtSignificance")), minProbHeavy_(iConfig.getParameter("minProbHeavy")), - fixedT0Error_(iConfig.getParameter("fixedT0Error")) { + fixedT0Error_(iConfig.getParameter("fixedT0Error")), + probPion_(iConfig.getParameter("probPion")), + probKaon_(iConfig.getParameter("probKaon")), + probProton_(iConfig.getParameter("probProton")), + minTrackTimeQuality_(iConfig.getParameter("minTrackTimeQuality")), + MVASel_(iConfig.getParameter("MVASel")), + vertexReassignment_(iConfig.getParameter("vertexReassignment")) { produces>(t0Name); produces>(sigmat0Name); produces>(t0safeName); @@ -97,6 +112,8 @@ void TOFPIDProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptio ->setComment("Input ValueMap for track tof as proton"); desc.add("vtxsSrc", edm::InputTag("unsortedOfflinePrimaryVertices4DwithPID")) ->setComment("Input primary vertex collection"); + desc.add("trackMTDTimeQualityVMapTag", edm::InputTag("mtdTrackQualityMVA:mtdQualMVA")) + ->setComment("Track MVA quality value"); desc.add("vtxMaxSigmaT", 0.025) ->setComment("Maximum primary vertex time uncertainty for use in particle id [ns]"); desc.add("maxDz", 0.1) @@ -107,6 +124,12 @@ void TOFPIDProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptio desc.add("minProbHeavy", 0.75) ->setComment("Minimum probability for a particle to be a kaon or proton before reassigning the timestamp"); desc.add("fixedT0Error", 0.)->setComment("Use a fixed T0 uncertainty [ns]"); + desc.add("probPion", 1.)->setComment("A priori probability pions"); + desc.add("probKaon", 1.)->setComment("A priori probability kaons"); + 
desc.add("probProton", 1.)->setComment("A priori probability for protons"); + desc.add("minTrackTimeQuality", 0.8)->setComment("Minimum MVA Quality selection on tracks"); + desc.add("MVASel", false)->setComment("Use MVA Quality selection"); + desc.add("vertexReassignment", true)->setComment("Track-vertex reassignment"); descriptions.add("tofPIDProducer", desc); } @@ -142,6 +165,8 @@ void TOFPIDProducer::produce(edm::Event& ev, const edm::EventSetup& es) { const auto& vtxs = ev.get(vtxsToken_); + const auto& trackMVAQualIn = ev.get(trackMTDTimeQualityToken_); + //output value maps (PID probabilities and recalculated time at beamline) std::vector t0OutRaw; std::vector sigmat0OutRaw; @@ -165,7 +190,9 @@ void TOFPIDProducer::produce(edm::Event& ev, const edm::EventSetup& es) { float prob_k = -1.; float prob_p = -1.; - if (sigmat0 > 0.) { + float trackMVAQual = trackMVAQualIn[trackref]; + + if (sigmat0 > 0. && (!MVASel_ || (MVASel_ && trackMVAQual >= minTrackTimeQuality_))) { double rsigmazsq = 1. / track.dzError() / track.dzError(); double rsigmat = 1. / sigmatmtd; @@ -239,7 +266,12 @@ void TOFPIDProducer::produce(edm::Event& ev, const edm::EventSetup& es) { double chisqmin_k = std::numeric_limits::max(); double chisqmin_p = std::numeric_limits::max(); //loop through vertices and check for better matches - for (const reco::Vertex& vtx : vtxs) { + for (unsigned int ivtx = 0; ivtx < vtxs.size(); ++ivtx) { + const reco::Vertex& vtx = vtxs[ivtx]; + if (!vertexReassignment_) { + if (ivtx != (unsigned int)vtxidx) + continue; + } if (!(vtx.tError() > 0. && vtx.tError() < vtxMaxSigmaT_)) { continue; } @@ -283,9 +315,9 @@ void TOFPIDProducer::produce(edm::Event& ev, const edm::EventSetup& es) { //compute PID probabilities //*TODO* deal with heavier nucleons and/or BSM case here? 
- double rawprob_pi = exp(-0.5 * chisqmin_pi); - double rawprob_k = exp(-0.5 * chisqmin_k); - double rawprob_p = exp(-0.5 * chisqmin_p); + double rawprob_pi = probPion_ * exp(-0.5 * chisqmin_pi); + double rawprob_k = probKaon_ * exp(-0.5 * chisqmin_k); + double rawprob_p = probProton_ * exp(-0.5 * chisqmin_p); double normprob = 1. / (rawprob_pi + rawprob_k + rawprob_p); diff --git a/RecoMTD/TrackExtender/plugins/TrackExtenderWithMTD.cc b/RecoMTD/TrackExtender/plugins/TrackExtenderWithMTD.cc index 3adf7eab66f03..363f63a79bba8 100644 --- a/RecoMTD/TrackExtender/plugins/TrackExtenderWithMTD.cc +++ b/RecoMTD/TrackExtender/plugins/TrackExtenderWithMTD.cc @@ -96,15 +96,19 @@ namespace { class TrackSegments { public: - TrackSegments() = default; + TrackSegments() { + sigmaTofs_.reserve(30); // observed upper limit on nSegments + }; - inline uint32_t addSegment(float tPath, float tMom2) { + inline uint32_t addSegment(float tPath, float tMom2, float sigmaMom) { segmentPathOvc_.emplace_back(tPath * c_inv); segmentMom2_.emplace_back(tMom2); + segmentSigmaMom_.emplace_back(sigmaMom); nSegment_++; LogTrace("TrackExtenderWithMTD") << "addSegment # " << nSegment_ << " s = " << tPath - << " p = " << std::sqrt(tMom2); + << " p = " << std::sqrt(tMom2) << " sigma_p = " << sigmaMom + << " sigma_p/p = " << sigmaMom / std::sqrt(tMom2) * 100 << " %"; return nSegment_; } @@ -118,11 +122,49 @@ namespace { LogTrace("TrackExtenderWithMTD") << " TOF Segment # " << iSeg + 1 << " p = " << std::sqrt(segmentMom2_[iSeg]) << " tof = " << tof; + +#ifdef EDM_ML_DEBUG + float sigma_tof = segmentPathOvc_[iSeg] * segmentSigmaMom_[iSeg] / + (segmentMom2_[iSeg] * sqrt(segmentMom2_[iSeg] + 1 / mass_inv2) * mass_inv2); + + LogTrace("TrackExtenderWithMTD") << "TOF Segment # " << iSeg + 1 << std::fixed << std::setw(6) + << " tof segment = " << segmentPathOvc_[iSeg] / beta << std::scientific + << "+/- " << sigma_tof << std::fixed + << "(rel. err. 
= " << sigma_tof / (segmentPathOvc_[iSeg] / beta) * 100 + << " %)"; +#endif } return tof; } + inline float computeSigmaTof(float mass_inv2) { + float sigmatof = 0.; + + // remove previously calculated sigmaTofs + sigmaTofs_.clear(); + + // compute sigma(tof) on each segment first by propagating sigma(p) + // also add diagonal terms to sigmatof + float sigma = 0.; + for (uint32_t iSeg = 0; iSeg < nSegment_; iSeg++) { + sigma = segmentPathOvc_[iSeg] * segmentSigmaMom_[iSeg] / + (segmentMom2_[iSeg] * sqrt(segmentMom2_[iSeg] + 1 / mass_inv2) * mass_inv2); + sigmaTofs_.push_back(sigma); + + sigmatof += sigma * sigma; + } + + // compute sigma on sum of tofs assuming full correlation between segments + for (uint32_t iSeg = 0; iSeg < nSegment_; iSeg++) { + for (uint32_t jSeg = iSeg + 1; jSeg < nSegment_; jSeg++) { + sigmatof += 2 * sigmaTofs_[iSeg] * sigmaTofs_[jSeg]; + } + } + + return sqrt(sigmatof); + } + inline uint32_t size() const { return nSegment_; } inline uint32_t removeFirstSegment() { @@ -144,6 +186,9 @@ namespace { uint32_t nSegment_ = 0; std::vector segmentPathOvc_; std::vector segmentMom2_; + std::vector segmentSigmaMom_; + + std::vector sigmaTofs_; }; struct TrackTofPidInfo { @@ -164,14 +209,17 @@ namespace { float gammasq_pi; float beta_pi; float dt_pi; + float sigma_dt_pi; float gammasq_k; float beta_k; float dt_k; + float sigma_dt_k; float gammasq_p; float beta_p; float dt_p; + float sigma_dt_p; float prob_pi; float prob_k; @@ -179,6 +227,7 @@ namespace { }; enum class TofCalc { kCost = 1, kSegm = 2, kMixd = 3 }; + enum class SigmaTofCalc { kCost = 1, kSegm = 2 }; const TrackTofPidInfo computeTrackTofPidInfo(float magp2, float length, @@ -188,7 +237,8 @@ namespace { float t_vtx, float t_vtx_err, bool addPIDError = true, - TofCalc choice = TofCalc::kCost) { + TofCalc choice = TofCalc::kCost, + SigmaTofCalc sigma_choice = SigmaTofCalc::kCost) { constexpr float m_pi = 0.13957018f; constexpr float m_pi_inv2 = 1.0f / m_pi / m_pi; constexpr float m_k = 
0.493677f; @@ -218,17 +268,36 @@ namespace { return res; }; + auto sigmadeltat = [&](const float mass_inv2) { + float res(1.f); + switch (sigma_choice) { + case SigmaTofCalc::kCost: + // sigma(t) = sigma(p) * |dt/dp| = sigma(p) * DeltaL/c * m^2 / (p^2 * E) + res = tofpid.pathlength * c_inv * trs.segmentSigmaMom_[trs.nSegment_ - 1] / + (magp2 * sqrt(magp2 + 1 / mass_inv2) * mass_inv2); + break; + case SigmaTofCalc::kSegm: + res = trs.computeSigmaTof(mass_inv2); + break; + } + + return res; + }; + tofpid.gammasq_pi = 1.f + magp2 * m_pi_inv2; tofpid.beta_pi = std::sqrt(1.f - 1.f / tofpid.gammasq_pi); tofpid.dt_pi = deltat(m_pi_inv2, tofpid.beta_pi); + tofpid.sigma_dt_pi = sigmadeltat(m_pi_inv2); tofpid.gammasq_k = 1.f + magp2 * m_k_inv2; tofpid.beta_k = std::sqrt(1.f - 1.f / tofpid.gammasq_k); tofpid.dt_k = deltat(m_k_inv2, tofpid.beta_k); + tofpid.sigma_dt_k = sigmadeltat(m_k_inv2); tofpid.gammasq_p = 1.f + magp2 * m_p_inv2; tofpid.beta_p = std::sqrt(1.f - 1.f / tofpid.gammasq_p); tofpid.dt_p = deltat(m_p_inv2, tofpid.beta_p); + tofpid.sigma_dt_p = sigmadeltat(m_p_inv2); tofpid.dt = tofpid.tmtd - tofpid.dt_pi - t_vtx; //assume by default the pi hypothesis tofpid.dterror = sqrt(tofpid.tmtderror * tofpid.tmtderror + t_vtx_err * t_vtx_err); @@ -323,7 +392,13 @@ namespace { validpropagation = false; } pathlength1 += layerpathlength; - trs.addSegment(layerpathlength, (it + 1)->updatedState().globalMomentum().mag2()); + + // sigma(p) from curvilinear error (on q/p) + float sigma_p = sqrt((it + 1)->updatedState().curvilinearError().matrix()(0, 0)) * + (it + 1)->updatedState().globalMomentum().mag2(); + + trs.addSegment(layerpathlength, (it + 1)->updatedState().globalMomentum().mag2(), sigma_p); + LogTrace("TrackExtenderWithMTD") << "TSOS " << std::fixed << std::setw(4) << trs.size() << " R_i " << std::fixed << std::setw(14) << it->updatedState().globalPosition().perp() << " z_i " << std::fixed << std::setw(14) << it->updatedState().globalPosition().z() @@ -345,12 +420,19 @@ 
namespace { validpropagation = false; } pathlength = pathlength1 + pathlength2; - trs.addSegment(pathlength2, tscblPCA.momentum().mag2()); + + float sigma_p = sqrt(tscblPCA.curvilinearError().matrix()(0, 0)) * tscblPCA.momentum().mag2(); + + trs.addSegment(pathlength2, tscblPCA.momentum().mag2(), sigma_p); + LogTrace("TrackExtenderWithMTD") << "TSOS " << std::fixed << std::setw(4) << trs.size() << " R_e " << std::fixed << std::setw(14) << tscblPCA.position().perp() << " z_e " << std::fixed << std::setw(14) << tscblPCA.position().z() << " p " << std::fixed << std::setw(14) << tscblPCA.momentum().mag() << " dp " << std::fixed << std::setw(14) - << tscblPCA.momentum().mag() - oldp; + << tscblPCA.momentum().mag() - oldp << " sigma_p = " << std::fixed << std::setw(14) + << sigma_p << " sigma_p/p = " << std::fixed << std::setw(14) + << sigma_p / tscblPCA.momentum().mag() * 100 << " %"; + return validpropagation; } @@ -459,7 +541,10 @@ class TrackExtenderWithMTDT : public edm::stream::EDProducer<> { float& sigmatmtdOut, float& tofpi, float& tofk, - float& tofp) const; + float& tofp, + float& sigmatofpi, + float& sigmatofk, + float& sigmatofp) const; reco::TrackExtra buildTrackExtra(const Trajectory& trajectory) const; string dumpLayer(const DetLayer* layer) const; @@ -481,6 +566,9 @@ class TrackExtenderWithMTDT : public edm::stream::EDProducer<> { edm::EDPutToken tofpiOrigTrkToken_; edm::EDPutToken tofkOrigTrkToken_; edm::EDPutToken tofpOrigTrkToken_; + edm::EDPutToken sigmatofpiOrigTrkToken_; + edm::EDPutToken sigmatofkOrigTrkToken_; + edm::EDPutToken sigmatofpOrigTrkToken_; edm::EDPutToken assocOrigTrkToken_; edm::EDGetTokenT tracksToken_; @@ -569,6 +657,9 @@ TrackExtenderWithMTDT::TrackExtenderWithMTDT(const ParameterSet tofpiOrigTrkToken_ = produces>("generalTrackTofPi"); tofkOrigTrkToken_ = produces>("generalTrackTofK"); tofpOrigTrkToken_ = produces>("generalTrackTofP"); + sigmatofpiOrigTrkToken_ = produces>("generalTrackSigmaTofPi"); + sigmatofkOrigTrkToken_ = 
produces>("generalTrackSigmaTofK"); + sigmatofpOrigTrkToken_ = produces>("generalTrackSigmaTofP"); assocOrigTrkToken_ = produces>("generalTrackassoc"); builderToken_ = esConsumes(edm::ESInputTag("", transientTrackBuilder_)); @@ -683,6 +774,9 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: std::vector tofpiOrigTrkRaw; std::vector tofkOrigTrkRaw; std::vector tofpOrigTrkRaw; + std::vector sigmatofpiOrigTrkRaw; + std::vector sigmatofkOrigTrkRaw; + std::vector sigmatofpOrigTrkRaw; std::vector assocOrigTrkRaw; auto const tracksH = ev.getHandle(tracksToken_); @@ -727,6 +821,9 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: LogTrace("TrackExtenderWithMTD") << "TrackExtenderWithMTD: extrapolating track " << itrack << " p/pT = " << track->p() << " " << track->pt() << " eta = " << track->eta(); + LogTrace("TrackExtenderWithMTD") << "TrackExtenderWithMTD: sigma_p = " + << sqrt(track->covariance()(0, 0)) * track->p2() + << " sigma_p/p = " << sqrt(track->covariance()(0, 0)) * track->p() * 100 << " %"; float trackVtxTime = 0.f; if (useVertex_) { @@ -803,12 +900,14 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: const auto& trajwithmtd = mtdthits.empty() ? std::vector(1, trajs) : theTransformer->transform(ttrack, thits); float pMap = 0.f, betaMap = 0.f, t0Map = 0.f, sigmat0Map = -1.f, pathLengthMap = -1.f, tmtdMap = 0.f, - sigmatmtdMap = -1.f, tofpiMap = 0.f, tofkMap = 0.f, tofpMap = 0.f; + sigmatmtdMap = -1.f, tofpiMap = 0.f, tofkMap = 0.f, tofpMap = 0.f, sigmatofpiMap = -1.f, sigmatofkMap = -1.f, + sigmatofpMap = -1.f; int iMap = -1; for (const auto& trj : trajwithmtd) { const auto& thetrj = (updateTraj_ ? 
trj : trajs); - float pathLength = 0.f, tmtd = 0.f, sigmatmtd = -1.f, tofpi = 0.f, tofk = 0.f, tofp = 0.f; + float pathLength = 0.f, tmtd = 0.f, sigmatmtd = -1.f, tofpi = 0.f, tofk = 0.f, tofp = 0.f, sigmatofpi = -1.f, + sigmatofk = -1.f, sigmatofp = -1.f; LogTrace("TrackExtenderWithMTD") << "TrackExtenderWithMTD: refit track " << itrack << " p/pT = " << track->p() << " " << track->pt() << " eta = " << track->eta(); reco::Track result = buildTrack(track, @@ -823,7 +922,10 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: sigmatmtd, tofpi, tofk, - tofp); + tofp, + sigmatofpi, + sigmatofk, + sigmatofp); if (result.ndof() >= 0) { /// setup the track extras reco::TrackExtra::TrajParams trajParams; @@ -856,6 +958,9 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: tofpiMap = tofpi; tofkMap = tofk; tofpMap = tofp; + sigmatofpiMap = sigmatofpi; + sigmatofkMap = sigmatofk; + sigmatofpMap = sigmatofp; reco::TrackExtraRef extraRef(extrasRefProd, extras->size() - 1); backtrack.setExtra((updateExtra_ ? extraRef : track->extra())); for (unsigned ihit = hitsstart; ihit < hitsend; ++ihit) { @@ -865,7 +970,12 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: npixEndcap.push_back(backtrack.hitPattern().numberOfValidPixelEndcapHits()); LogTrace("TrackExtenderWithMTD") << "TrackExtenderWithMTD: tmtd " << tmtdMap << " +/- " << sigmatmtdMap << " t0 " << t0Map << " +/- " << sigmat0Map << " tof pi/K/p " << tofpiMap - << " " << tofkMap << " " << tofpMap; + << "+/-" << fmt::format("{:0.2g}", sigmatofpiMap) << " (" + << fmt::format("{:0.2g}", sigmatofpiMap / tofpiMap * 100) << "%) " << tofkMap + << "+/-" << fmt::format("{:0.2g}", sigmatofkMap) << " (" + << fmt::format("{:0.2g}", sigmatofkMap / tofkMap * 100) << "%) " << tofpMap + << "+/-" << fmt::format("{:0.2g}", sigmatofpMap) << " (" + << fmt::format("{:0.2g}", sigmatofpMap / tofpMap * 100) << "%) "; } else { LogTrace("TrackExtenderWithMTD") << "Error in the MTD track refitting. 
This should not happen"; } @@ -881,6 +991,9 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: tofpiOrigTrkRaw.push_back(tofpiMap); tofkOrigTrkRaw.push_back(tofkMap); tofpOrigTrkRaw.push_back(tofpMap); + sigmatofpiOrigTrkRaw.push_back(sigmatofpiMap); + sigmatofkOrigTrkRaw.push_back(sigmatofkMap); + sigmatofpOrigTrkRaw.push_back(sigmatofpMap); assocOrigTrkRaw.push_back(iMap); if (iMap == -1) { @@ -915,6 +1028,9 @@ void TrackExtenderWithMTDT::produce(edm::Event& ev, const edm:: fillValueMap(ev, tracksH, tofpiOrigTrkRaw, tofpiOrigTrkToken_); fillValueMap(ev, tracksH, tofkOrigTrkRaw, tofkOrigTrkToken_); fillValueMap(ev, tracksH, tofpOrigTrkRaw, tofpOrigTrkToken_); + fillValueMap(ev, tracksH, sigmatofpiOrigTrkRaw, sigmatofpiOrigTrkToken_); + fillValueMap(ev, tracksH, sigmatofkOrigTrkRaw, sigmatofkOrigTrkToken_); + fillValueMap(ev, tracksH, sigmatofpOrigTrkRaw, sigmatofpOrigTrkToken_); fillValueMap(ev, tracksH, assocOrigTrkRaw, assocOrigTrkToken_); } @@ -1176,7 +1292,10 @@ reco::Track TrackExtenderWithMTDT::buildTrack(const reco::Track float& sigmatmtdOut, float& tofpi, float& tofk, - float& tofp) const { + float& tofp, + float& sigmatofpi, + float& sigmatofk, + float& sigmatofp) const { TrajectoryStateClosestToBeamLine tscbl; bool tsbcl_status = getTrajectoryStateClosestToBeamLine(traj, bs, thePropagator, tscbl); @@ -1307,8 +1426,9 @@ reco::Track TrackExtenderWithMTDT::buildTrack(const reco::Track if (validmtd && validpropagation) { //here add the PID uncertainty for later use in the 1st step of 4D vtx reconstruction - TrackTofPidInfo tofInfo = - computeTrackTofPidInfo(p.mag2(), pathlength, trs, thit, thiterror, 0.f, 0.f, true, TofCalc::kSegm); + TrackTofPidInfo tofInfo = computeTrackTofPidInfo( + p.mag2(), pathlength, trs, thit, thiterror, 0.f, 0.f, true, TofCalc::kSegm, SigmaTofCalc::kCost); + pathLengthOut = pathlength; // set path length if we've got a timing hit tmtdOut = thit; sigmatmtdOut = thiterror; @@ -1319,6 +1439,9 @@ reco::Track 
TrackExtenderWithMTDT::buildTrack(const reco::Track tofpi = tofInfo.dt_pi; tofk = tofInfo.dt_k; tofp = tofInfo.dt_p; + sigmatofpi = tofInfo.sigma_dt_pi; + sigmatofk = tofInfo.sigma_dt_k; + sigmatofp = tofInfo.sigma_dt_p; } } @@ -1426,4 +1549,4 @@ string TrackExtenderWithMTDT::dumpLayer(const DetLayer* layer) #include "DataFormats/GsfTrackReco/interface/GsfTrackFwd.h" typedef TrackExtenderWithMTDT TrackExtenderWithMTD; -DEFINE_FWK_MODULE(TrackExtenderWithMTD); +DEFINE_FWK_MODULE(TrackExtenderWithMTD); \ No newline at end of file diff --git a/RecoMuon/MuonIdentification/plugins/MuonIdProducer.cc b/RecoMuon/MuonIdentification/plugins/MuonIdProducer.cc index e3619f9f92c49..4a462888aac66 100644 --- a/RecoMuon/MuonIdentification/plugins/MuonIdProducer.cc +++ b/RecoMuon/MuonIdentification/plugins/MuonIdProducer.cc @@ -684,7 +684,9 @@ void MuonIdProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) } if (arbitrateTrackerMuons_) { - fillArbitrationInfo(outputMuons.get()); + fillArbitrationInfo(outputMuons.get(), reco::Muon::TrackerMuon); + fillArbitrationInfo(outputMuons.get(), reco::Muon::GEMMuon); + fillArbitrationInfo(outputMuons.get(), reco::Muon::ME0Muon); arbitrateMuons(outputMuons.get(), caloMuons.get()); } @@ -798,10 +800,11 @@ void MuonIdProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) bool MuonIdProducer::isGoodTrackerMuon(const reco::Muon& muon) { if (muon.track()->pt() < minPt_ || muon.track()->p() < minP_) return false; - if (addExtraSoftMuons_ && muon.pt() < 5 && std::abs(muon.eta()) < 1.5 && - muon.numberOfMatches(reco::Muon::NoArbitration) >= 1) + // NoArbitration checks for CSC/DT segments only, also use ME0 segments + int numMatches = muon.numberOfMatches(reco::Muon::NoArbitration); + if (addExtraSoftMuons_ && muon.pt() < 5 && std::abs(muon.eta()) < 1.5 && numMatches >= 1) return true; - return (muon.numberOfMatches(reco::Muon::NoArbitration) >= minNumberOfMatches_); + return (numMatches >= minNumberOfMatches_); } bool 
MuonIdProducer::isGoodCaloMuon(const reco::CaloMuon& caloMuon) { @@ -825,8 +828,14 @@ bool MuonIdProducer::isGoodGEMMuon(const reco::Muon& muon) { return false; if (muon.track()->pt() < minPt_ || muon.track()->p() < minP_) return false; - return (muon.numberOfMatches(reco::Muon::GEMSegmentAndTrackArbitration) + - muon.numberOfMatches(reco::Muon::GEMHitAndTrackArbitration)) >= 1; + // + int numMatches = 0; + for (auto& chamberMatch : muon.matches()) { + if (chamberMatch.gemMatches.empty()) + continue; + numMatches += chamberMatch.gemMatches.size(); + } + return (numMatches + muon.numberOfMatches(reco::Muon::GEMHitAndTrackArbitration)) >= 1; } bool MuonIdProducer::isGoodME0Muon(const reco::Muon& muon) { @@ -1139,7 +1148,9 @@ void MuonIdProducer::arbitrateMuons(reco::MuonCollection* muons, reco::CaloMuonC // if a muon was exclusively TrackerMuon check if it can be a calo muon for (reco::MuonCollection::iterator muon = muons->begin(); muon != muons->end();) { if (muon->isTrackerMuon()) { - if (muon->numberOfMatches(arbitration) < minNumberOfMatches_) { + int numMatches = + muon->numberOfMatches(reco::Muon::GEMSegmentAndTrackArbitration) + muon->numberOfMatches(arbitration); + if (numMatches < minNumberOfMatches_) { // TrackerMuon failed arbitration // If not any other base type - erase the element // (PFMuon is not a base type) diff --git a/RecoMuon/TrackerSeedGenerator/plugins/TSGForOIDNN.cc b/RecoMuon/TrackerSeedGenerator/plugins/TSGForOIDNN.cc index 0e7e4b1b440a0..02f10e21145ad 100644 --- a/RecoMuon/TrackerSeedGenerator/plugins/TSGForOIDNN.cc +++ b/RecoMuon/TrackerSeedGenerator/plugins/TSGForOIDNN.cc @@ -43,6 +43,7 @@ class TSGForOIDNN : public edm::global::EDProducer<> { ~TSGForOIDNN() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); void produce(edm::StreamID sid, edm::Event& iEvent, const edm::EventSetup& iSetup) const override; + void beginJob() override; private: /// Labels for input collections @@ -151,6 +152,8 @@ class 
TSGForOIDNN : public edm::global::EDProducer<> { const TrajectoryStateOnSurface& tsos_IP, const TrajectoryStateOnSurface& tsos_MuS) const; + void initializeTensorflow() const; + /// Container for DNN outupts struct StrategyParameters { int nHBd, nHLIP, nHLMuS, sf; @@ -246,6 +249,50 @@ TSGForOIDNN::~TSGForOIDNN() { } } +void TSGForOIDNN::beginJob() { initializeTensorflow(); } + +namespace { + std::unordered_map dummyFeatureMap() { + std::unordered_map the_map; + the_map["pt"] = 2.0; + the_map["eta"] = 0.5; + the_map["phi"] = 1.0; + the_map["validHits"] = 0.; + the_map["tsos_IP_eta"] = -999; + the_map["tsos_IP_phi"] = -999; + the_map["tsos_IP_pt"] = -999; + the_map["tsos_IP_pt_eta"] = -999; + the_map["tsos_IP_pt_phi"] = -999; + the_map["err0_IP"] = -999; + the_map["err1_IP"] = -999; + the_map["err2_IP"] = -999; + the_map["err3_IP"] = -999; + the_map["err4_IP"] = -999; + the_map["tsos_IP_valid"] = 0.0; + the_map["tsos_MuS_eta"] = -999; + the_map["tsos_MuS_phi"] = -999; + the_map["tsos_MuS_pt"] = -999; + the_map["tsos_MuS_pt_eta"] = -999; + the_map["tsos_MuS_pt_phi"] = -999; + the_map["err0_MuS"] = -999; + the_map["err1_MuS"] = -999; + the_map["err2_MuS"] = -999; + the_map["err3_MuS"] = -999; + the_map["err4_MuS"] = -999; + the_map["tsos_MuS_valid"] = 0.0; + return the_map; + } +} // namespace + +void TSGForOIDNN::initializeTensorflow() const { + if (getStrategyFromDNN_ and not useRegressor_) { + // Container for DNN outputs + StrategyParameters strPars; + bool dnnSuccess = false; + evaluateClassifier(dummyFeatureMap(), tf_session_, metadata_, strPars, dnnSuccess); + } +} + // // Produce seeds // diff --git a/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS.sh b/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS.sh new file mode 100755 index 0000000000000..80b93b2230b07 --- /dev/null +++ b/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS.sh @@ -0,0 +1,5 @@ + #!/bin/bash -ex +TEST_DIR=$CMSSW_BASE/src/RecoPPS/Local/test +echo "test dir: $TEST_DIR" + +cmsRun 
${TEST_DIR}/2023_lhcinfo_test_recoCTPPS_cfg.py diff --git a/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS_cfg.py b/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS_cfg.py index 790d9e66b512d..f15d42b838374 100644 --- a/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS_cfg.py +++ b/RecoPPS/Local/test/2023_lhcinfo_test_recoCTPPS_cfg.py @@ -3,7 +3,7 @@ process = cms.Process('RECODQM', Run3) -process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) +process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(50) ) process.verbosity = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # minimum of logs @@ -31,7 +31,7 @@ from Configuration.AlCa.GlobalTag import GlobalTag -process.GlobalTag.globaltag = "130X_dataRun3_Prompt_forLHCInfo_Candidate_2023_08_08_10_52_01" +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run3_data') # local RP reconstruction chain with standard settings process.load("RecoPPS.Configuration.recoCTPPS_cff") diff --git a/RecoPPS/Local/test/BuildFile.xml b/RecoPPS/Local/test/BuildFile.xml index 7f53a44b17c3a..eb1c199017400 100644 --- a/RecoPPS/Local/test/BuildFile.xml +++ b/RecoPPS/Local/test/BuildFile.xml @@ -1 +1,2 @@ + diff --git a/RecoPPS/ProtonReconstruction/plugins/CTPPSProtonProducer.cc b/RecoPPS/ProtonReconstruction/plugins/CTPPSProtonProducer.cc index 73809feb4aae5..43c3a0a5b2663 100644 --- a/RecoPPS/ProtonReconstruction/plugins/CTPPSProtonProducer.cc +++ b/RecoPPS/ProtonReconstruction/plugins/CTPPSProtonProducer.cc @@ -189,7 +189,7 @@ void CTPPSProtonProducer::fillDescriptions(edm::ConfigurationDescriptions &descr desc.add("multiRPAlgorithm", "chi2") ->setComment("algorithm for multi-RP reco, options include chi2, newton, anal-iter"); - descriptions.add("ctppsProtons", desc); + descriptions.add("ctppsProtonsDefault", desc); } //---------------------------------------------------------------------------------------------------- diff --git a/RecoPPS/ProtonReconstruction/python/ctppsProtons_cff.py 
b/RecoPPS/ProtonReconstruction/python/ctppsProtons_cff.py index 2a564e400b589..25f6dba850683 100644 --- a/RecoPPS/ProtonReconstruction/python/ctppsProtons_cff.py +++ b/RecoPPS/ProtonReconstruction/python/ctppsProtons_cff.py @@ -1,17 +1,6 @@ import FWCore.ParameterSet.Config as cms -# import default alignment settings -from CalibPPS.ESProducers.ctppsAlignment_cff import * +from RecoPPS.ProtonReconstruction.ctppsProtons_cfi import * # import default optics settings from CalibPPS.ESProducers.ctppsOpticalFunctions_cff import * - -# import and adjust proton-reconstructions settings -from RecoPPS.ProtonReconstruction.ctppsProtons_cfi import * - - -ctppsProtons.pixelDiscardBXShiftedTracks = True -ctppsProtons.default_time = -999. - -from Configuration.Eras.Modifier_run3_common_cff import run3_common -run3_common.toModify(ctppsProtons, useNewLHCInfo = True) \ No newline at end of file diff --git a/RecoPPS/ProtonReconstruction/python/ctppsProtons_cfi.py b/RecoPPS/ProtonReconstruction/python/ctppsProtons_cfi.py new file mode 100644 index 0000000000000..9421ed546ddd2 --- /dev/null +++ b/RecoPPS/ProtonReconstruction/python/ctppsProtons_cfi.py @@ -0,0 +1,12 @@ +# import and adjust proton-reconstructions settings +from RecoPPS.ProtonReconstruction.ctppsProtonsDefault_cfi import ctppsProtonsDefault as _ctppsProtonsDefault +ctppsProtons = _ctppsProtonsDefault.clone( + pixelDiscardBXShiftedTracks = True, + default_time = -999. 
+) + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(ctppsProtons, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsProtons, useNewLHCInfo = False) diff --git a/RecoParticleFlow/PFClusterProducer/BuildFile.xml b/RecoParticleFlow/PFClusterProducer/BuildFile.xml index 06ef529ae273b..712bc52ed5539 100644 --- a/RecoParticleFlow/PFClusterProducer/BuildFile.xml +++ b/RecoParticleFlow/PFClusterProducer/BuildFile.xml @@ -18,6 +18,15 @@ + + + + + + + + + diff --git a/RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsHostCollection.h b/RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsHostCollection.h new file mode 100644 index 0000000000000..775adb4a8638e --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsHostCollection.h @@ -0,0 +1,14 @@ +#ifndef RecoParticleFlow_PFClusterProducer_interface_PFClusterParamsHostCollection_h +#define RecoParticleFlow_PFClusterProducer_interface_PFClusterParamsHostCollection_h + +#include "DataFormats/Portable/interface/PortableHostCollection.h" + +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsSoA.h" + +namespace reco { + + using PFClusterParamsHostCollection = PortableHostCollection; + +} + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsSoA.h b/RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsSoA.h new file mode 100644 index 0000000000000..9003ce1c527b7 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsSoA.h @@ -0,0 +1,49 @@ +#ifndef RecoParticleFlow_PFClusterProducer_interface_PFClusterParamsSoA_h +#define RecoParticleFlow_PFClusterProducer_interface_PFClusterParamsSoA_h + +#include "DataFormats/SoATemplate/interface/SoACommon.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SoATemplate/interface/SoAView.h" + +namespace reco { + + 
GENERATE_SOA_LAYOUT(PFClusterParamsSoALayout, + SOA_SCALAR(int32_t, nNeigh), + SOA_SCALAR(float, seedPt2ThresholdHB), + SOA_SCALAR(float, seedPt2ThresholdHE), + SOA_COLUMN(float, seedEThresholdHB_vec), + SOA_COLUMN(float, seedEThresholdHE_vec), + SOA_COLUMN(float, topoEThresholdHB_vec), + SOA_COLUMN(float, topoEThresholdHE_vec), + SOA_SCALAR(float, showerSigma2), + SOA_SCALAR(float, minFracToKeep), + SOA_SCALAR(float, minFracTot), + SOA_SCALAR(uint32_t, maxIterations), + SOA_SCALAR(bool, excludeOtherSeeds), + SOA_SCALAR(float, stoppingTolerance), + SOA_SCALAR(float, minFracInCalc), + SOA_SCALAR(float, minAllowedNormalization), + SOA_COLUMN(float, recHitEnergyNormInvHB_vec), + SOA_COLUMN(float, recHitEnergyNormInvHE_vec), + SOA_SCALAR(float, barrelTimeResConsts_corrTermLowE), + SOA_SCALAR(float, barrelTimeResConsts_threshLowE), + SOA_SCALAR(float, barrelTimeResConsts_noiseTerm), + SOA_SCALAR(float, barrelTimeResConsts_constantTermLowE2), + SOA_SCALAR(float, barrelTimeResConsts_noiseTermLowE), + SOA_SCALAR(float, barrelTimeResConsts_threshHighE), + SOA_SCALAR(float, barrelTimeResConsts_constantTerm2), + SOA_SCALAR(float, barrelTimeResConsts_resHighE2), + SOA_SCALAR(float, endcapTimeResConsts_corrTermLowE), + SOA_SCALAR(float, endcapTimeResConsts_threshLowE), + SOA_SCALAR(float, endcapTimeResConsts_noiseTerm), + SOA_SCALAR(float, endcapTimeResConsts_constantTermLowE2), + SOA_SCALAR(float, endcapTimeResConsts_noiseTermLowE), + SOA_SCALAR(float, endcapTimeResConsts_threshHighE), + SOA_SCALAR(float, endcapTimeResConsts_constantTerm2), + SOA_SCALAR(float, endcapTimeResConsts_resHighE2)) + + using PFClusterParamsSoA = PFClusterParamsSoALayout<>; + +} // namespace reco + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/interface/PFClusteringEdgeVarsSoA.h b/RecoParticleFlow/PFClusterProducer/interface/PFClusteringEdgeVarsSoA.h new file mode 100644 index 0000000000000..72da0070624b4 --- /dev/null +++ 
b/RecoParticleFlow/PFClusterProducer/interface/PFClusteringEdgeVarsSoA.h @@ -0,0 +1,17 @@ +#ifndef RecoParticleFlow_PFRecHitProducer_interface_PFClusteringEdgeVarsSoA_h +#define RecoParticleFlow_PFRecHitProducer_interface_PFClusteringEdgeVarsSoA_h + +#include "DataFormats/SoATemplate/interface/SoACommon.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SoATemplate/interface/SoAView.h" + +namespace reco { + + GENERATE_SOA_LAYOUT(PFClusteringEdgeVarsSoALayout, + SOA_COLUMN(int, pfrh_edgeIdx), // needs nRH + 1 allocation + SOA_COLUMN(int, pfrh_edgeList)) // needs nRH + maxNeighbors allocation + + using PFClusteringEdgeVarsSoA = PFClusteringEdgeVarsSoALayout<>; +} // namespace reco + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/interface/PFClusteringVarsSoA.h b/RecoParticleFlow/PFClusterProducer/interface/PFClusteringVarsSoA.h new file mode 100644 index 0000000000000..1e88366543ff4 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/interface/PFClusteringVarsSoA.h @@ -0,0 +1,31 @@ +#ifndef RecoParticleFlow_PFClusterProducer_interface_PFClusteringVarsSoA_h +#define RecoParticleFlow_PFClusterProducer_interface_PFClusteringVarsSoA_h + +#include "DataFormats/SoATemplate/interface/SoACommon.h" +#include "DataFormats/SoATemplate/interface/SoALayout.h" +#include "DataFormats/SoATemplate/interface/SoAView.h" + +namespace reco { + + GENERATE_SOA_LAYOUT(PFClusteringVarsSoALayout, + SOA_COLUMN(int, pfrh_topoId), + SOA_COLUMN(int, pfrh_isSeed), + SOA_COLUMN(int, pfrh_passTopoThresh), + SOA_COLUMN(int, topoSeedCount), + SOA_COLUMN(int, topoRHCount), + SOA_COLUMN(int, seedFracOffsets), + SOA_COLUMN(int, topoSeedOffsets), + SOA_COLUMN(int, topoSeedList), + SOA_SCALAR(int, pcrhFracSize), + SOA_COLUMN(int, rhCount), + SOA_SCALAR(int, nEdges), + SOA_COLUMN(int, rhcount), + SOA_SCALAR(int, nTopos), + SOA_COLUMN(int, topoIds), + SOA_SCALAR(int, nRHFracs), + SOA_COLUMN(int, rhIdxToSeedIdx)) + + using PFClusteringVarsSoA = 
PFClusteringVarsSoALayout<>; +} // namespace reco + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h b/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h new file mode 100644 index 0000000000000..228246a940101 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h @@ -0,0 +1,21 @@ +#ifndef RecoParticleFlow_PFClusterProducer_interface_alpaka_PFClusterParamsDeviceCollection_h +#define RecoParticleFlow_PFClusterProducer_interface_alpaka_PFClusterParamsDeviceCollection_h + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsHostCollection.h" +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsSoA.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { + + using ::reco::PFClusterParamsHostCollection; + + using PFClusterParamsDeviceCollection = PortableCollection<::reco::PFClusterParamsSoA>; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco + +// check that the portable device collection for the host device is the same as the portable host collection +ASSERT_DEVICE_MATCHES_HOST_COLLECTION(reco::PFClusterParamsDeviceCollection, reco::PFClusterParamsHostCollection); + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringEdgeVarsDeviceCollection.h b/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringEdgeVarsDeviceCollection.h new file mode 100644 index 0000000000000..d2c8943af08d1 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringEdgeVarsDeviceCollection.h @@ -0,0 +1,16 @@ +#ifndef RecoParticleFlow_PFRecHitProducer_interface_alpaka_PFClusteringEdgeVarsDevice_h +#define RecoParticleFlow_PFRecHitProducer_interface_alpaka_PFClusteringEdgeVarsDevice_h + +#include 
"DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusteringEdgeVarsSoA.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { + + using PFClusteringEdgeVarsDeviceCollection = PortableCollection<::reco::PFClusteringEdgeVarsSoA>; + // needs nRH + maxNeighbors allocation + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringVarsDeviceCollection.h b/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringVarsDeviceCollection.h new file mode 100644 index 0000000000000..ab20ccae7d7b4 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringVarsDeviceCollection.h @@ -0,0 +1,15 @@ +#ifndef RecoParticleFlow_PFRecHitProducer_interface_alpaka_PFClusteringVarsDevice_h +#define RecoParticleFlow_PFRecHitProducer_interface_alpaka_PFClusteringVarsDevice_h + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusteringVarsSoA.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE::reco { + + using PFClusteringVarsDeviceCollection = PortableCollection<::reco::PFClusteringVarsSoA>; + // needs nRH allocation +} // namespace ALPAKA_ACCELERATOR_NAMESPACE::reco + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/plugins/BuildFile.xml b/RecoParticleFlow/PFClusterProducer/plugins/BuildFile.xml index 48efc1b5ef274..220bee86bde6e 100644 --- a/RecoParticleFlow/PFClusterProducer/plugins/BuildFile.xml +++ b/RecoParticleFlow/PFClusterProducer/plugins/BuildFile.xml @@ -2,6 +2,7 @@ + @@ -11,9 +12,11 @@ + + @@ -38,3 +41,23 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/RecoParticleFlow/PFClusterProducer/plugins/LegacyPFClusterProducer.cc 
b/RecoParticleFlow/PFClusterProducer/plugins/LegacyPFClusterProducer.cc new file mode 100644 index 0000000000000..4027bb340a168 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/plugins/LegacyPFClusterProducer.cc @@ -0,0 +1,245 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Utilities/interface/StreamID.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Utilities/interface/EDPutToken.h" + +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" + +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/DetId/interface/DetId.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitHostCollection.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterHostCollection.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFractionHostCollection.h" +#include "HeterogeneousCore/CUDACore/interface/JobConfigurationGPURecord.h" +#include "RecoParticleFlow/PFClusterProducer/interface/PFCPositionCalculatorBase.h" +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsHostCollection.h" + +class LegacyPFClusterProducer : public edm::stream::EDProducer<> { +public: + LegacyPFClusterProducer(edm::ParameterSet const& config) + : pfClusterSoAToken_(consumes(config.getParameter("src"))), + pfRecHitFractionSoAToken_(consumes(config.getParameter("src"))), + InputPFRecHitSoA_Token_{consumes(config.getParameter("PFRecHitsLabelIn"))}, + 
pfClusParamsToken_(esConsumes(config.getParameter("pfClusterParams"))), + legacyPfClustersToken_(produces()), + recHitsLabel_(consumes(config.getParameter("recHitsSource"))), + hcalCutsToken_(esConsumes(edm::ESInputTag("", "withTopo"))), + cutsFromDB_(config.getParameter("usePFThresholdsFromDB")) { + edm::ConsumesCollector cc = consumesCollector(); + + //setup pf cluster builder if requested + const edm::ParameterSet& pfcConf = config.getParameterSet("pfClusterBuilder"); + if (!pfcConf.empty()) { + if (pfcConf.exists("positionCalc")) { + const edm::ParameterSet& acConf = pfcConf.getParameterSet("positionCalc"); + const std::string& algoac = acConf.getParameter("algoName"); + positionCalc_ = PFCPositionCalculatorFactory::get()->create(algoac, acConf, cc); + } + + if (pfcConf.exists("allCellsPositionCalc")) { + const edm::ParameterSet& acConf = pfcConf.getParameterSet("allCellsPositionCalc"); + const std::string& algoac = acConf.getParameter("algoName"); + allCellsPositionCalc_ = PFCPositionCalculatorFactory::get()->create(algoac, acConf, cc); + } + } + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("src"); + desc.add("PFRecHitsLabelIn"); + desc.add("pfClusterParams"); + desc.add("recHitsSource"); + desc.add("usePFThresholdsFromDB", true); + { + edm::ParameterSetDescription pfClusterBuilder; + pfClusterBuilder.add("maxIterations", 5); + pfClusterBuilder.add("minFracTot", 1e-20); + pfClusterBuilder.add("minFractionToKeep", 1e-7); + pfClusterBuilder.add("excludeOtherSeeds", true); + pfClusterBuilder.add("showerSigma", 10.); + pfClusterBuilder.add("stoppingTolerance", 1e-8); + pfClusterBuilder.add("timeSigmaEB", 10.); + pfClusterBuilder.add("timeSigmaEE", 10.); + pfClusterBuilder.add("maxNSigmaTime", 10.); + pfClusterBuilder.add("minChi2Prob", 0.); + pfClusterBuilder.add("clusterTimeResFromSeed", false); + pfClusterBuilder.add("algoName", ""); + { + edm::ParameterSetDescription 
validator; + validator.add("detector", ""); + validator.add>("depths", {}); + validator.add>("recHitEnergyNorm", {}); + std::vector vDefaults(2); + vDefaults[0].addParameter("detector", "HCAL_BARREL1"); + vDefaults[0].addParameter>("depths", {1, 2, 3, 4}); + vDefaults[0].addParameter>("recHitEnergyNorm", {0.1, 0.2, 0.3, 0.3}); + vDefaults[1].addParameter("detector", "HCAL_ENDCAP"); + vDefaults[1].addParameter>("depths", {1, 2, 3, 4, 5, 6, 7}); + vDefaults[1].addParameter>("recHitEnergyNorm", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); + pfClusterBuilder.addVPSet("recHitEnergyNorms", validator, vDefaults); + } + { + edm::ParameterSetDescription bar; + bar.add("algoName", "Basic2DGenericPFlowPositionCalc"); + bar.add("minFractionInCalc", 1e-9); + bar.add("posCalcNCrystals", 5); + { + edm::ParameterSetDescription validator; + validator.add("detector", ""); + validator.add>("depths", {}); + validator.add>("logWeightDenominator", {}); + std::vector vDefaults(2); + vDefaults[0].addParameter("detector", "HCAL_BARREL1"); + vDefaults[0].addParameter>("depths", {1, 2, 3, 4}); + vDefaults[0].addParameter>("logWeightDenominator", {0.1, 0.2, 0.3, 0.3}); + vDefaults[1].addParameter("detector", "HCAL_ENDCAP"); + vDefaults[1].addParameter>("depths", {1, 2, 3, 4, 5, 6, 7}); + vDefaults[1].addParameter>("logWeightDenominator", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); + bar.addVPSet("logWeightDenominatorByDetector", validator, vDefaults); + } + bar.add("minAllowedNormalization", 1e-9); + pfClusterBuilder.add("positionCalc", bar); + } + { + edm::ParameterSetDescription bar; + bar.add("algoName", "Basic2DGenericPFlowPositionCalc"); + bar.add("minFractionInCalc", 1e-9); + bar.add("posCalcNCrystals", -1); + { + edm::ParameterSetDescription validator; + validator.add("detector", ""); + validator.add>("depths", {}); + validator.add>("logWeightDenominator", {}); + std::vector vDefaults(2); + vDefaults[0].addParameter("detector", "HCAL_BARREL1"); + vDefaults[0].addParameter>("depths", {1, 2, 3, 4}); 
+ vDefaults[0].addParameter>("logWeightDenominator", {0.1, 0.2, 0.3, 0.3}); + vDefaults[1].addParameter("detector", "HCAL_ENDCAP"); + vDefaults[1].addParameter>("depths", {1, 2, 3, 4, 5, 6, 7}); + vDefaults[1].addParameter>("logWeightDenominator", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); + bar.addVPSet("logWeightDenominatorByDetector", validator, vDefaults); + } + bar.add("minAllowedNormalization", 1e-9); + pfClusterBuilder.add("allCellsPositionCalc", bar); + } + { + edm::ParameterSetDescription bar; + bar.add("corrTermLowE", 0.); + bar.add("threshLowE", 6.); + bar.add("noiseTerm", 21.86); + bar.add("constantTermLowE", 4.24); + bar.add("noiseTermLowE", 8.); + bar.add("threshHighE", 15.); + bar.add("constantTerm", 2.82); + pfClusterBuilder.add("timeResolutionCalcBarrel", bar); + } + { + edm::ParameterSetDescription bar; + bar.add("corrTermLowE", 0.); + bar.add("threshLowE", 6.); + bar.add("noiseTerm", 21.86); + bar.add("constantTermLowE", 4.24); + bar.add("noiseTermLowE", 8.); + bar.add("threshHighE", 15.); + bar.add("constantTerm", 2.82); + pfClusterBuilder.add("timeResolutionCalcEndcap", bar); + } + { + edm::ParameterSetDescription bar; + pfClusterBuilder.add("positionReCalc", bar); + } + { + edm::ParameterSetDescription bar; + pfClusterBuilder.add("energyCorrector", bar); + } + desc.add("pfClusterBuilder", pfClusterBuilder); + } + descriptions.addWithDefaultLabel(desc); + } + +private: + void produce(edm::Event&, const edm::EventSetup&) override; + const edm::EDGetTokenT pfClusterSoAToken_; + const edm::EDGetTokenT pfRecHitFractionSoAToken_; + const edm::EDGetTokenT InputPFRecHitSoA_Token_; + const edm::ESGetToken pfClusParamsToken_; + const edm::EDPutTokenT legacyPfClustersToken_; + const edm::EDGetTokenT recHitsLabel_; + const edm::ESGetToken hcalCutsToken_; + const bool cutsFromDB_; + // the actual algorithm + std::unique_ptr positionCalc_; + std::unique_ptr allCellsPositionCalc_; +}; + +void LegacyPFClusterProducer::produce(edm::Event& event, const 
edm::EventSetup& setup) { + const reco::PFRecHitHostCollection& pfRecHits = event.get(InputPFRecHitSoA_Token_); + + HcalPFCuts const* paramPF = cutsFromDB_ ? &setup.getData(hcalCutsToken_) : nullptr; + + auto const& pfClusterSoA = event.get(pfClusterSoAToken_).const_view(); + auto const& pfRecHitFractionSoA = event.get(pfRecHitFractionSoAToken_).const_view(); + + int nRH = pfRecHits.view().size(); + reco::PFClusterCollection out; + out.reserve(nRH); + + auto const rechitsHandle = event.getHandle(recHitsLabel_); + + // Build PFClusters in legacy format + std::vector nTopoSeeds(nRH, 0); + + for (int i = 0; i < pfClusterSoA.nSeeds(); i++) { + nTopoSeeds[pfClusterSoA[i].topoId()]++; + } + + // Looping over SoA PFClusters to produce legacy PFCluster collection + for (int i = 0; i < pfClusterSoA.nSeeds(); i++) { + unsigned int n = pfClusterSoA[i].seedRHIdx(); + reco::PFCluster temp; + temp.setSeed((*rechitsHandle)[n].detId()); // Pulling the detId of this PFRecHit from the legacy format input + int offset = pfClusterSoA[i].rhfracOffset(); + for (int k = offset; k < (offset + pfClusterSoA[i].rhfracSize()) && k >= 0; + k++) { // Looping over PFRecHits in the same topo cluster + if (pfRecHitFractionSoA[k].pfrhIdx() < nRH && pfRecHitFractionSoA[k].pfrhIdx() > -1 && + pfRecHitFractionSoA[k].frac() > 0.0) { + const reco::PFRecHitRef& refhit = reco::PFRecHitRef(rechitsHandle, pfRecHitFractionSoA[k].pfrhIdx()); + temp.addRecHitFraction(reco::PFRecHitFraction(refhit, pfRecHitFractionSoA[k].frac())); + } + } + + // Now PFRecHitFraction of this PFCluster is set. 
Now compute calculateAndSetPosition (energy, position etc) + if (nTopoSeeds[pfClusterSoA[i].topoId()] == 1 && allCellsPositionCalc_) { + allCellsPositionCalc_->calculateAndSetPosition(temp, paramPF); + } else { + positionCalc_->calculateAndSetPosition(temp, paramPF); + } + out.emplace_back(std::move(temp)); + } + + event.emplace(legacyPfClustersToken_, std::move(out)); +} + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(LegacyPFClusterProducer); diff --git a/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterECLCC.h b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterECLCC.h new file mode 100644 index 0000000000000..b1fc0a35f4396 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterECLCC.h @@ -0,0 +1,174 @@ +#ifndef RecoParticleFlow_PFClusterProducer_plugins_alpaka_PFClusterECLCC_h +#define RecoParticleFlow_PFClusterProducer_plugins_alpaka_PFClusterECLCC_h + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringVarsDeviceCollection.h" +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringEdgeVarsDeviceCollection.h" + +// The following comment block is required in using the ECL-CC algorithm for topological clustering + +/* + ECL-CC code: ECL-CC is a connected components graph algorithm. The CUDA + implementation thereof is quite fast. It operates on graphs stored in + binary CSR format. + + Copyright (c) 2017-2020, Texas State University. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Texas State University nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + Authors: Jayadharini Jaiganesh and Martin Burtscher + + URL: The latest version of this code is available at + https://userweb.cs.txstate.edu/~burtscher/research/ECL-CC/. + + Publication: This work is described in detail in the following paper. + Jayadharini Jaiganesh and Martin Burtscher. A High-Performance Connected + Components Implementation for GPUs. Proceedings of the 2018 ACM International + Symposium on High-Performance Parallel and Distributed Computing, pp. 92-104. + June 2018. +*/ + +/* + The code is modified for the specific use-case of generating topological clusters + for PFClustering. It is adapted to work with the Alpaka portability library. 
The + kernels for processing vertices at warp and block level granularity have been + removed since the degree of vertices in our inputs is only ever 8; the number of + neighbors. +*/ + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + /* intermediate pointer jumping */ + + ALPAKA_FN_ACC inline int representative(const int idx, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars) { + int curr = pfClusteringVars[idx].pfrh_topoId(); + if (curr != idx) { + int next, prev = idx; + while (curr > (next = pfClusteringVars[curr].pfrh_topoId())) { + pfClusteringVars[prev].pfrh_topoId() = next; + prev = curr; + curr = next; + } + } + return curr; + } + + // Initial step of ECL-CC. Uses ID of first neighbour in edgeList with a smaller ID + class ECLCCInit { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusteringEdgeVarsDeviceCollection::View pfClusteringEdgeVars) const { + const int nRH = pfRecHits.size(); + for (int v : cms::alpakatools::elements_with_stride(acc, nRH)) { + const int beg = pfClusteringEdgeVars[v].pfrh_edgeIdx(); + const int end = pfClusteringEdgeVars[v + 1].pfrh_edgeIdx(); + int m = v; + int i = beg; + while ((m == v) && (i < end)) { + m = std::min(m, pfClusteringEdgeVars[i].pfrh_edgeList()); + i++; + } + pfClusteringVars[v].pfrh_topoId() = m; + } + } + }; + + // First edge processing kernel of ECL-CC + // Processes vertices + class ECLCCCompute1 { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusteringEdgeVarsDeviceCollection::View pfClusteringEdgeVars) const { + const int nRH = pfRecHits.size(); + + for (int v : cms::alpakatools::elements_with_stride(acc, nRH)) { + const int vstat = pfClusteringVars[v].pfrh_topoId(); + if (v != vstat) { + 
const int beg = pfClusteringEdgeVars[v].pfrh_edgeIdx(); + const int end = pfClusteringEdgeVars[v + 1].pfrh_edgeIdx(); + int vstat = representative(v, pfClusteringVars); + for (int i = beg; i < end; i++) { + const int nli = pfClusteringEdgeVars[i].pfrh_edgeList(); + if (v > nli) { + int ostat = representative(nli, pfClusteringVars); + bool repeat; + do { + repeat = false; + if (vstat != ostat) { + int ret; + if (vstat < ostat) { + if ((ret = alpaka::atomicCas(acc, &pfClusteringVars[ostat].pfrh_topoId(), ostat, vstat)) != ostat) { + ostat = ret; + repeat = true; + } + } else { + if ((ret = alpaka::atomicCas(acc, &pfClusteringVars[vstat].pfrh_topoId(), vstat, ostat)) != vstat) { + vstat = ret; + repeat = true; + } + } + } + } while (repeat); + } + } + } + } + } + }; + + /* link all vertices to sink */ + class ECLCCFlatten { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusteringEdgeVarsDeviceCollection::View pfClusteringEdgeVars) const { + const int nRH = pfRecHits.size(); + + for (int v : cms::alpakatools::elements_with_stride(acc, nRH)) { + int next, vstat = pfClusteringVars[v].pfrh_topoId(); + const int old = vstat; + while (vstat > (next = pfClusteringVars[vstat].pfrh_topoId())) { + vstat = next; + } + if (old != vstat) + pfClusteringVars[v].pfrh_topoId() = vstat; + } + } + }; + + // ECL-CC ends + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterParamsESProducer.cc b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterParamsESProducer.cc new file mode 100644 index 0000000000000..36d4b025c3a48 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterParamsESProducer.cc @@ -0,0 +1,249 @@ +#include "FWCore/Framework/interface/EventSetupRecordIntervalFinder.h" +#include 
"FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/Exception.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESProducer.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ModuleFactory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/CUDACore/interface/JobConfigurationGPURecord.h" +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h" + +#include + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + class PFClusterParamsESProducer : public ESProducer { + public: + PFClusterParamsESProducer(edm::ParameterSet const& iConfig) : ESProducer(iConfig) { + constexpr static uint32_t kMaxDepth_barrel = 4; + constexpr static uint32_t kMaxDepth_endcap = 7; + product = std::make_shared(std::max(kMaxDepth_barrel, kMaxDepth_endcap), + cms::alpakatools::host()); + auto view = product->view(); + + // seedFinder + auto const& sfConf = iConfig.getParameterSet("seedFinder"); + view.nNeigh() = sfConf.getParameter("nNeighbours"); + auto const& seedFinderConfs = sfConf.getParameterSetVector("thresholdsByDetector"); + for (auto const& pset : seedFinderConfs) { + auto const& det = pset.getParameter("detector"); + auto seedPt2Threshold = std::pow(pset.getParameter("seedingThresholdPt"), 2.); + auto const& thresholds = pset.getParameter>("seedingThreshold"); + if (det == "HCAL_BARREL1") { + if (thresholds.size() != kMaxDepth_barrel) + throw cms::Exception("Configuration") << "Invalid size (" << thresholds.size() << " != " << kMaxDepth_barrel + << ") for \"\" vector of det = \"" << det << "\""; + view.seedPt2ThresholdHB() = seedPt2Threshold; + for (size_t idx = 0; idx < thresholds.size(); ++idx) { + view.seedEThresholdHB_vec()[idx] = thresholds[idx]; + } + 
} else if (det == "HCAL_ENDCAP") { + if (thresholds.size() != kMaxDepth_endcap) + throw cms::Exception("Configuration") << "Invalid size (" << thresholds.size() << " != " << kMaxDepth_endcap + << ") for \"\" vector of det = \"" << det << "\""; + view.seedPt2ThresholdHE() = seedPt2Threshold; + for (size_t idx = 0; idx < thresholds.size(); ++idx) { + view.seedEThresholdHE_vec()[idx] = thresholds[idx]; + } + } else { + throw cms::Exception("Configuration") << "Unknown detector when parsing seedFinder: " << det; + } + } + + // initialClusteringStep + auto const& initConf = iConfig.getParameterSet("initialClusteringStep"); + auto const& topoThresholdConf = initConf.getParameterSetVector("thresholdsByDetector"); + for (auto const& pset : topoThresholdConf) { + auto const& det = pset.getParameter("detector"); + auto const& thresholds = pset.getParameter>("gatheringThreshold"); + if (det == "HCAL_BARREL1") { + if (thresholds.size() != kMaxDepth_barrel) + throw cms::Exception("Configuration") << "Invalid size (" << thresholds.size() << " != " << kMaxDepth_barrel + << ") for \"\" vector of det = \"" << det << "\""; + for (size_t idx = 0; idx < thresholds.size(); ++idx) { + view.topoEThresholdHB_vec()[idx] = thresholds[idx]; + } + } else if (det == "HCAL_ENDCAP") { + if (thresholds.size() != kMaxDepth_endcap) + throw cms::Exception("Configuration") << "Invalid size (" << thresholds.size() << " != " << kMaxDepth_endcap + << ") for \"\" vector of det = \"" << det << "\""; + for (size_t idx = 0; idx < thresholds.size(); ++idx) { + view.topoEThresholdHE_vec()[idx] = thresholds[idx]; + } + } else { + throw cms::Exception("Configuration") << "Unknown detector when parsing initClusteringStep: " << det; + } + } + + // pfClusterBuilder + auto const& pfClusterPSet = iConfig.getParameterSet("pfClusterBuilder"); + view.showerSigma2() = std::pow(pfClusterPSet.getParameter("showerSigma"), 2.); + view.minFracToKeep() = pfClusterPSet.getParameter("minFractionToKeep"); + view.minFracTot() = 
pfClusterPSet.getParameter("minFracTot"); + view.maxIterations() = pfClusterPSet.getParameter("maxIterations"); + view.excludeOtherSeeds() = pfClusterPSet.getParameter("excludeOtherSeeds"); + view.stoppingTolerance() = pfClusterPSet.getParameter("stoppingTolerance"); + auto const& pcPSet = pfClusterPSet.getParameterSet("positionCalc"); + view.minFracInCalc() = pcPSet.getParameter("minFractionInCalc"); + view.minAllowedNormalization() = pcPSet.getParameter("minAllowedNormalization"); + + auto const& recHitEnergyNormConf = pfClusterPSet.getParameterSetVector("recHitEnergyNorms"); + for (auto const& pset : recHitEnergyNormConf) { + auto const& recHitNorms = pset.getParameter>("recHitEnergyNorm"); + auto const& det = pset.getParameter("detector"); + if (det == "HCAL_BARREL1") { + if (recHitNorms.size() != kMaxDepth_barrel) + throw cms::Exception("Configuration") + << "Invalid size (" << recHitNorms.size() << " != " << kMaxDepth_barrel + << ") for \"\" vector of det = \"" << det << "\""; + for (size_t idx = 0; idx < recHitNorms.size(); ++idx) { + view.recHitEnergyNormInvHB_vec()[idx] = 1. / recHitNorms[idx]; + } + } else if (det == "HCAL_ENDCAP") { + if (recHitNorms.size() != kMaxDepth_endcap) + throw cms::Exception("Configuration") + << "Invalid size (" << recHitNorms.size() << " != " << kMaxDepth_endcap + << ") for \"\" vector of det = \"" << det << "\""; + for (size_t idx = 0; idx < recHitNorms.size(); ++idx) { + view.recHitEnergyNormInvHE_vec()[idx] = 1. 
/ recHitNorms[idx]; + } + } else { + throw cms::Exception("Configuration") << "Unknown detector when parsing recHitEnergyNorms: " << det; + } + } + + auto const& barrelTimeResConf = pfClusterPSet.getParameterSet("timeResolutionCalcBarrel"); + view.barrelTimeResConsts_corrTermLowE() = barrelTimeResConf.getParameter("corrTermLowE"); + view.barrelTimeResConsts_threshLowE() = barrelTimeResConf.getParameter("threshLowE"); + view.barrelTimeResConsts_noiseTerm() = barrelTimeResConf.getParameter("noiseTerm"); + view.barrelTimeResConsts_constantTermLowE2() = + std::pow(barrelTimeResConf.getParameter("constantTermLowE"), 2.); + view.barrelTimeResConsts_noiseTermLowE() = barrelTimeResConf.getParameter("noiseTermLowE"); + view.barrelTimeResConsts_threshHighE() = barrelTimeResConf.getParameter("threshHighE"); + view.barrelTimeResConsts_constantTerm2() = std::pow(barrelTimeResConf.getParameter("constantTerm"), 2.); + view.barrelTimeResConsts_resHighE2() = + std::pow(view.barrelTimeResConsts_noiseTerm() / view.barrelTimeResConsts_threshHighE(), 2.) + + view.barrelTimeResConsts_constantTerm2(); + + auto const& endcapTimeResConf = pfClusterPSet.getParameterSet("timeResolutionCalcEndcap"); + view.endcapTimeResConsts_corrTermLowE() = endcapTimeResConf.getParameter("corrTermLowE"); + view.endcapTimeResConsts_threshLowE() = endcapTimeResConf.getParameter("threshLowE"); + view.endcapTimeResConsts_noiseTerm() = endcapTimeResConf.getParameter("noiseTerm"); + view.endcapTimeResConsts_constantTermLowE2() = + std::pow(endcapTimeResConf.getParameter("constantTermLowE"), 2.); + view.endcapTimeResConsts_noiseTermLowE() = endcapTimeResConf.getParameter("noiseTermLowE"); + view.endcapTimeResConsts_threshHighE() = endcapTimeResConf.getParameter("threshHighE"); + view.endcapTimeResConsts_constantTerm2() = std::pow(endcapTimeResConf.getParameter("constantTerm"), 2.); + view.endcapTimeResConsts_resHighE2() = + std::pow(view.endcapTimeResConsts_noiseTerm() / view.endcapTimeResConsts_threshHighE(), 2.) 
+ + view.endcapTimeResConsts_constantTerm2(); + + setWhatProduced(this); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription psetDesc; + { + auto const psetName = "seedFinder"; + edm::ParameterSetDescription foo; + foo.add("nNeighbours", 4); + { + edm::ParameterSetDescription validator; + validator.add("detector", ""); + validator.add>("seedingThreshold", {}); + validator.add("seedingThresholdPt", 0.); + std::vector vDefaults(2); + vDefaults[0].addParameter("detector", "HCAL_BARREL1"); + vDefaults[0].addParameter>("seedingThreshold", {0.125, 0.25, 0.35, 0.35}); + vDefaults[0].addParameter("seedingThresholdPt", 0.); + vDefaults[1].addParameter("detector", "HCAL_ENDCAP"); + vDefaults[1].addParameter>("seedingThreshold", + {0.1375, 0.275, 0.275, 0.275, 0.275, 0.275, 0.275}); + vDefaults[1].addParameter("seedingThresholdPt", 0.); + foo.addVPSet("thresholdsByDetector", validator, vDefaults); + } + psetDesc.add(psetName, foo); + } + { + auto const psetName = "initialClusteringStep"; + edm::ParameterSetDescription foo; + { + edm::ParameterSetDescription validator; + validator.add("detector", ""); + validator.add>("gatheringThreshold", {}); + std::vector vDefaults(2); + vDefaults[0].addParameter("detector", "HCAL_BARREL1"); + vDefaults[0].addParameter>("gatheringThreshold", {0.1, 0.2, 0.3, 0.3}); + vDefaults[1].addParameter("detector", "HCAL_ENDCAP"); + vDefaults[1].addParameter>("gatheringThreshold", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); + foo.addVPSet("thresholdsByDetector", validator, vDefaults); + } + psetDesc.add(psetName, foo); + } + { + auto const psetName = "pfClusterBuilder"; + edm::ParameterSetDescription foo; + foo.add("maxIterations", 50); + foo.add("minFracTot", 1e-20); + foo.add("minFractionToKeep", 1e-7); + foo.add("excludeOtherSeeds", true); + foo.add("showerSigma", 10.); + foo.add("stoppingTolerance", 1e-8); + { + edm::ParameterSetDescription validator; + validator.add("detector", ""); + 
validator.add>("recHitEnergyNorm", {}); + std::vector vDefaults(2); + vDefaults[0].addParameter("detector", "HCAL_BARREL1"); + vDefaults[0].addParameter>("recHitEnergyNorm", {0.1, 0.2, 0.3, 0.3}); + vDefaults[1].addParameter("detector", "HCAL_ENDCAP"); + vDefaults[1].addParameter>("recHitEnergyNorm", {0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2}); + foo.addVPSet("recHitEnergyNorms", validator, vDefaults); + } + { + edm::ParameterSetDescription bar; + bar.add("minFractionInCalc", 1e-9); + bar.add("minAllowedNormalization", 1e-9); + foo.add("positionCalc", bar); + } + { + edm::ParameterSetDescription bar; + bar.add("corrTermLowE", 0.); + bar.add("threshLowE", 6.); + bar.add("noiseTerm", 21.86); + bar.add("constantTermLowE", 4.24); + bar.add("noiseTermLowE", 8.); + bar.add("threshHighE", 15.); + bar.add("constantTerm", 2.82); + foo.add("timeResolutionCalcBarrel", bar); + } + { + edm::ParameterSetDescription bar; + bar.add("corrTermLowE", 0.); + bar.add("threshLowE", 6.); + bar.add("noiseTerm", 21.86); + bar.add("constantTermLowE", 4.24); + bar.add("noiseTermLowE", 8.); + bar.add("threshHighE", 15.); + bar.add("constantTerm", 2.82); + foo.add("timeResolutionCalcEndcap", bar); + } + psetDesc.add(psetName, foo); + } + + descriptions.addWithDefaultLabel(psetDesc); + } + + std::shared_ptr produce(JobConfigurationGPURecord const& iRecord) { + return product; + } + + private: + std::shared_ptr product; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +DEFINE_FWK_EVENTSETUP_ALPAKA_MODULE(PFClusterParamsESProducer); diff --git a/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducer.cc b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducer.cc new file mode 100644 index 0000000000000..bde6db46b08d9 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducer.cc @@ -0,0 +1,73 @@ +#include +#include +#include "DataFormats/ParticleFlowReco/interface/PFRecHitHostCollection.h" +#include 
"FWCore/Utilities/interface/StreamID.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/stream/EDProducer.h" +#include "HeterogeneousCore/CUDACore/interface/JobConfigurationGPURecord.h" +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h" +#include "RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.h" +#include "RecoParticleFlow/PFClusterProducer/interface/PFCPositionCalculatorBase.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + class PFClusterSoAProducer : public stream::EDProducer<> { + public: + PFClusterSoAProducer(edm::ParameterSet const& config) + : pfClusParamsToken(esConsumes(config.getParameter("pfClusterParams"))), + topologyToken_(esConsumes(config.getParameter("topology"))), + inputPFRecHitSoA_Token_{consumes(config.getParameter("pfRecHits"))}, + outputPFClusterSoA_Token_{produces()}, + outputPFRHFractionSoA_Token_{produces()}, + synchronise_(config.getParameter("synchronise")), + pfRecHitFractionAllocation_(config.getParameter("pfRecHitFractionAllocation")) {} + + void produce(device::Event& event, device::EventSetup const& setup) override { + const reco::PFClusterParamsDeviceCollection& params = setup.getData(pfClusParamsToken); + const reco::PFRecHitHCALTopologyDeviceCollection& topology = setup.getData(topologyToken_); + const reco::PFRecHitHostCollection& pfRecHits = event.get(inputPFRecHitSoA_Token_); + const int nRH = pfRecHits->size(); + + reco::PFClusteringVarsDeviceCollection pfClusteringVars{nRH, event.queue()}; + reco::PFClusteringEdgeVarsDeviceCollection pfClusteringEdgeVars{(nRH * 8), event.queue()}; + reco::PFClusterDeviceCollection pfClusters{nRH, event.queue()}; + 
reco::PFRecHitFractionDeviceCollection pfrhFractions{nRH * pfRecHitFractionAllocation_, event.queue()}; + + PFClusterProducerKernel kernel(event.queue(), pfRecHits); + kernel.execute( + event.queue(), params, topology, pfClusteringVars, pfClusteringEdgeVars, pfRecHits, pfClusters, pfrhFractions); + + if (synchronise_) + alpaka::wait(event.queue()); + + event.emplace(outputPFClusterSoA_Token_, std::move(pfClusters)); + event.emplace(outputPFRHFractionSoA_Token_, std::move(pfrhFractions)); + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("pfRecHits"); + desc.add("pfClusterParams"); + desc.add("topology"); + desc.add("synchronise"); + desc.add("pfRecHitFractionAllocation", 120); + descriptions.addWithDefaultLabel(desc); + } + + private: + const device::ESGetToken pfClusParamsToken; + const device::ESGetToken topologyToken_; + const edm::EDGetTokenT inputPFRecHitSoA_Token_; + const device::EDPutToken outputPFClusterSoA_Token_; + const device::EDPutToken outputPFRHFractionSoA_Token_; + const bool synchronise_; + const int pfRecHitFractionAllocation_; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(PFClusterSoAProducer); diff --git a/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.dev.cc b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.dev.cc new file mode 100644 index 0000000000000..ea7816ce0cb87 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.dev.cc @@ -0,0 +1,1565 @@ +#include + +#include "FWCore/Utilities/interface/bit_cast.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/atomicMaxF.h" + +#include 
"DataFormats/ParticleFlowReco/interface/PFLayer.h" +#include "RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.h" +#include "RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterECLCC.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + + using namespace reco::pfClustering; + + static constexpr int threadsPerBlockForClustering = 512; + static constexpr uint32_t blocksForExoticClusters = 4; + + // cutoffFraction -> Is a rechit almost entirely attributed to one cluster + // cutoffDistance -> Is a rechit close enough to a cluster to be associated + // Values are from RecoParticleFlow/PFClusterProducer/plugins/Basic2DGenericPFlowClusterizer.cc + static constexpr float cutoffDistance = 100.; + static constexpr float cutoffFraction = 0.9999; + + static constexpr uint32_t kHBHalf = 1296; + static constexpr uint32_t maxTopoInput = 2 * kHBHalf; + + // Calculation of dR2 for Clustering + ALPAKA_FN_ACC ALPAKA_FN_INLINE static float dR2(Position4 pos1, Position4 pos2) { + float mag1 = sqrtf(pos1.x * pos1.x + pos1.y * pos1.y + pos1.z * pos1.z); + float cosTheta1 = mag1 > 0.0 ? pos1.z / mag1 : 1.0; + float eta1 = 0.5f * logf((1.0f + cosTheta1) / (1.0f - cosTheta1)); + float phi1 = atan2f(pos1.y, pos1.x); + + float mag2 = sqrtf(pos2.x * pos2.x + pos2.y * pos2.y + pos2.z * pos2.z); + float cosTheta2 = mag2 > 0.0 ? 
pos2.z / mag2 : 1.0; + float eta2 = 0.5f * logf((1.0f + cosTheta2) / (1.0f - cosTheta2)); + float phi2 = atan2f(pos2.y, pos2.x); + + float deta = eta2 - eta1; + constexpr const float fPI = M_PI; + float dphi = std::abs(std::abs(phi2 - phi1) - fPI) - fPI; + return (deta * deta + dphi * dphi); + } + + // Get index of seed + ALPAKA_FN_ACC static auto getSeedRhIdx(int* seeds, int seedNum) { return seeds[seedNum]; } + + // Get rechit fraction of a given rechit for a given seed + ALPAKA_FN_ACC static auto getRhFrac(reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + int topoSeedBegin, + reco::PFRecHitFractionDeviceCollection::View fracView, + int seedNum, + int rhNum) { + int seedIdx = pfClusteringVars[topoSeedBegin + seedNum].topoSeedList(); + return fracView[pfClusteringVars[seedIdx].seedFracOffsets() + rhNum].frac(); + } + + // Cluster position calculation + template + ALPAKA_FN_ACC static void updateClusterPos(reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + Position4& pos4, + float frac, + int rhInd, + reco::PFRecHitDeviceCollection::ConstView pfRecHits, + float rhENormInv) { + Position4 rechitPos = Position4{pfRecHits[rhInd].x(), pfRecHits[rhInd].y(), pfRecHits[rhInd].z(), 1.0}; + const auto rh_energy = pfRecHits[rhInd].energy() * frac; + const auto norm = (frac < pfClusParams.minFracInCalc() ? 
0.0f : std::max(0.0f, logf(rh_energy * rhENormInv))); + if constexpr (debug) + printf("\t\t\trechit %d: norm = %f\tfrac = %f\trh_energy = %f\tpos = (%f, %f, %f)\n", + rhInd, + norm, + frac, + rh_energy, + rechitPos.x, + rechitPos.y, + rechitPos.z); + + pos4.x += rechitPos.x * norm; + pos4.y += rechitPos.y * norm; + pos4.z += rechitPos.z * norm; + pos4.w += norm; // position_norm + } + + // Processing single seed clusters + // Device function designed to be called by all threads of a given block + template >> + ALPAKA_FN_ACC static void hcalFastCluster_singleSeed( + const TAcc& acc, + reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + int topoId, // from selection + int nRHTopo, // from selection + reco::PFRecHitDeviceCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + reco::PFRecHitFractionDeviceCollection::View fracView) { + int tid = alpaka::getIdx(acc)[0u]; // thread index is rechit number + // Declaration of shared variables + int& i = alpaka::declareSharedVar(acc); + int& nRHOther = alpaka::declareSharedVar(acc); + unsigned int& iter = alpaka::declareSharedVar(acc); + float& tol = alpaka::declareSharedVar(acc); + float& clusterEnergy = alpaka::declareSharedVar(acc); + float& rhENormInv = alpaka::declareSharedVar(acc); + float& seedEnergy = alpaka::declareSharedVar(acc); + Position4& clusterPos = alpaka::declareSharedVar(acc); + Position4& prevClusterPos = alpaka::declareSharedVar(acc); + Position4& seedPos = alpaka::declareSharedVar(acc); + bool& notDone = alpaka::declareSharedVar(acc); + if (once_per_block(acc)) { + i = pfClusteringVars[pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); // i is the seed rechit index + nRHOther = nRHTopo - 1; // number of non-seed rechits + seedPos = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.}; + clusterPos = 
seedPos; // Initial cluster position is just the seed + prevClusterPos = seedPos; + seedEnergy = pfRecHits[i].energy(); + clusterEnergy = seedEnergy; + tol = pfClusParams.stoppingTolerance(); // stopping tolerance * tolerance scaling + + if (topology.cutsFromDB()) { + rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold()); + } else { + if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1) + rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1]; + else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP) + rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1]; + else { + rhENormInv = 0.; + printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer()); + } + } + + iter = 0; + notDone = true; + } + alpaka::syncBlockThreads(acc); // all threads call sync + + int j = -1; // j is the rechit index for this thread + int rhFracOffset = -1; + Position4 rhPos; + float rhEnergy = -1., rhPosNorm = -1.; + + if (tid < nRHOther) { + rhFracOffset = + pfClusteringVars[i].seedFracOffsets() + tid + 1; // Offset for this rechit in pcrhfrac, pcrhfracidx arrays + j = fracView[rhFracOffset].pfrhIdx(); // rechit index for this thread + rhPos = Position4{pfRecHits[j].x(), pfRecHits[j].y(), pfRecHits[j].z(), 1.}; + rhEnergy = pfRecHits[j].energy(); + rhPosNorm = fmaxf(0., logf(rhEnergy * rhENormInv)); + } + alpaka::syncBlockThreads(acc); // all threads call sync + + do { + if constexpr (debug) { + if (once_per_block(acc)) + printf("\n--- Now on iter %d for topoId %d ---\n", iter, topoId); + } + float dist2 = -1., d2 = -1., fraction = -1.; + if (tid < nRHOther) { + // Rechit distance calculation + dist2 = (clusterPos.x - rhPos.x) * (clusterPos.x - rhPos.x) + + (clusterPos.y - rhPos.y) * (clusterPos.y - rhPos.y) + + (clusterPos.z - rhPos.z) * (clusterPos.z - rhPos.z); + + d2 = dist2 / pfClusParams.showerSigma2(); + fraction = clusterEnergy * rhENormInv * expf(-0.5f * d2); + + // For single seed clusters, rechit fraction is 
either 1 (100%) or -1 (not included) + if (fraction > pfClusParams.minFracTot() && d2 < cutoffDistance) + fraction = 1.; + else + fraction = -1.; + fracView[rhFracOffset].frac() = fraction; + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if (once_per_block(acc)) + printf("Computing cluster position for topoId %d\n", topoId); + } + + if (once_per_block(acc)) { + // Reset cluster position and energy + clusterPos = seedPos; + clusterEnergy = seedEnergy; + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Recalculate cluster position and energy + if (fraction > -0.5) { + alpaka::atomicAdd(acc, &clusterEnergy, rhEnergy, alpaka::hierarchy::Threads{}); + alpaka::atomicAdd(acc, &clusterPos.x, rhPos.x * rhPosNorm, alpaka::hierarchy::Threads{}); + alpaka::atomicAdd(acc, &clusterPos.y, rhPos.y * rhPosNorm, alpaka::hierarchy::Threads{}); + alpaka::atomicAdd(acc, &clusterPos.z, rhPos.z * rhPosNorm, alpaka::hierarchy::Threads{}); + alpaka::atomicAdd(acc, &clusterPos.w, rhPosNorm, alpaka::hierarchy::Threads{}); // position_norm + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (once_per_block(acc)) { + // Normalize the seed postiion + if (clusterPos.w >= pfClusParams.minAllowedNormalization()) { + // Divide by position norm + clusterPos.x /= clusterPos.w; + clusterPos.y /= clusterPos.w; + clusterPos.z /= clusterPos.w; + + if constexpr (debug) + printf("\tPF cluster (seed %d) energy = %f\tposition = (%f, %f, %f)\n", + i, + clusterEnergy, + clusterPos.x, + clusterPos.y, + clusterPos.z); + } else { + if constexpr (debug) + printf("\tPF cluster (seed %d) position norm (%f) less than minimum (%f)\n", + i, + clusterPos.w, + pfClusParams.minAllowedNormalization()); + clusterPos.x = 0.; + clusterPos.y = 0.; + clusterPos.z = 0.; + } + float diff2 = dR2(prevClusterPos, clusterPos); + if constexpr (debug) + printf("\tPF cluster (seed %d) has diff2 = %f\n", i, diff2); + prevClusterPos = clusterPos; // Save 
clusterPos + + float tol2 = tol * tol; + iter++; + notDone = (diff2 > tol2) && (iter < pfClusParams.maxIterations()); + if constexpr (debug) { + if (diff2 > tol2) + printf("\tTopoId %d has diff2 = %f greater than squared tolerance %f (continuing)\n", topoId, diff2, tol2); + else if constexpr (debug) + printf("\tTopoId %d has diff2 = %f LESS than squared tolerance %f (terminating!)\n", topoId, diff2, tol2); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + } while (notDone); // shared variable condition ensures synchronization is well defined + if (once_per_block(acc)) { // Cluster is finalized, assign cluster information to te SoA + int rhIdx = + pfClusteringVars[pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); // i is the seed rechit index + int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx(); + clusterView[seedIdx].energy() = clusterEnergy; + clusterView[seedIdx].x() = clusterPos.x; + clusterView[seedIdx].y() = clusterPos.y; + clusterView[seedIdx].z() = clusterPos.z; + } + } + + // Processing clusters up to 100 seeds and 512 non-seed rechits using shared memory accesses + // Device function designed to be called by all threads of a given block + template >> + ALPAKA_FN_ACC static void hcalFastCluster_multiSeedParallel( + const TAcc& acc, + reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + int topoId, // from selection + int nSeeds, // from selection + int nRHTopo, // from selection + reco::PFRecHitDeviceCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + reco::PFRecHitFractionDeviceCollection::View fracView) { + int tid = alpaka::getIdx( // Thread index corresponds to a single rechit of the topo cluster + acc)[0u]; + + int& nRHNotSeed = alpaka::declareSharedVar(acc); + int& topoSeedBegin = alpaka::declareSharedVar(acc); + int& stride = 
alpaka::declareSharedVar(acc); + int& iter = alpaka::declareSharedVar(acc); + float& tol = alpaka::declareSharedVar(acc); + float& diff2 = alpaka::declareSharedVar(acc); + float& rhENormInv = alpaka::declareSharedVar(acc); + bool& notDone = alpaka::declareSharedVar(acc); + auto& clusterPos = alpaka::declareSharedVar(acc); + auto& prevClusterPos = alpaka::declareSharedVar(acc); + auto& clusterEnergy = alpaka::declareSharedVar(acc); + auto& rhFracSum = alpaka::declareSharedVar(acc); + auto& seeds = alpaka::declareSharedVar(acc); + auto& rechits = alpaka::declareSharedVar(acc); + + if (once_per_block(acc)) { + nRHNotSeed = nRHTopo - nSeeds + 1; // 1 + (# rechits per topoId that are NOT seeds) + topoSeedBegin = pfClusteringVars[topoId].topoSeedOffsets(); + tol = pfClusParams.stoppingTolerance() * + powf(fmaxf(1.0, nSeeds - 1), 2.0); // stopping tolerance * tolerance scaling + stride = alpaka::getWorkDiv(acc)[0u]; + iter = 0; + notDone = true; + + int i = pfClusteringVars[topoSeedBegin].topoSeedList(); + + if (topology.cutsFromDB()) { + rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold()); + } else { + if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1) + rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1]; + else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP) + rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1]; + else { + rhENormInv = 0.; + printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer()); + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (tid < nSeeds) + seeds[tid] = pfClusteringVars[topoSeedBegin + tid].topoSeedList(); + if (tid < nRHNotSeed - 1) + rechits[tid] = + fracView[pfClusteringVars[pfClusteringVars[topoSeedBegin].topoSeedList()].seedFracOffsets() + tid + 1] + .pfrhIdx(); + + alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if (once_per_block(acc)) { + 
printf("\n===========================================================================================\n"); + printf("Processing topo cluster %d with nSeeds = %d nRHTopo = %d and seeds (", topoId, nSeeds, nRHTopo); + for (int s = 0; s < nSeeds; s++) { + if (s != 0) + printf(", "); + printf("%d", getSeedRhIdx(seeds, s)); + } + if (nRHTopo == nSeeds) { + printf(")\n\n"); + } else { + printf(") and other rechits ("); + for (int r = 1; r < nRHNotSeed; r++) { + if (r != 1) + printf(", "); + if (r <= 0) { + printf("Invalid rhNum (%d) for get RhFracIdx!\n", r); + } + printf("%d", rechits[r - 1]); + } + printf(")\n\n"); + } + } + alpaka::syncBlockThreads(acc); // all (or none) threads call sync + } + + // Set initial cluster position (energy) to seed rechit position (energy) + if (tid < nSeeds) { + int i = getSeedRhIdx(seeds, tid); + clusterPos[tid] = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.0}; + prevClusterPos[tid] = clusterPos[tid]; + clusterEnergy[tid] = pfRecHits[i].energy(); + for (int r = 0; r < (nRHNotSeed - 1); r++) { + fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].pfrhIdx() = rechits[r]; + fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].frac() = -1.; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + int rhThreadIdx = -1; + Position4 rhThreadPos; + if (tid < (nRHNotSeed - 1)) { + rhThreadIdx = rechits[tid]; // Index when thread represents rechit + rhThreadPos = Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.}; + } + + // Neighbors when threadIdx represents seed + int seedThreadIdx = -1; + Neighbours4 seedNeighbors = Neighbours4{-9, -9, -9, -9}; + float seedEnergy = -1.; + Position4 seedInitClusterPos = Position4{0., 0., 0., 0.}; + if (tid < nSeeds) { + if constexpr (debug) + printf("tid: %d\n", tid); + seedThreadIdx = getSeedRhIdx(seeds, tid); + seedNeighbors = Neighbours4{pfRecHits[seedThreadIdx].neighbours()(0), + 
pfRecHits[seedThreadIdx].neighbours()(1), + pfRecHits[seedThreadIdx].neighbours()(2), + pfRecHits[seedThreadIdx].neighbours()(3)}; + seedEnergy = pfRecHits[seedThreadIdx].energy(); + + // Compute initial cluster position shift for seed + updateClusterPos(pfClusParams, seedInitClusterPos, 1., seedThreadIdx, pfRecHits, rhENormInv); + } + + do { + if constexpr (debug) { + if (once_per_block(acc)) + printf("\n--- Now on iter %d for topoId %d ---\n", iter, topoId); + } + + // Reset rhFracSum + rhFracSum[tid] = 0.; + if (once_per_block(acc)) + diff2 = -1; + + if (tid < (nRHNotSeed - 1)) { + for (int s = 0; s < nSeeds; s++) { + float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) + + (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) + + (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z); + + float d2 = dist2 / pfClusParams.showerSigma2(); + float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2); + + rhFracSum[tid] += fraction; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (tid < (nRHNotSeed - 1)) { + for (int s = 0; s < nSeeds; s++) { + int i = seeds[s]; + float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) + + (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) + + (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z); + + float d2 = dist2 / pfClusParams.showerSigma2(); + float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2); + + if (rhFracSum[tid] > pfClusParams.minFracTot()) { + float fracpct = fraction / rhFracSum[tid]; + if (fracpct > cutoffFraction || (d2 < cutoffDistance && fracpct > pfClusParams.minFracToKeep())) { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = fracpct; + } else { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1; + } + } else { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1; + } + } + } + 
alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if (once_per_block(acc)) + printf("Computing cluster position for topoId %d\n", topoId); + } + + // Reset cluster position and energy + if (tid < nSeeds) { + clusterPos[tid] = seedInitClusterPos; + clusterEnergy[tid] = seedEnergy; + if constexpr (debug) { + printf("Cluster %d (seed %d) has energy %f\tpos = (%f, %f, %f, %f)\n", + tid, + seeds[tid], + clusterEnergy[tid], + clusterPos[tid].x, + clusterPos[tid].y, + clusterPos[tid].z, + clusterPos[tid].w); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Recalculate position + if (tid < nSeeds) { + for (int r = 0; r < nRHNotSeed - 1; r++) { + int j = rechits[r]; + float frac = getRhFrac(pfClusteringVars, topoSeedBegin, fracView, tid, r + 1); + + if (frac > -0.5) { + clusterEnergy[tid] += frac * pfRecHits[j].energy(); + + if (nSeeds == 1 || j == seedNeighbors.x || j == seedNeighbors.y || j == seedNeighbors.z || + j == seedNeighbors.w) + updateClusterPos(pfClusParams, clusterPos[tid], frac, j, pfRecHits, rhENormInv); + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Position normalization + if (tid < nSeeds) { + if (clusterPos[tid].w >= pfClusParams.minAllowedNormalization()) { + // Divide by position norm + clusterPos[tid].x /= clusterPos[tid].w; + clusterPos[tid].y /= clusterPos[tid].w; + clusterPos[tid].z /= clusterPos[tid].w; + + if constexpr (debug) + printf("\tCluster %d (seed %d) energy = %f\tposition = (%f, %f, %f)\n", + tid, + seedThreadIdx, + clusterEnergy[tid], + clusterPos[tid].x, + clusterPos[tid].y, + clusterPos[tid].z); + } else { + if constexpr (debug) + printf("\tCluster %d (seed %d) position norm (%f) less than minimum (%f)\n", + tid, + seedThreadIdx, + clusterPos[tid].w, + pfClusParams.minAllowedNormalization()); + clusterPos[tid].x = 0.0; + clusterPos[tid].y = 0.0; + clusterPos[tid].z = 0.0; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (tid 
< nSeeds) { + float delta2 = dR2(prevClusterPos[tid], clusterPos[tid]); + if constexpr (debug) + printf("\tCluster %d (seed %d) has delta2 = %f\n", tid, seeds[tid], delta2); + atomicMaxF(acc, &diff2, delta2); + prevClusterPos[tid] = clusterPos[tid]; // Save clusterPos + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (once_per_block(acc)) { + float tol2 = tol * tol; + iter++; + notDone = (diff2 > tol2) && ((unsigned int)iter < pfClusParams.maxIterations()); + if constexpr (debug) { + if (diff2 > tol2) + printf("\tTopoId %d has diff2 = %f greater than squared tolerance %f (continuing)\n", topoId, diff2, tol2); + else if constexpr (debug) + printf("\tTopoId %d has diff2 = %f LESS than squared tolerance %f (terminating!)\n", topoId, diff2, tol2); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + } while (notDone); // shared variable condition ensures synchronization is well defined + if (once_per_block(acc)) + // Fill PFCluster-level info + if (tid < nSeeds) { + int rhIdx = pfClusteringVars[tid + pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); + int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx(); + clusterView[seedIdx].energy() = clusterEnergy[tid]; + clusterView[seedIdx].x() = clusterPos[tid].x; + clusterView[seedIdx].y() = clusterPos[tid].y; + clusterView[seedIdx].z() = clusterPos[tid].z; + } + } + + // Process very large exotic clusters, from nSeeds > 400 and non-seeds > 1500 + // Uses global memory access + // Device function designed to be called by all threads of a given block + template >> + ALPAKA_FN_ACC static void hcalFastCluster_exotic(const TAcc& acc, + reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + int topoId, + int nSeeds, + int nRHTopo, + reco::PFRecHitDeviceCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + 
reco::PFRecHitFractionDeviceCollection::View fracView, + Position4* __restrict__ globalClusterPos, + Position4* __restrict__ globalPrevClusterPos, + float* __restrict__ globalClusterEnergy, + float* __restrict__ globalRhFracSum, + int* __restrict__ globalSeeds, + int* __restrict__ globalRechits) { + int& nRHNotSeed = alpaka::declareSharedVar(acc); + int& blockIdx = alpaka::declareSharedVar(acc); + int& topoSeedBegin = alpaka::declareSharedVar(acc); + int& stride = alpaka::declareSharedVar(acc); + int& iter = alpaka::declareSharedVar(acc); + float& tol = alpaka::declareSharedVar(acc); + float& diff2 = alpaka::declareSharedVar(acc); + float& rhENormInv = alpaka::declareSharedVar(acc); + bool& notDone = alpaka::declareSharedVar(acc); + + blockIdx = maxTopoInput * alpaka::getIdx(acc)[0u]; + Position4* clusterPos = globalClusterPos + blockIdx; + Position4* prevClusterPos = globalPrevClusterPos + blockIdx; + float* clusterEnergy = globalClusterEnergy + blockIdx; + float* rhFracSum = globalRhFracSum + blockIdx; + int* seeds = globalSeeds + blockIdx; + int* rechits = globalRechits + blockIdx; + + if (once_per_block(acc)) { + nRHNotSeed = nRHTopo - nSeeds + 1; // 1 + (# rechits per topoId that are NOT seeds) + topoSeedBegin = pfClusteringVars[topoId].topoSeedOffsets(); + tol = pfClusParams.stoppingTolerance() * + powf(fmaxf(1.0, nSeeds - 1), 2.0); // stopping tolerance * tolerance scaling + stride = alpaka::getWorkDiv(acc)[0u]; + iter = 0; + notDone = true; + + int i = pfClusteringVars[topoSeedBegin].topoSeedList(); + + if (topology.cutsFromDB()) { + rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold()); + } else { + if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1) + rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1]; + else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP) + rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1]; + else { + rhENormInv = 0.; + printf("Rechit %d has invalid layer 
%d!\n", i, pfRecHits[i].layer()); + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + for (int n = alpaka::getIdx(acc)[0u]; n < nRHTopo; n += stride) { + if (n < nSeeds) + seeds[n] = pfClusteringVars[topoSeedBegin + n].topoSeedList(); + if (n < nRHNotSeed - 1) + rechits[n] = + fracView[pfClusteringVars[pfClusteringVars[topoSeedBegin].topoSeedList()].seedFracOffsets() + n + 1] + .pfrhIdx(); + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if (once_per_block(acc)) { + printf("\n===========================================================================================\n"); + printf("Processing topo cluster %d with nSeeds = %d nRHTopo = %d and seeds (", topoId, nSeeds, nRHTopo); + for (int s = 0; s < nSeeds; s++) { + if (s != 0) + printf(", "); + printf("%d", getSeedRhIdx(seeds, s)); + } + if (nRHTopo == nSeeds) { + printf(")\n\n"); + } else { + printf(") and other rechits ("); + for (int r = 1; r < nRHNotSeed; r++) { + if (r != 1) + printf(", "); + if (r <= 0) { + printf("Invalid rhNum (%d) for get RhFracIdx!\n", r); + } + printf("%d", rechits[r - 1]); + } + printf(")\n\n"); + } + } + alpaka::syncBlockThreads(acc); // all (or none) threads call sync + } + + // Set initial cluster position (energy) to seed rechit position (energy) + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int i = seeds[s]; + clusterPos[s] = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.0}; + prevClusterPos[s] = clusterPos[s]; + clusterEnergy[s] = pfRecHits[i].energy(); + for (int r = 0; r < (nRHNotSeed - 1); r++) { + fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].pfrhIdx() = rechits[r]; + fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].frac() = -1.; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + do { + if constexpr (debug) { + if (once_per_block(acc)) + printf("\n--- Now on iter %d for topoId %d ---\n", iter, topoId); + } + + if (once_per_block(acc)) + 
diff2 = -1; + // Reset rhFracSum + for (int tid = alpaka::getIdx(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) { + rhFracSum[tid] = 0.; + int rhThreadIdx = rechits[tid]; + Position4 rhThreadPos = + Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.}; + for (int s = 0; s < nSeeds; s++) { + float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) + + (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) + + (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z); + + float d2 = dist2 / pfClusParams.showerSigma2(); + float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2); + + rhFracSum[tid] += fraction; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + for (int tid = alpaka::getIdx(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) { + int rhThreadIdx = rechits[tid]; + Position4 rhThreadPos = + Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.}; + for (int s = 0; s < nSeeds; s++) { + int i = seeds[s]; + float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) + + (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) + + (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z); + + float d2 = dist2 / pfClusParams.showerSigma2(); + float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2); + + if (rhFracSum[tid] > pfClusParams.minFracTot()) { + float fracpct = fraction / rhFracSum[tid]; + if (fracpct > cutoffFraction || (d2 < cutoffDistance && fracpct > pfClusParams.minFracToKeep())) { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = fracpct; + } else { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1; + } + } else { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1; + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if 
(once_per_block(acc)) + printf("Computing cluster position for topoId %d\n", topoId); + } + + // Reset cluster position and energy + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int seedRhIdx = getSeedRhIdx(seeds, s); + float norm = logf(pfRecHits[seedRhIdx].energy() * rhENormInv); + clusterPos[s] = Position4{ + pfRecHits[seedRhIdx].x() * norm, pfRecHits[seedRhIdx].y() * norm, pfRecHits[seedRhIdx].z() * norm, norm}; + clusterEnergy[s] = pfRecHits[seedRhIdx].energy(); + if constexpr (debug) { + printf("Cluster %d (seed %d) has energy %f\tpos = (%f, %f, %f, %f)\n", + s, + seeds[s], + clusterEnergy[s], + clusterPos[s].x, + clusterPos[s].y, + clusterPos[s].z, + clusterPos[s].w); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Recalculate position + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int seedRhIdx = getSeedRhIdx(seeds, s); + for (int r = 0; r < nRHNotSeed - 1; r++) { + int j = rechits[r]; + float frac = getRhFrac(pfClusteringVars, topoSeedBegin, fracView, s, r + 1); + + if (frac > -0.5) { + clusterEnergy[s] += frac * pfRecHits[j].energy(); + + if (nSeeds == 1 || j == pfRecHits[seedRhIdx].neighbours()(0) || j == pfRecHits[seedRhIdx].neighbours()(1) || + j == pfRecHits[seedRhIdx].neighbours()(2) || j == pfRecHits[seedRhIdx].neighbours()(3)) + updateClusterPos(pfClusParams, clusterPos[s], frac, j, pfRecHits, rhENormInv); + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Position normalization + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + if (clusterPos[s].w >= pfClusParams.minAllowedNormalization()) { + // Divide by position norm + clusterPos[s].x /= clusterPos[s].w; + clusterPos[s].y /= clusterPos[s].w; + clusterPos[s].z /= clusterPos[s].w; + + if constexpr (debug) + printf("\tCluster %d (seed %d) energy = %f\tposition = (%f, %f, %f)\n", + s, + seeds[s], + clusterEnergy[s], + clusterPos[s].x, + clusterPos[s].y, + clusterPos[s].z); + } else { + 
if constexpr (debug) + printf("\tCluster %d (seed %d) position norm (%f) less than minimum (%f)\n", + s, + seeds[s], + clusterPos[s].w, + pfClusParams.minAllowedNormalization()); + clusterPos[s].x = 0.0; + clusterPos[s].y = 0.0; + clusterPos[s].z = 0.0; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + float delta2 = dR2(prevClusterPos[s], clusterPos[s]); + if constexpr (debug) + printf("\tCluster %d (seed %d) has delta2 = %f\n", s, seeds[s], delta2); + atomicMaxF(acc, &diff2, delta2); + prevClusterPos[s] = clusterPos[s]; // Save clusterPos + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (once_per_block(acc)) { + float tol2 = tol * tol; + iter++; + notDone = (diff2 > tol2) && ((unsigned int)iter < pfClusParams.maxIterations()); + if constexpr (debug) { + if (diff2 > tol2) + printf("\tTopoId %d has diff2 = %f greater than squared tolerance %f (continuing)\n", topoId, diff2, tol2); + else if constexpr (debug) + printf("\tTopoId %d has diff2 = %f LESS than squared tolerance %f (terminating!)\n", topoId, diff2, tol2); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + } while (notDone); // shared variable ensures synchronization is well defined + if (once_per_block(acc)) + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int rhIdx = pfClusteringVars[s + pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); + int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx(); + clusterView[seedIdx].energy() = pfRecHits[s].energy(); + clusterView[seedIdx].x() = pfRecHits[s].x(); + clusterView[seedIdx].y() = pfRecHits[s].y(); + clusterView[seedIdx].z() = pfRecHits[s].z(); + } + alpaka::syncBlockThreads(acc); // all threads call sync + } + + // Process clusters with up to 400 seeds and 1500 non seeds using shared memory + // Device function designed to be called by all threads of a given block + template >> + ALPAKA_FN_ACC static void 
hcalFastCluster_multiSeedIterative( + const TAcc& acc, + reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + int topoId, + int nSeeds, + int nRHTopo, + reco::PFRecHitDeviceCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + reco::PFRecHitFractionDeviceCollection::View fracView) { + int& nRHNotSeed = alpaka::declareSharedVar(acc); + int& topoSeedBegin = alpaka::declareSharedVar(acc); + int& stride = alpaka::declareSharedVar(acc); + int& iter = alpaka::declareSharedVar(acc); + float& tol = alpaka::declareSharedVar(acc); + float& diff2 = alpaka::declareSharedVar(acc); + float& rhENormInv = alpaka::declareSharedVar(acc); + bool& notDone = alpaka::declareSharedVar(acc); + + auto& clusterPos = alpaka::declareSharedVar(acc); + auto& prevClusterPos = alpaka::declareSharedVar(acc); + auto& clusterEnergy = alpaka::declareSharedVar(acc); + auto& rhFracSum = alpaka::declareSharedVar(acc); + auto& seeds = alpaka::declareSharedVar(acc); + auto& rechits = alpaka::declareSharedVar(acc); + + if (once_per_block(acc)) { + nRHNotSeed = nRHTopo - nSeeds + 1; // 1 + (# rechits per topoId that are NOT seeds) + topoSeedBegin = pfClusteringVars[topoId].topoSeedOffsets(); + tol = pfClusParams.stoppingTolerance() * // stopping tolerance * tolerance scaling + powf(fmaxf(1.0, nSeeds - 1), 2.0); + stride = alpaka::getWorkDiv(acc)[0u]; + iter = 0; + notDone = true; + + int i = pfClusteringVars[topoSeedBegin].topoSeedList(); + + if (topology.cutsFromDB()) { + rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold()); + } else { + if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1) + rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1]; + else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP) + rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1]; + 
else { + rhENormInv = 0.; + printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer()); + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + for (int n = alpaka::getIdx(acc)[0u]; n < nRHTopo; n += stride) { + if (n < nSeeds) + seeds[n] = pfClusteringVars[topoSeedBegin + n].topoSeedList(); + if (n < nRHNotSeed - 1) + rechits[n] = + fracView[pfClusteringVars[pfClusteringVars[topoSeedBegin].topoSeedList()].seedFracOffsets() + n + 1] + .pfrhIdx(); + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if (once_per_block(acc)) { + printf("\n===========================================================================================\n"); + printf("Processing topo cluster %d with nSeeds = %d nRHTopo = %d and seeds (", topoId, nSeeds, nRHTopo); + for (int s = 0; s < nSeeds; s++) { + if (s != 0) + printf(", "); + printf("%d", getSeedRhIdx(seeds, s)); + } + if (nRHTopo == nSeeds) { + printf(")\n\n"); + } else { + printf(") and other rechits ("); + for (int r = 1; r < nRHNotSeed; r++) { + if (r != 1) + printf(", "); + if (r <= 0) { + printf("Invalid rhNum (%d) for get RhFracIdx!\n", r); + } + printf("%d", rechits[r - 1]); + } + printf(")\n\n"); + } + } + alpaka::syncBlockThreads(acc); // all (or none) threads call sync + } + + // Set initial cluster position (energy) to seed rechit position (energy) + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int i = seeds[s]; + clusterPos[s] = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.0}; + prevClusterPos[s] = clusterPos[s]; + clusterEnergy[s] = pfRecHits[i].energy(); + for (int r = 0; r < (nRHNotSeed - 1); r++) { + fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].pfrhIdx() = rechits[r]; + fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].frac() = -1.; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + do { + if constexpr (debug) { + if (once_per_block(acc)) + printf("\n--- Now on iter %d for 
topoId %d ---\n", iter, topoId); + } + + if (once_per_block(acc)) + diff2 = -1; + // Reset rhFracSum + for (int tid = alpaka::getIdx(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) { + rhFracSum[tid] = 0.; + int rhThreadIdx = rechits[tid]; + Position4 rhThreadPos = + Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.}; + for (int s = 0; s < nSeeds; s++) { + float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) + + (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) + + (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z); + + float d2 = dist2 / pfClusParams.showerSigma2(); + float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2); + + rhFracSum[tid] += fraction; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + for (int tid = alpaka::getIdx(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) { + int rhThreadIdx = rechits[tid]; + Position4 rhThreadPos = + Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.}; + for (int s = 0; s < nSeeds; s++) { + int i = seeds[s]; + float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) + + (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) + + (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z); + + float d2 = dist2 / pfClusParams.showerSigma2(); + float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2); + + if (rhFracSum[tid] > pfClusParams.minFracTot()) { + float fracpct = fraction / rhFracSum[tid]; + if (fracpct > cutoffFraction || (d2 < cutoffDistance && fracpct > pfClusParams.minFracToKeep())) { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = fracpct; + } else { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1; + } + } else { + fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1; + } + } + } + 
alpaka::syncBlockThreads(acc); // all threads call sync + + if constexpr (debug) { + if (once_per_block(acc)) + printf("Computing cluster position for topoId %d\n", topoId); + } + + // Reset cluster position and energy + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int seedRhIdx = getSeedRhIdx(seeds, s); + float norm = logf(pfRecHits[seedRhIdx].energy() * rhENormInv); + clusterPos[s] = Position4{ + pfRecHits[seedRhIdx].x() * norm, pfRecHits[seedRhIdx].y() * norm, pfRecHits[seedRhIdx].z() * norm, norm}; + clusterEnergy[s] = pfRecHits[seedRhIdx].energy(); + if constexpr (debug) { + printf("Cluster %d (seed %d) has energy %f\tpos = (%f, %f, %f, %f)\n", + s, + seeds[s], + clusterEnergy[s], + clusterPos[s].x, + clusterPos[s].y, + clusterPos[s].z, + clusterPos[s].w); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Recalculate position + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int seedRhIdx = getSeedRhIdx(seeds, s); + for (int r = 0; r < nRHNotSeed - 1; r++) { + int j = rechits[r]; + float frac = getRhFrac(pfClusteringVars, topoSeedBegin, fracView, s, r + 1); + + if (frac > -0.5) { + clusterEnergy[s] += frac * pfRecHits[j].energy(); + + if (nSeeds == 1 || j == pfRecHits[seedRhIdx].neighbours()(0) || j == pfRecHits[seedRhIdx].neighbours()(1) || + j == pfRecHits[seedRhIdx].neighbours()(2) || j == pfRecHits[seedRhIdx].neighbours()(3)) + updateClusterPos(pfClusParams, clusterPos[s], frac, j, pfRecHits, rhENormInv); + } + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Position normalization + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + if (clusterPos[s].w >= pfClusParams.minAllowedNormalization()) { + // Divide by position norm + clusterPos[s].x /= clusterPos[s].w; + clusterPos[s].y /= clusterPos[s].w; + clusterPos[s].z /= clusterPos[s].w; + + if constexpr (debug) + printf("\tCluster %d (seed %d) energy = %f\tposition = (%f, %f, %f)\n", + s, + seeds[s], + 
clusterEnergy[s], + clusterPos[s].x, + clusterPos[s].y, + clusterPos[s].z); + } else { + if constexpr (debug) + printf("\tCluster %d (seed %d) position norm (%f) less than minimum (%f)\n", + s, + seeds[s], + clusterPos[s].w, + pfClusParams.minAllowedNormalization()); + clusterPos[s].x = 0.0; + clusterPos[s].y = 0.0; + clusterPos[s].z = 0.0; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + float delta2 = dR2(prevClusterPos[s], clusterPos[s]); + if constexpr (debug) + printf("\tCluster %d (seed %d) has delta2 = %f\n", s, seeds[s], delta2); + atomicMaxF(acc, &diff2, delta2); + prevClusterPos[s] = clusterPos[s]; // Save clusterPos + } + alpaka::syncBlockThreads(acc); // all threads call sync + + if (once_per_block(acc)) { + float tol2 = tol * tol; + iter++; + notDone = (diff2 > tol2) && ((unsigned int)iter < pfClusParams.maxIterations()); + if constexpr (debug) { + if (diff2 > tol2) + printf("\tTopoId %d has diff2 = %f greater than tolerance %f (continuing)\n", topoId, diff2, tol2); + else if constexpr (debug) + printf("\tTopoId %d has diff2 = %f LESS than tolerance %f (terminating!)\n", topoId, diff2, tol2); + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + } while (notDone); // shared variable ensures synchronization is well defined + if (once_per_block(acc)) + for (int s = alpaka::getIdx(acc)[0u]; s < nSeeds; s += stride) { + int rhIdx = pfClusteringVars[s + pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); + int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx(); + clusterView[seedIdx].energy() = pfRecHits[s].energy(); + clusterView[seedIdx].x() = pfRecHits[s].x(); + clusterView[seedIdx].y() = pfRecHits[s].y(); + clusterView[seedIdx].z() = pfRecHits[s].z(); + } + } + + // Seeding using local energy maxima + class SeedingTopoThresh { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + reco::PFClusteringVarsDeviceCollection::View 
pfClusteringVars, + const reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + const reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusterDeviceCollection::View clusterView, + reco::PFRecHitFractionDeviceCollection::View fracView, + uint32_t* __restrict__ nSeeds) const { + const int nRH = pfRecHits.size(); + + if (once_per_grid(acc)) { + clusterView.size() = nRH; + } + + for (auto i : elements_with_stride(acc, nRH)) { + // Initialize arrays + pfClusteringVars[i].pfrh_isSeed() = 0; + pfClusteringVars[i].rhCount() = 0; + pfClusteringVars[i].topoSeedCount() = 0; + pfClusteringVars[i].topoRHCount() = 0; + pfClusteringVars[i].seedFracOffsets() = -1; + pfClusteringVars[i].topoSeedOffsets() = -1; + pfClusteringVars[i].topoSeedList() = -1; + clusterView[i].seedRHIdx() = -1; + + int layer = pfRecHits[i].layer(); + int depthOffset = pfRecHits[i].depth() - 1; + float energy = pfRecHits[i].energy(); + Position3 pos = Position3{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z()}; + float seedThreshold = 9999.; + float topoThreshold = 9999.; + + if (topology.cutsFromDB()) { + seedThreshold = topology[pfRecHits[i].denseId()].seedThreshold(); + topoThreshold = topology[pfRecHits[i].denseId()].noiseThreshold(); + } else { + if (layer == PFLayer::HCAL_BARREL1) { + seedThreshold = pfClusParams.seedEThresholdHB_vec()[depthOffset]; + topoThreshold = pfClusParams.topoEThresholdHB_vec()[depthOffset]; + } else if (layer == PFLayer::HCAL_ENDCAP) { + seedThreshold = pfClusParams.seedEThresholdHE_vec()[depthOffset]; + topoThreshold = pfClusParams.topoEThresholdHE_vec()[depthOffset]; + } + } + + // cmssdt.cern.ch/lxr/source/DataFormats/ParticleFlowReco/interface/PFRecHit.h#0108 + float pt2 = energy * energy * (pos.x * pos.x + pos.y * pos.y) / (pos.x * pos.x + pos.y * pos.y + pos.z * pos.z); + + // Seeding threshold test + if ((layer == PFLayer::HCAL_BARREL1 && energy > seedThreshold && pt2 > 
pfClusParams.seedPt2ThresholdHB()) || + (layer == PFLayer::HCAL_ENDCAP && energy > seedThreshold && pt2 > pfClusParams.seedPt2ThresholdHE())) { + pfClusteringVars[i].pfrh_isSeed() = 1; + for (int k = 0; k < 4; k++) { // Does this seed candidate have a higher energy than four neighbours + if (pfRecHits[i].neighbours()(k) < 0) + continue; + if (energy < pfRecHits[pfRecHits[i].neighbours()(k)].energy()) { + pfClusteringVars[i].pfrh_isSeed() = 0; + break; + } + } + if (pfClusteringVars[i].pfrh_isSeed()) + alpaka::atomicAdd(acc, nSeeds, 1u); + } + // Topo clustering threshold test + + if ((layer == PFLayer::HCAL_ENDCAP && energy > topoThreshold) || + (layer == PFLayer::HCAL_BARREL1 && energy > topoThreshold)) { + pfClusteringVars[i].pfrh_passTopoThresh() = true; + pfClusteringVars[i].pfrh_topoId() = i; + } else { + pfClusteringVars[i].pfrh_passTopoThresh() = false; + pfClusteringVars[i].pfrh_topoId() = -1; + } + } + } + }; + + // Preparation of topo inputs. Initializing topoId, egdeIdx, nEdges, edgeList + class PrepareTopoInputs { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + const reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusteringEdgeVarsDeviceCollection::View pfClusteringEdgeVars, + uint32_t* __restrict__ nSeeds) const { + const int nRH = pfRecHits.size(); + + if (once_per_grid(acc)) { + pfClusteringVars.nEdges() = nRH * 8; + pfClusteringEdgeVars[nRH].pfrh_edgeIdx() = nRH * 8; + } + for (uint32_t i : cms::alpakatools::elements_with_stride(acc, nRH)) { + pfClusteringEdgeVars[i].pfrh_edgeIdx() = i * 8; + pfClusteringVars[i].pfrh_topoId() = 0; + for (int j = 0; j < 8; j++) { // checking if neighbours exist and assigning neighbours as edges + if (pfRecHits[i].neighbours()(j) == -1) + pfClusteringEdgeVars[i * 8 + j].pfrh_edgeList() = i; + else + pfClusteringEdgeVars[i * 8 + j].pfrh_edgeList() = pfRecHits[i].neighbours()(j); + } + } + + return; + } + }; + + // 
Contraction in a single block + class TopoClusterContraction { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + const reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + uint32_t* __restrict__ nSeeds) const { + const int nRH = pfRecHits.size(); + int& totalSeedOffset = alpaka::declareSharedVar(acc); + int& totalSeedFracOffset = alpaka::declareSharedVar(acc); + + // rhCount, topoRHCount, topoSeedCount initialized earlier + if (once_per_block(acc)) { + pfClusteringVars.nTopos() = 0; + pfClusteringVars.nRHFracs() = 0; + totalSeedOffset = 0; + totalSeedFracOffset = 0; + pfClusteringVars.pcrhFracSize() = 0; + } + + alpaka::syncBlockThreads(acc); // all threads call sync + + // Now determine the number of seeds and rechits in each topo cluster [topoRHCount, topoSeedCount] + // Also get the list of topoIds (smallest rhIdx of each topo cluser) + for (int rhIdx = alpaka::getIdx(acc)[0u]; rhIdx < nRH; + rhIdx += alpaka::getWorkDiv(acc)[0u]) { + pfClusteringVars[rhIdx].rhIdxToSeedIdx() = -1; + int topoId = pfClusteringVars[rhIdx].pfrh_topoId(); + if (topoId > -1) { + // Valid topo cluster + alpaka::atomicAdd(acc, &pfClusteringVars[topoId].topoRHCount(), 1); + // Valid topoId not counted yet + if (topoId == rhIdx) { // For every topo cluster, there is one rechit that meets this condition. + int topoIdx = alpaka::atomicAdd(acc, &pfClusteringVars.nTopos(), 1); + pfClusteringVars[topoIdx].topoIds() = + topoId; // topoId: the smallest index of rechits that belong to a topo cluster. 
+ } + // This is a cluster seed + if (pfClusteringVars[rhIdx].pfrh_isSeed()) { // # of seeds in this topo cluster + alpaka::atomicAdd(acc, &pfClusteringVars[topoId].topoSeedCount(), 1); + } + } + } + + alpaka::syncBlockThreads(acc); // all threads call sync + + // Determine offsets for topo ID seed array [topoSeedOffsets] + for (int topoId = alpaka::getIdx(acc)[0u]; topoId < nRH; + topoId += alpaka::getWorkDiv(acc)[0u]) { + if (pfClusteringVars[topoId].topoSeedCount() > 0) { + // This is a valid topo ID + int offset = alpaka::atomicAdd(acc, &totalSeedOffset, pfClusteringVars[topoId].topoSeedCount()); + pfClusteringVars[topoId].topoSeedOffsets() = offset; + } + } + alpaka::syncBlockThreads(acc); // all threads call sync + + // Fill arrays of rechit indicies for each seed [topoSeedList] and rhIdx->seedIdx conversion for each seed [rhIdxToSeedIdx] + // Also fill seedRHIdx, topoId, depth + for (int rhIdx = alpaka::getIdx(acc)[0u]; rhIdx < nRH; + rhIdx += alpaka::getWorkDiv(acc)[0u]) { + int topoId = pfClusteringVars[rhIdx].pfrh_topoId(); + if (pfClusteringVars[rhIdx].pfrh_isSeed()) { + // Valid topo cluster and this rhIdx corresponds to a seed + int k = alpaka::atomicAdd(acc, &pfClusteringVars[topoId].rhCount(), 1); + int seedIdx = pfClusteringVars[topoId].topoSeedOffsets() + k; + if ((unsigned int)seedIdx >= *nSeeds) + printf("Warning(contraction) %8d > %8d should not happen, check topoId: %d has %d rh\n", + seedIdx, + *nSeeds, + topoId, + k); + pfClusteringVars[seedIdx].topoSeedList() = rhIdx; + pfClusteringVars[rhIdx].rhIdxToSeedIdx() = seedIdx; + clusterView[seedIdx].topoId() = topoId; + clusterView[seedIdx].seedRHIdx() = rhIdx; + clusterView[seedIdx].depth() = pfRecHits[rhIdx].depth(); + } + } + + alpaka::syncBlockThreads(acc); // all threads call sync + + // Determine seed offsets for rechit fraction array + for (int rhIdx = alpaka::getIdx(acc)[0u]; rhIdx < nRH; + rhIdx += alpaka::getWorkDiv(acc)[0u]) { + pfClusteringVars[rhIdx].rhCount() = 1; // Reset this 
counter array + + int topoId = pfClusteringVars[rhIdx].pfrh_topoId(); + if (pfClusteringVars[rhIdx].pfrh_isSeed() && topoId > -1) { + // Allot the total number of rechits for this topo cluster for rh fractions + int offset = alpaka::atomicAdd(acc, &totalSeedFracOffset, pfClusteringVars[topoId].topoRHCount()); + + // Add offset for this PF cluster seed + pfClusteringVars[rhIdx].seedFracOffsets() = offset; + + // Store recHitFraction offset & size information for each seed + clusterView[pfClusteringVars[rhIdx].rhIdxToSeedIdx()].rhfracOffset() = + pfClusteringVars[rhIdx].seedFracOffsets(); + clusterView[pfClusteringVars[rhIdx].rhIdxToSeedIdx()].rhfracSize() = + pfClusteringVars[topoId].topoRHCount() - pfClusteringVars[topoId].topoSeedCount() + 1; + } + } + + alpaka::syncBlockThreads(acc); // all threads call sync + + if (once_per_block(acc)) { + pfClusteringVars.pcrhFracSize() = totalSeedFracOffset; + pfClusteringVars.nRHFracs() = totalSeedFracOffset; + clusterView.nRHFracs() = totalSeedFracOffset; + clusterView.nSeeds() = *nSeeds; + clusterView.nTopos() = pfClusteringVars.nTopos(); + + if (pfClusteringVars.pcrhFracSize() > 200000) // Warning in case the fraction is too large + printf("At the end of topoClusterContraction, found large *pcrhFracSize = %d\n", + pfClusteringVars.pcrhFracSize()); + } + } + }; + + // Prefill the rechit index for all PFCluster fractions + // Optimized for GPU parallel, but works on any backend + class FillRhfIndex { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + const reco::PFRecHitHostCollection::ConstView pfRecHits, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFRecHitFractionDeviceCollection::View fracView) const { + const int nRH = pfRecHits.size(); + + for (auto index : elements_with_stride_nd(acc, {nRH, nRH})) { + const int i = index[0u]; // i is a seed index + const int j = index[1u]; // j is NOT a seed + int topoId = pfClusteringVars[i].pfrh_topoId(); + if (topoId > -1 && 
pfClusteringVars[i].pfrh_isSeed() && topoId == pfClusteringVars[j].pfrh_topoId()) { + if (!pfClusteringVars[j].pfrh_isSeed()) { // NOT a seed + int k = alpaka::atomicAdd( + acc, &pfClusteringVars[i].rhCount(), 1); // Increment the number of rechit fractions for this seed + auto fraction = fracView[pfClusteringVars[i].seedFracOffsets() + k]; + fraction.pfrhIdx() = j; + fraction.pfcIdx() = pfClusteringVars[i].rhIdxToSeedIdx(); + } else if (i == j) { // i==j is a seed rechit index + auto seed = fracView[pfClusteringVars[i].seedFracOffsets()]; + seed.pfrhIdx() = j; + seed.frac() = 1; + seed.pfcIdx() = pfClusteringVars[i].rhIdxToSeedIdx(); + } + } + } + } + }; + + class FastCluster { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + const reco::PFRecHitHostCollection::ConstView pfRecHits, + const reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + reco::PFRecHitFractionDeviceCollection::View fracView) const { + const int nRH = pfRecHits.size(); + int& topoId = alpaka::declareSharedVar(acc); + int& nRHTopo = alpaka::declareSharedVar(acc); + int& nSeeds = alpaka::declareSharedVar(acc); + + if (once_per_block(acc)) { + topoId = alpaka::getIdx(acc)[0u]; + nRHTopo = pfClusteringVars[topoId].topoRHCount(); + nSeeds = pfClusteringVars[topoId].topoSeedCount(); + } + + alpaka::syncBlockThreads(acc); // all threads call sync + + if (topoId < nRH && nRHTopo > 0 && nSeeds > 0) { + if (nRHTopo == nSeeds) { + // PF cluster is isolated seed. 
No iterations needed + if (once_per_block(acc)) { + // Fill PFCluster-level information + int rhIdx = pfClusteringVars[pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); + int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx(); + clusterView[seedIdx].energy() = pfRecHits[rhIdx].energy(); + clusterView[seedIdx].x() = pfRecHits[rhIdx].x(); + clusterView[seedIdx].y() = pfRecHits[rhIdx].y(); + clusterView[seedIdx].z() = pfRecHits[rhIdx].z(); + } + } else if constexpr (!std::is_same_v) { + // singleSeed and multiSeedParallel functions work only for GPU backend + if (nSeeds == 1) { + // Single seed cluster + hcalFastCluster_singleSeed( + acc, pfClusParams, topology, topoId, nRHTopo, pfRecHits, pfClusteringVars, clusterView, fracView); + } else if (nSeeds <= 100 && nRHTopo - nSeeds < threadsPerBlockForClustering) { + hcalFastCluster_multiSeedParallel( + acc, pfClusParams, topology, topoId, nSeeds, nRHTopo, pfRecHits, pfClusteringVars, clusterView, fracView); + } + } else if (nSeeds <= 400 && nRHTopo - nSeeds <= 1500) { + hcalFastCluster_multiSeedIterative( + acc, pfClusParams, topology, topoId, nSeeds, nRHTopo, pfRecHits, pfClusteringVars, clusterView, fracView); + } else { + if constexpr (debug) { + if (once_per_block(acc)) + printf("Topo cluster %d has %d seeds and %d rechits. 
Will be processed in next kernel.\n", + topoId, + nSeeds, + nRHTopo); + } + } + } + } + }; + + // Process very large, exotic topo clusters + class FastClusterExotic { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + const reco::PFRecHitHostCollection::ConstView pfRecHits, + const reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, + const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, + reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, + reco::PFClusterDeviceCollection::View clusterView, + reco::PFRecHitFractionDeviceCollection::View fracView, + Position4* __restrict__ globalClusterPos, + Position4* __restrict__ globalPrevClusterPos, + float* __restrict__ globalClusterEnergy, + float* __restrict__ globalRhFracSum, + int* __restrict__ globalSeeds, + int* __restrict__ globalRechits) const { + const int nRH = pfRecHits.size(); + for (int topoId = alpaka::getIdx(acc)[0u]; topoId < nRH; + topoId += blocksForExoticClusters) { + int nRHTopo = pfClusteringVars[topoId].topoRHCount(); + int nSeeds = pfClusteringVars[topoId].topoSeedCount(); + + if (nRHTopo > 0 && nSeeds > 400 && nRHTopo - nSeeds > 1500) { + hcalFastCluster_exotic(acc, + pfClusParams, + topology, + topoId, + nSeeds, + nRHTopo, + pfRecHits, + pfClusteringVars, + clusterView, + fracView, + globalClusterPos, + globalPrevClusterPos, + globalClusterEnergy, + globalRhFracSum, + globalSeeds, + globalRechits); + } + alpaka::syncBlockThreads(acc); // all threads call sync + } + } + }; + + PFClusterProducerKernel::PFClusterProducerKernel(Queue& queue, const reco::PFRecHitHostCollection& pfRecHits) + : nSeeds(cms::alpakatools::make_device_buffer(queue)), + globalClusterPos( + cms::alpakatools::make_device_buffer(queue, blocksForExoticClusters * maxTopoInput)), + globalPrevClusterPos( + cms::alpakatools::make_device_buffer(queue, blocksForExoticClusters * maxTopoInput)), + globalClusterEnergy( + cms::alpakatools::make_device_buffer(queue, 
blocksForExoticClusters * maxTopoInput)), + globalRhFracSum(cms::alpakatools::make_device_buffer(queue, blocksForExoticClusters * maxTopoInput)), + globalSeeds(cms::alpakatools::make_device_buffer(queue, blocksForExoticClusters * maxTopoInput)), + globalRechits(cms::alpakatools::make_device_buffer(queue, blocksForExoticClusters * maxTopoInput)) { + alpaka::memset(queue, nSeeds, 0x00); // Reset nSeeds + } + + void PFClusterProducerKernel::execute(Queue& queue, + const reco::PFClusterParamsDeviceCollection& params, + const reco::PFRecHitHCALTopologyDeviceCollection& topology, + reco::PFClusteringVarsDeviceCollection& pfClusteringVars, + reco::PFClusteringEdgeVarsDeviceCollection& pfClusteringEdgeVars, + const reco::PFRecHitHostCollection& pfRecHits, + reco::PFClusterDeviceCollection& pfClusters, + reco::PFRecHitFractionDeviceCollection& pfrhFractions) { + const int nRH = pfRecHits->size(); + const int threadsPerBlock = 256; + const int blocks = divide_up_by(nRH, threadsPerBlock); + + // seedingTopoThresh + alpaka::exec(queue, + make_workdiv(blocks, threadsPerBlock), + SeedingTopoThresh{}, + pfClusteringVars.view(), + params.view(), + topology.view(), + pfRecHits.view(), + pfClusters.view(), + pfrhFractions.view(), + nSeeds.data()); + // prepareTopoInputs + alpaka::exec(queue, + make_workdiv(blocks, threadsPerBlock), + PrepareTopoInputs{}, + pfRecHits.view(), + pfClusteringVars.view(), + pfClusteringEdgeVars.view(), + nSeeds.data()); + // ECLCC + alpaka::exec(queue, + make_workdiv(blocks, threadsPerBlock), + ECLCCInit{}, + pfRecHits.view(), + pfClusteringVars.view(), + pfClusteringEdgeVars.view()); + alpaka::exec(queue, + make_workdiv(blocks, threadsPerBlock), + ECLCCCompute1{}, + pfRecHits.view(), + pfClusteringVars.view(), + pfClusteringEdgeVars.view()); + alpaka::exec(queue, + make_workdiv(blocks, threadsPerBlock), + ECLCCFlatten{}, + pfRecHits.view(), + pfClusteringVars.view(), + pfClusteringEdgeVars.view()); + // topoClusterContraction + alpaka::exec(queue, + 
make_workdiv(1, threadsPerBlockForClustering), + TopoClusterContraction{}, + pfRecHits.view(), + pfClusteringVars.view(), + pfClusters.view(), + nSeeds.data()); + + // fillRhfIndex + alpaka::exec(queue, + make_workdiv({divide_up_by(nRH, 32), divide_up_by(nRH, 32)}, {32, 32}), + FillRhfIndex{}, + pfRecHits.view(), + pfClusteringVars.view(), + pfrhFractions.view()); + + // Run fastCluster + alpaka::exec(queue, + make_workdiv(nRH, threadsPerBlockForClustering), + FastCluster{}, + pfRecHits.view(), + params.view(), + topology.view(), + pfClusteringVars.view(), + pfClusters.view(), + pfrhFractions.view()); + // exotic clustering kernel + alpaka::exec(queue, + make_workdiv(blocksForExoticClusters, + threadsPerBlockForClustering), // uses 4 blocks to minimize memory usage + FastClusterExotic{}, + pfRecHits.view(), + params.view(), + topology.view(), + pfClusteringVars.view(), + pfClusters.view(), + pfrhFractions.view(), + globalClusterPos.data(), + globalPrevClusterPos.data(), + globalClusterEnergy.data(), + globalRhFracSum.data(), + globalSeeds.data(), + globalRechits.data()); + } + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.h b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.h new file mode 100644 index 0000000000000..715d484f3120e --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/plugins/alpaka/PFClusterSoAProducerKernel.h @@ -0,0 +1,64 @@ +#ifndef RecoParticleFlow_PFClusterProducer_PFClusterProducerAlpakaKernel_h +#define RecoParticleFlow_PFClusterProducer_PFClusterProducerAlpakaKernel_h + +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitDeviceCollection.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitHostCollection.h" +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFClusterDeviceCollection.h" +#include "DataFormats/ParticleFlowReco/interface/alpaka/PFRecHitFractionDeviceCollection.h" +#include 
"RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h" +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringVarsDeviceCollection.h" +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusteringEdgeVarsDeviceCollection.h" +#include "RecoParticleFlow/PFRecHitProducer/interface/PFRecHitTopologyRecord.h" +#include "RecoParticleFlow/PFRecHitProducer/interface/alpaka/PFRecHitTopologyDeviceCollection.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + namespace reco::pfClustering { + struct Position4 { + float x; + float y; + float z; + float w; + }; + + struct Position3 { + float x; + float y; + float z; + }; + + struct Neighbours4 { + int x; + int y; + int z; + int w; + }; + } // namespace reco::pfClustering + + class PFClusterProducerKernel { + public: + PFClusterProducerKernel(Queue& queue, const reco::PFRecHitHostCollection& pfRecHits); + + void execute(Queue& queue, + const reco::PFClusterParamsDeviceCollection& params, + const reco::PFRecHitHCALTopologyDeviceCollection& topology, + reco::PFClusteringVarsDeviceCollection& pfClusteringVars, + reco::PFClusteringEdgeVarsDeviceCollection& pfClusteringEdgeVars, + const reco::PFRecHitHostCollection& pfRecHits, + reco::PFClusterDeviceCollection& pfClusters, + reco::PFRecHitFractionDeviceCollection& pfrhFractions); + + private: + cms::alpakatools::device_buffer nSeeds; + cms::alpakatools::device_buffer globalClusterPos; + cms::alpakatools::device_buffer globalPrevClusterPos; + cms::alpakatools::device_buffer globalClusterEnergy; + cms::alpakatools::device_buffer globalRhFracSum; + cms::alpakatools::device_buffer globalSeeds; + cms::alpakatools::device_buffer globalRechits; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif diff --git a/RecoParticleFlow/PFClusterProducer/python/particleFlowCluster_cff.py b/RecoParticleFlow/PFClusterProducer/python/particleFlowCluster_cff.py index 
415c3b55d4b45..3436581964004 100644 --- a/RecoParticleFlow/PFClusterProducer/python/particleFlowCluster_cff.py +++ b/RecoParticleFlow/PFClusterProducer/python/particleFlowCluster_cff.py @@ -26,7 +26,7 @@ pfClusteringECALTask = cms.Task(particleFlowRecHitECAL, particleFlowClusterECALUncorrected, particleFlowClusterECALTask) -pfClusteringECAL = cms.Sequence(pfClusteringECALTask) +pfClusteringECAL = cms.Sequence(pfClusteringECALTask) pfClusteringPSTask = cms.Task(particleFlowRecHitPS,particleFlowClusterPS) pfClusteringPS = cms.Sequence(pfClusteringPSTask) @@ -85,3 +85,12 @@ _phase2_timing_particleFlowClusterECALTask) phase2_timing.toModify(particleFlowClusterECAL, inputECAL = 'particleFlowTimeAssignerECAL') + +# Replace HBHE rechit and clustering with Alpaka modules + +from Configuration.ProcessModifiers.alpaka_cff import alpaka + +def _addProcessPFClusterAlpaka(process): + process.load("RecoParticleFlow.PFClusterProducer.pfClusterHBHEAlpaka_cff") + +modifyConfigurationPFClusterAlpaka_ = alpaka.makeProcessModifier(_addProcessPFClusterAlpaka) diff --git a/RecoParticleFlow/PFClusterProducer/python/pfClusterHBHEAlpaka_cff.py b/RecoParticleFlow/PFClusterProducer/python/pfClusterHBHEAlpaka_cff.py new file mode 100644 index 0000000000000..631eee2cec974 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/python/pfClusterHBHEAlpaka_cff.py @@ -0,0 +1,97 @@ +import FWCore.ParameterSet.Config as cms + +from Configuration.ProcessModifiers.alpaka_cff import alpaka + +from RecoParticleFlow.PFRecHitProducer.hcalRecHitSoAProducer_cfi import hcalRecHitSoAProducer as _hcalRecHitSoAProducer +from RecoParticleFlow.PFRecHitProducer.pfRecHitHCALParamsESProducer_cfi import pfRecHitHCALParamsESProducer as _pfRecHitHCALParamsESProducer +from RecoParticleFlow.PFRecHitProducer.pfRecHitHCALTopologyESProducer_cfi import pfRecHitHCALTopologyESProducer as _pfRecHitHCALTopologyESProducer +from RecoParticleFlow.PFRecHitProducer.pfRecHitSoAProducerHCAL_cfi import pfRecHitSoAProducerHCAL as 
_pfRecHitSoAProducerHCAL +from RecoParticleFlow.PFRecHitProducer.legacyPFRecHitProducer_cfi import legacyPFRecHitProducer as _legacyPFRecHitProducer +from RecoParticleFlow.PFClusterProducer.pfClusterParamsESProducer_cfi import pfClusterParamsESProducer as _pfClusterParamsESProducer +from RecoParticleFlow.PFClusterProducer.pfClusterSoAProducer_cfi import pfClusterSoAProducer as _pfClusterSoAProducer +from RecoParticleFlow.PFClusterProducer.legacyPFClusterProducer_cfi import legacyPFClusterProducer as _legacyPFClusterProducer + +from RecoParticleFlow.PFClusterProducer.particleFlowCluster_cff import pfClusteringHBHEHFTask, particleFlowClusterHBHE, particleFlowRecHitHBHE, particleFlowClusterHCAL + +_alpaka_pfClusteringHBHEHFTask = pfClusteringHBHEHFTask.copy() + +pfRecHitHCALParamsRecordSource = cms.ESSource('EmptyESSource', + recordName = cms.string('PFRecHitHCALParamsRecord'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) + ) + +pfRecHitHCALTopologyRecordSource = cms.ESSource('EmptyESSource', + recordName = cms.string('PFRecHitHCALTopologyRecord'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) + ) + +pfClusterParamsRecordSource = cms.ESSource('EmptyESSource', + recordName = cms.string('JobConfigurationGPURecord'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) + ) + +hbheRecHitToSoA = _hcalRecHitSoAProducer.clone( + src = "hbhereco" + ) + +pfRecHitHCALParamsESProducer = _pfRecHitHCALParamsESProducer.clone( + energyThresholdsHB = cms.vdouble( 0.1, 0.2, 0.3, 0.3 ), + energyThresholdsHE = cms.vdouble( 0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2 ) + ) + +pfRecHitHCALTopologyESProducer = _pfRecHitHCALTopologyESProducer.clone() +pfRecHitSoAProducerHCAL = _pfRecHitSoAProducerHCAL.clone( + producers = cms.VPSet( + cms.PSet( + src = cms.InputTag("hbheRecHitToSoA"), + params = cms.ESInputTag("pfRecHitHCALParamsESProducer:"), + ) + ), + topology = "pfRecHitHCALTopologyESProducer:", + synchronise = cms.untracked.bool(False) + ) + 
+legacyPFRecHitProducer = _legacyPFRecHitProducer.clone( + src = "pfRecHitSoAProducerHCAL" + ) + +pfClusterParamsESProducer = _pfClusterParamsESProducer.clone() +pfClusterSoAProducer = _pfClusterSoAProducer.clone( + pfRecHits = 'pfRecHitSoAProducerHCAL', + topology = "pfRecHitHCALTopologyESProducer:", + pfClusterParams = 'pfClusterParamsESProducer:', + synchronise = cms.bool(False) + ) + + +legacyPFClusterProducer = _legacyPFClusterProducer.clone( + src = 'pfClusterSoAProducer', + pfClusterParams = 'pfClusterParamsESProducer:', + pfClusterBuilder = particleFlowClusterHBHE.pfClusterBuilder, + recHitsSource = 'legacyPFRecHitProducer', + PFRecHitsLabelIn = 'pfRecHitSoAProducerHCAL' + ) + + +_alpaka_pfClusteringHBHEHFTask.add(pfRecHitHCALParamsRecordSource) +_alpaka_pfClusteringHBHEHFTask.add(pfRecHitHCALTopologyRecordSource) +_alpaka_pfClusteringHBHEHFTask.add(pfClusterParamsRecordSource) +_alpaka_pfClusteringHBHEHFTask.add(hbheRecHitToSoA) +_alpaka_pfClusteringHBHEHFTask.add(pfRecHitHCALParamsESProducer) +_alpaka_pfClusteringHBHEHFTask.add(pfRecHitSoAProducerHCAL) +_alpaka_pfClusteringHBHEHFTask.add(legacyPFRecHitProducer) +_alpaka_pfClusteringHBHEHFTask.add(pfClusterParamsESProducer) +_alpaka_pfClusteringHBHEHFTask.add(pfClusterSoAProducer) + +_alpaka_pfClusteringHBHEHFTask.remove(particleFlowRecHitHBHE) +_alpaka_pfClusteringHBHEHFTask.remove(particleFlowClusterHBHE) +_alpaka_pfClusteringHBHEHFTask.remove(particleFlowClusterHCAL) +_alpaka_pfClusteringHBHEHFTask.add(particleFlowClusterHBHE) +_alpaka_pfClusteringHBHEHFTask.add(particleFlowClusterHCAL) + +alpaka.toReplaceWith(particleFlowClusterHBHE, legacyPFClusterProducer) + +alpaka.toReplaceWith(pfClusteringHBHEHFTask, _alpaka_pfClusteringHBHEHFTask) diff --git a/RecoParticleFlow/PFClusterProducer/src/PFClusterParamsHostCollection.cc b/RecoParticleFlow/PFClusterProducer/src/PFClusterParamsHostCollection.cc new file mode 100644 index 0000000000000..4d88391c4b5c1 --- /dev/null +++ 
b/RecoParticleFlow/PFClusterProducer/src/PFClusterParamsHostCollection.cc @@ -0,0 +1,4 @@ +#include "FWCore/Utilities/interface/typelookup.h" +#include "RecoParticleFlow/PFClusterProducer/interface/PFClusterParamsHostCollection.h" + +TYPELOOKUP_DATA_REG(reco::PFClusterParamsHostCollection); diff --git a/RecoParticleFlow/PFClusterProducer/src/alpaka/PFClusterParamsDeviceCollection.cc b/RecoParticleFlow/PFClusterProducer/src/alpaka/PFClusterParamsDeviceCollection.cc new file mode 100644 index 0000000000000..54a63b04ad9c0 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/src/alpaka/PFClusterParamsDeviceCollection.cc @@ -0,0 +1,4 @@ +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/typelookup.h" + +#include "RecoParticleFlow/PFClusterProducer/interface/alpaka/PFClusterParamsDeviceCollection.h" +TYPELOOKUP_ALPAKA_DATA_REG(reco::PFClusterParamsDeviceCollection); diff --git a/RecoParticleFlow/PFClusterProducer/test/test_PFRecHitAndClusterSoA.py b/RecoParticleFlow/PFClusterProducer/test/test_PFRecHitAndClusterSoA.py new file mode 100644 index 0000000000000..7cf551884a504 --- /dev/null +++ b/RecoParticleFlow/PFClusterProducer/test/test_PFRecHitAndClusterSoA.py @@ -0,0 +1,494 @@ +# Auto generated configuration file +# using: +# Revision: 1.19 +# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v +# with command line options: reHLT --processName reHLT -s HLT:@relval2021 --conditions auto:phase1_2021_realistic --datatier GEN-SIM-DIGI-RAW -n 5 --eventcontent FEVTDEBUGHLT --geometry DB:Extended --era Run3 --customise=HLTrigger/Configuration/customizeHLTforPatatrack.customizeHLTforPatatrack --filein /store/relval/CMSSW_12_3_0_pre5/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/123X_mcRun3_2021_realistic_v6-v1/10000/2639d8f2-aaa6-4a78-b7c2-9100a6717e6c.root +import FWCore.ParameterSet.Config as cms + +from Configuration.Eras.Era_Run3_cff import Run3 + +process = cms.Process('rereHLT',Run3) + +_thresholdsHB = cms.vdouble(0.8, 0.8, 0.8, 0.8) 
+_thresholdsHE = cms.vdouble(0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8) +_thresholdsHBphase1 = cms.vdouble(0.1, 0.2, 0.3, 0.3) +_thresholdsHEphase1 = cms.vdouble(0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2) +_seedingThresholdsHB = cms.vdouble(1.0, 1.0, 1.0, 1.0) +_seedingThresholdsHE = cms.vdouble(1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1) +_seedingThresholdsHBphase1 = cms.vdouble(0.125, 0.25, 0.35, 0.35) +_seedingThresholdsHEphase1 = cms.vdouble(0.1375, 0.275, 0.275, 0.275, 0.275, 0.275, 0.275) +#updated HB RecHit threshold for 2023 +_thresholdsHBphase1_2023 = cms.vdouble(0.4, 0.3, 0.3, 0.3) +#updated HB seeding threshold for 2023 +_seedingThresholdsHBphase1_2023 = cms.vdouble(0.6, 0.5, 0.5, 0.5) + +# import of standard configurations +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('Configuration.EventContent.EventContent_cff') +process.load('SimGeneral.MixingModule.mixNoPU_cfi') +process.load('Configuration.StandardSequences.GeometryRecoDB_cff') +process.load('Configuration.StandardSequences.MagneticField_cff') +process.load('HLTrigger.Configuration.HLT_GRun_cff') +process.load('Configuration.StandardSequences.EndOfProcess_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +process.load('Configuration.StandardSequences.Accelerators_cff') +process.load('HeterogeneousCore.AlpakaCore.ProcessAcceleratorAlpaka_cfi') + +process.maxEvents = cms.untracked.PSet( + #input = cms.untracked.int32(1), + #input = cms.untracked.int32(100), + input = cms.untracked.int32(1000), + output = cms.optional.untracked.allowed(cms.int32,cms.PSet) +) + +# Input source +# Need to use a file that contains HCAL/ECAL hits. 
Verify using: +# root root://eoscms.cern.ch//eos/cms/store/relval/CMSSW_13_0_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_130X_mcRun3_2022_realistic_v2_HS-v4/2590000/0088b51b-0cda-40f2-95fc-590f446624ee.root -e 'Events->Print()' -q | grep -E "hltHbhereco|hltEcalRecHit" +process.source = cms.Source("PoolSource", + #fileNames = cms.untracked.vstring('/store/relval/CMSSW_13_0_0/RelValTTbar_14TeV/GEN-SIM-DIGI-RAW/PU_130X_mcRun3_2022_realistic_v2_HS-v4/2590000/0088b51b-0cda-40f2-95fc-590f446624ee.root'), + fileNames = cms.untracked.vstring('/store/relval/CMSSW_13_0_8/RelValQCD_FlatPt_15_3000HS_14/GEN-SIM-DIGI-RAW/130X_mcRun3_2022_realistic_v3_2022-v1/2580000/0e63ba30-251b-4034-93ca-4d400aaa399e.root'), + secondaryFileNames = cms.untracked.vstring(), + #skipEvents = cms.untracked.uint32(999) +) + +process.options = cms.untracked.PSet( + IgnoreCompletely = cms.untracked.vstring(), + Rethrow = cms.untracked.vstring(), + allowUnscheduled = cms.obsolete.untracked.bool, + canDeleteEarly = cms.untracked.vstring(), + deleteNonConsumedUnscheduledModules = cms.untracked.bool(True), + dumpOptions = cms.untracked.bool(False), + emptyRunLumiMode = cms.obsolete.untracked.string, + eventSetup = cms.untracked.PSet( + forceNumberOfConcurrentIOVs = cms.untracked.PSet( + allowAnyLabel_=cms.required.untracked.uint32 + ), + numberOfConcurrentIOVs = cms.untracked.uint32(0) + ), + fileMode = cms.untracked.string('FULLMERGE'), + forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False), + makeTriggerResults = cms.obsolete.untracked.bool, + numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(0), + numberOfConcurrentRuns = cms.untracked.uint32(1), + numberOfStreams = cms.untracked.uint32(0), + numberOfThreads = cms.untracked.uint32(1), + printDependencies = cms.untracked.bool(False), + sizeOfStackForThreadsInKB = cms.optional.untracked.uint32, + throwIfIllegalParameter = cms.untracked.bool(True), + wantSummary = cms.untracked.bool(False) +) + +# Production Info 
+process.configurationMetadata = cms.untracked.PSet( + annotation = cms.untracked.string('reHLT nevts:5'), + name = cms.untracked.string('Applications'), + version = cms.untracked.string('$Revision: 1.19 $') +) + +# Output definition +process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule", + dataset = cms.untracked.PSet( + dataTier = cms.untracked.string('GEN-SIM-DIGI-RAW'), + filterName = cms.untracked.string('') + ), + fileName = cms.untracked.string('reHLT_HLT.root'), + outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands, + splitLevel = cms.untracked.int32(0) +) + +# Other statements +from HLTrigger.Configuration.CustomConfigs import ProcessName +process = ProcessName(process) + +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2022_realistic', '') + +# Path and EndPath definitions +process.endjob_step = cms.EndPath(process.endOfProcess) +process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput) + +# Schedule definition +# process.schedule imported from cff in HLTrigger.Configuration +process.schedule.extend([process.endjob_step,process.FEVTDEBUGHLToutput_step]) +from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask +associatePatAlgosToolsTask(process) + +# customisation of the process +from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC +process = customizeHLTforMC(process) + +# Add early deletion of temporary data products to reduce peak memory need +from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete +process = customiseEarlyDelete(process) + +process.load( "HLTrigger.Timer.FastTimerService_cfi" ) +if 'MessageLogger' in process.__dict__: + process.MessageLogger.TriggerSummaryProducerAOD = cms.untracked.PSet() + process.MessageLogger.L1GtTrigReport = cms.untracked.PSet() + process.MessageLogger.L1TGlobalSummary = cms.untracked.PSet() + process.MessageLogger.HLTrigReport = 
cms.untracked.PSet() + process.MessageLogger.FastReport = cms.untracked.PSet() + process.MessageLogger.ThroughputService = cms.untracked.PSet() + process.MessageLogger.cerr.FastReport = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) ) + + +##################################### +## Read command-line arguments ## +##################################### +import sys +import argparse +parser = argparse.ArgumentParser(prog=f"{sys.argv[0]} {sys.argv[1]} --", description='Test and validation of PFRecHitProducer with Alpaka') +parser.add_argument('-c', '--cal', type=str, default='HCAL', + help='Calorimeter type. Possible options: HCAL, ECAL. Default: HCAL') +parser.add_argument('-b', '--backend', type=str, default='auto', + help='Alpaka backend. Possible options: CPU, GPU, auto. Default: auto') +parser.add_argument('-s', '--synchronise', action='store_true', default=False, + help='Put synchronisation point at the end of Alpaka modules (for benchmarking performance)') +parser.add_argument('-t', '--threads', type=int, default=8, + help='Number of threads. Default: 8') +parser.add_argument('-d', '--debug', type=int, default=0, const=1, nargs="?", + help='Dump PFRecHits for first event (n>0) or first error (n<0). This applies to the n-th validation (1: Legacy vs Alpaka, 2: Legacy vs Legacy-from-Alpaka, 3: Alpaka vs Legacy-from-Alpaka). 
Default: 0') +args = parser.parse_args() + +if(args.debug and args.threads != 1): + args.threads = 1 + print("Number of threads set to 1 for debugging") + +assert args.cal.lower() in ["hcal", "ecal", "h", "e"], "Invalid calorimeter type" +hcal = args.cal.lower() in ["hcal", "h"] +CAL = "HCAL" if hcal else "ECAL" + +alpaka_backends = { + "cpu": "alpaka_serial_sync::%s", # Execute on CPU + "gpu": "alpaka_cuda_async::%s", # Execute using CUDA + "cuda": "alpaka_cuda_async::%s", # Execute using CUDA + "auto": "%s@alpaka" # Let framework choose +} +assert args.backend.lower() in alpaka_backends, "Invalid backend" +alpaka_backend_str = alpaka_backends[args.backend.lower()] + + + +######################################## +## Legacy HBHE PFRecHit producer ## +######################################## +process.hltParticleFlowRecHitHBHE = cms.EDProducer("PFRecHitProducer", + navigator = cms.PSet( + hcalEnums = cms.vint32(1, 2), + name = cms.string('PFRecHitHCALDenseIdNavigator') + ), + producers = cms.VPSet(cms.PSet( + name = cms.string('PFHBHERecHitCreator'), + qualityTests = cms.VPSet( + cms.PSet( + usePFThresholdsFromDB = cms.bool(True), + cuts = cms.VPSet( + cms.PSet( + depth = cms.vint32(1, 2, 3, 4), + detectorEnum = cms.int32(1), + threshold = cms.vdouble(0.4, 0.3, 0.3, 0.3) + ), + cms.PSet( + depth = cms.vint32(1, 2, 3, 4, 5, 6, 7), + detectorEnum = cms.int32(2), + threshold = cms.vdouble(0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2) + ) + ), + name = cms.string('PFRecHitQTestHCALThresholdVsDepth') + ), + cms.PSet( + cleaningThresholds = cms.vdouble(0.0), + flags = cms.vstring('Standard'), + maxSeverities = cms.vint32(11), + name = cms.string('PFRecHitQTestHCALChannel') + ) + ), + src = cms.InputTag("hltHbherecoLegacy") + )) +) + + +##################################### +## Legacy PFRecHit producer ## +##################################### +if hcal: + process.hltParticleFlowRecHit = cms.EDProducer("PFRecHitProducer", + navigator = cms.PSet( + hcalEnums = cms.vint32(1, 2), + name = 
cms.string('PFRecHitHCALDenseIdNavigator') + ), + producers = cms.VPSet(cms.PSet( + name = cms.string('PFHBHERecHitCreator'), + qualityTests = cms.VPSet( + cms.PSet( + usePFThresholdsFromDB = cms.bool(True), + cuts = cms.VPSet( + cms.PSet( + depth = cms.vint32(1, 2, 3, 4), + detectorEnum = cms.int32(1), + threshold = cms.vdouble(0.4, 0.3, 0.3, 0.3) + ), + cms.PSet( + depth = cms.vint32(1, 2, 3, 4, 5, 6, 7), + detectorEnum = cms.int32(2), + threshold = cms.vdouble(0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2) + ) + ), + name = cms.string('PFRecHitQTestHCALThresholdVsDepth') + ), + cms.PSet( + cleaningThresholds = cms.vdouble(0.0), + flags = cms.vstring('Standard'), + maxSeverities = cms.vint32(11), + name = cms.string('PFRecHitQTestHCALChannel') + ) + ), + src = cms.InputTag("hltHbherecoLegacy") + )) + ) +else: # ecal + qualityTestsECAL = cms.VPSet( + cms.PSet( + name = cms.string("PFRecHitQTestDBThreshold"), + applySelectionsToAllCrystals=cms.bool(True), + ), + cms.PSet( + name = cms.string("PFRecHitQTestECAL"), + cleaningThreshold = cms.double(2.0), + timingCleaning = cms.bool(True), + topologicalCleaning = cms.bool(True), + skipTTRecoveredHits = cms.bool(True) + ) + ) + process.hltParticleFlowRecHit = cms.EDProducer("PFRecHitProducer", + navigator = cms.PSet( + name = cms.string("PFRecHitECALNavigator"), + barrel = cms.PSet( ), + endcap = cms.PSet( ) + ), + producers = cms.VPSet( + cms.PSet( + name = cms.string("PFEBRecHitCreator"), + src = cms.InputTag("hltEcalRecHit","EcalRecHitsEB"), + srFlags = cms.InputTag(""), + qualityTests = qualityTestsECAL + ), + cms.PSet( + name = cms.string("PFEERecHitCreator"), + src = cms.InputTag("hltEcalRecHit","EcalRecHitsEE"), + srFlags = cms.InputTag(""), + qualityTests = qualityTestsECAL + ) + ) + ) + + +##################################### +## Alpaka PFRecHit producer ## +##################################### +# Convert legacy CaloRecHits to CaloRecHitSoA +if hcal: + process.hltParticleFlowRecHitToSoA = 
cms.EDProducer(alpaka_backend_str % "HCALRecHitSoAProducer", + src = cms.InputTag("hltHbherecoLegacy"), + synchronise = cms.untracked.bool(args.synchronise) + ) +else: # ecal + process.hltParticleFlowRecHitEBToSoA = cms.EDProducer(alpaka_backend_str % "ECALRecHitSoAProducer", + src = cms.InputTag("hltEcalRecHit","EcalRecHitsEB"), + synchronise = cms.untracked.bool(args.synchronise) + ) + process.hltParticleFlowRecHitEEToSoA = cms.EDProducer(alpaka_backend_str % "ECALRecHitSoAProducer", + src = cms.InputTag("hltEcalRecHit","EcalRecHitsEE"), + synchronise = cms.untracked.bool(args.synchronise) + ) + +# Construct topology and cut parameter information +process.pfRecHitTopologyRecordSource = cms.ESSource('EmptyESSource', + recordName = cms.string(f'PFRecHit{CAL}TopologyRecord'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) +process.pfRecHitParamsRecordSource = cms.ESSource('EmptyESSource', + recordName = cms.string(f'PFRecHit{CAL}ParamsRecord'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) +process.hltParticleFlowRecHitTopologyESProducer = cms.ESProducer(alpaka_backend_str % f"PFRecHit{CAL}TopologyESProducer", + usePFThresholdsFromDB = cms.bool(True)) +if hcal: + process.hltParticleFlowRecHitParamsESProducer = cms.ESProducer(alpaka_backend_str % "PFRecHitHCALParamsESProducer", + energyThresholdsHB = cms.vdouble( 0.4, 0.3, 0.3, 0.3 ), + energyThresholdsHE = cms.vdouble( 0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2 ) + ) +else: # ecal + process.hltParticleFlowRecHitParamsESProducer = cms.ESProducer(alpaka_backend_str % "PFRecHitECALParamsESProducer") + +# Construct PFRecHitSoA +if hcal: + process.hltParticleFlowPFRecHitAlpaka = cms.EDProducer(alpaka_backend_str % "PFRecHitSoAProducerHCAL", + producers = cms.VPSet( + cms.PSet( + src = cms.InputTag("hltParticleFlowRecHitToSoA"), + params = cms.ESInputTag("hltParticleFlowRecHitParamsESProducer:"), + ) + ), + topology = cms.ESInputTag("hltParticleFlowRecHitTopologyESProducer:"), + 
synchronise = cms.untracked.bool(args.synchronise) + ) +else: # ecal + process.hltParticleFlowPFRecHitAlpaka = cms.EDProducer(alpaka_backend_str % "PFRecHitSoAProducerECAL", + producers = cms.VPSet( + cms.PSet( + src = cms.InputTag("hltParticleFlowRecHitEBToSoA"), + params = cms.ESInputTag("hltParticleFlowRecHitParamsESProducer:") + ), + cms.PSet( + src = cms.InputTag("hltParticleFlowRecHitEEToSoA"), + params = cms.ESInputTag("hltParticleFlowRecHitParamsESProducer:") + ) + ), + topology = cms.ESInputTag("hltParticleFlowRecHitTopologyESProducer:"), + synchronise = cms.bool(args.synchronise) + ) + +# Convert Alpaka PFRecHits to legacy format (for validation) +process.hltParticleFlowAlpakaToLegacyPFRecHits = cms.EDProducer("LegacyPFRecHitProducer", + src = cms.InputTag("hltParticleFlowPFRecHitAlpaka") +) + + +##################################### +## PFRecHit validation ## +##################################### +# Validate legacy format from legacy module vs SoA format from Alpaka module +# This is the main Alpaka vs legacy test +from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer +process.hltParticleFlowPFRecHitComparison = DQMEDAnalyzer("PFRecHitProducerTest", + #caloRecHits = cms.untracked.InputTag("hltParticleFlowRecHitToSoA"), + pfRecHitsSource1 = cms.untracked.InputTag("hltParticleFlowRecHit"), + pfRecHitsSource2 = cms.untracked.InputTag("hltParticleFlowPFRecHitAlpaka"), + pfRecHitsType1 = cms.untracked.string("legacy"), + pfRecHitsType2 = cms.untracked.string("alpaka"), + title = cms.untracked.string("Legacy vs Alpaka"), + dumpFirstEvent = cms.untracked.bool(args.debug == 1), + dumpFirstError = cms.untracked.bool(args.debug == -1), + strictCompare = cms.untracked.bool(True) +) + +# Validate legacy format from legacy module vs legacy format from Alpaka module +process.hltParticleFlowAlpakaToLegacyPFRecHitsComparison1 = DQMEDAnalyzer("PFRecHitProducerTest", + pfRecHitsSource1 = cms.untracked.InputTag("hltParticleFlowRecHitHBHE"), + pfRecHitsSource2 = 
cms.untracked.InputTag("hltParticleFlowAlpakaToLegacyPFRecHits"), + pfRecHitsType1 = cms.untracked.string("legacy"), + pfRecHitsType2 = cms.untracked.string("legacy"), + title = cms.untracked.string("Legacy vs Legacy-from-Alpaka"), + dumpFirstEvent = cms.untracked.bool(args.debug == 2), + dumpFirstError = cms.untracked.bool(args.debug == -2), + strictCompare = cms.untracked.bool(True) +) + +# Validate SoA format from Alpaka module vs legacy format from Alpaka module +# This tests the SoA-to-legacy conversion module +process.hltParticleFlowAlpakaToLegacyPFRecHitsComparison2 = DQMEDAnalyzer("PFRecHitProducerTest", + pfRecHitsSource1 = cms.untracked.InputTag("hltParticleFlowPFRecHitAlpaka"), + pfRecHitsSource2 = cms.untracked.InputTag("hltParticleFlowAlpakaToLegacyPFRecHits"), + pfRecHitsType1 = cms.untracked.string("alpaka"), + pfRecHitsType2 = cms.untracked.string("legacy"), + title = cms.untracked.string("Alpaka vs Legacy-from-Alpaka"), + dumpFirstEvent = cms.untracked.bool(args.debug == 3), + dumpFirstError = cms.untracked.bool(args.debug == -3), + strictCompare = cms.untracked.bool(True) +) + +#Move Onto Clustering + +process.pfClusterParamsAlpakaESRcdSource = cms.ESSource('EmptyESSource', + recordName = cms.string('JobConfigurationGPURecord'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +from RecoParticleFlow.PFClusterProducer.pfClusterParamsESProducer_cfi import pfClusterParamsESProducer as _pfClusterParamsESProducer + +process.hltParticleFlowClusterParamsESProducer = _pfClusterParamsESProducer.clone() +process.hltParticleFlowClusterParamsESProducer.pfClusterBuilder.maxIterations = 5 + +for idx, x in enumerate(process.hltParticleFlowClusterParamsESProducer.initialClusteringStep.thresholdsByDetector): + for idy, y in enumerate(process.hltParticleFlowClusterHBHE.initialClusteringStep.thresholdsByDetector): + if x.detector == y.detector: + x.gatheringThreshold = y.gatheringThreshold +for idx, x in 
enumerate(process.hltParticleFlowClusterParamsESProducer.pfClusterBuilder.recHitEnergyNorms): + for idy, y in enumerate(process.hltParticleFlowClusterHBHE.pfClusterBuilder.recHitEnergyNorms): + if x.detector == y.detector: + x.recHitEnergyNorm = y.recHitEnergyNorm +for idx, x in enumerate(process.hltParticleFlowClusterParamsESProducer.seedFinder.thresholdsByDetector): + for idy, y in enumerate(process.hltParticleFlowClusterHBHE.seedFinder.thresholdsByDetector): + if x.detector == y.detector: + x.seedingThreshold = y.seedingThreshold + +process.hltParticleFlowPFClusterAlpaka = cms.EDProducer(alpaka_backend_str % "PFClusterSoAProducer", + pfClusterParams = cms.ESInputTag("hltParticleFlowClusterParamsESProducer:"), + topology = cms.ESInputTag("hltParticleFlowRecHitTopologyESProducer:"), + synchronise = cms.bool(args.synchronise)) +process.hltParticleFlowPFClusterAlpaka.pfRecHits = cms.InputTag("hltParticleFlowPFRecHitAlpaka") + +# Create legacy PFClusters + +process.hltParticleFlowAlpakaToLegacyPFClusters = cms.EDProducer("LegacyPFClusterProducer", + src = cms.InputTag("hltParticleFlowPFClusterAlpaka"), + pfClusterParams = cms.ESInputTag("hltParticleFlowClusterParamsESProducer:"), + pfClusterBuilder = process.hltParticleFlowClusterHBHE.pfClusterBuilder, + usePFThresholdsFromDB = cms.bool(True), + recHitsSource = cms.InputTag("hltParticleFlowAlpakaToLegacyPFRecHits")) +process.hltParticleFlowAlpakaToLegacyPFClusters.PFRecHitsLabelIn = cms.InputTag("hltParticleFlowPFRecHitAlpaka") + +process.hltParticleFlowClusterHBHE.pfClusterBuilder.maxIterations = 5 +process.hltParticleFlowClusterHBHE.usePFThresholdsFromDB = cms.bool(True) + +# Additional customization +process.FEVTDEBUGHLToutput.outputCommands = cms.untracked.vstring('drop *_*_*_*') +process.FEVTDEBUGHLToutput.outputCommands.append('keep *_hltParticleFlowRecHitToSoA_*_*') +process.FEVTDEBUGHLToutput.outputCommands.append('keep *_hltParticleFlowPFRecHitAlpaka_*_*') 
+process.FEVTDEBUGHLToutput.outputCommands.append('keep *_hltParticleFlowAlpakaToLegacyPFClusters_*_*') +process.FEVTDEBUGHLToutput.outputCommands.append('keep *_hltParticleFlowClusterHBHE_*_*') + +# Path/sequence definitions +path = process.hltHcalDigis +path += process.hltHbherecoLegacy +path += process.hltParticleFlowRecHit # Construct PFRecHits on CPU +if hcal: + path += process.hltParticleFlowRecHitToSoA # Convert legacy calorimeter hits to SoA (HCAL barrel+endcap) +else: # ecal + path += process.hltParticleFlowRecHitEBToSoA # Convert legacy calorimeter hits to SoA (ECAL barrel) + path += process.hltParticleFlowRecHitEEToSoA # Convert legacy calorimeter hits to SoA (ECAL endcap) +path += process.hltParticleFlowPFRecHitAlpaka # Construct PFRecHits SoA +path += process.hltParticleFlowRecHitHBHE # Construct Legacy PFRecHits +path += process.hltParticleFlowClusterHBHE +path += process.hltParticleFlowPFRecHitComparison # Validate Alpaka vs CPU +path += process.hltParticleFlowAlpakaToLegacyPFRecHits # Convert Alpaka PFRecHits SoA to legacy format +path += process.hltParticleFlowAlpakaToLegacyPFRecHitsComparison1 # Validate legacy-format-from-alpaka vs regular legacy format +path += process.hltParticleFlowAlpakaToLegacyPFRecHitsComparison2 # Validate Alpaka format vs legacy-format-from-alpaka + +path += process.hltParticleFlowPFClusterAlpaka +path += process.hltParticleFlowAlpakaToLegacyPFClusters + +process.PFClusterAlpakaValidationTask = cms.EndPath(path) +process.schedule = cms.Schedule(process.PFClusterAlpakaValidationTask) +process.schedule.extend([process.endjob_step,process.FEVTDEBUGHLToutput_step]) +process.options.numberOfThreads = cms.untracked.uint32(args.threads) + +# Save DQM output +process.DQMoutput = cms.OutputModule("DQMRootOutputModule", + dataset = cms.untracked.PSet( + dataTier = cms.untracked.string('DQMIO'), + filterName = cms.untracked.string('') + ), + fileName = cms.untracked.string('file:DQMIO.root'), + outputCommands = 
process.DQMEventContent.outputCommands, + splitLevel = cms.untracked.int32(0) +) +process.DQMTask = cms.EndPath(process.DQMoutput) +process.schedule.append(process.DQMTask) diff --git a/RecoParticleFlow/PFProducer/plugins/PFCandidateChecker.cc b/RecoParticleFlow/PFProducer/plugins/PFCandidateChecker.cc index 7cc2379a0ea5c..6fe62d2352a1b 100644 --- a/RecoParticleFlow/PFProducer/plugins/PFCandidateChecker.cc +++ b/RecoParticleFlow/PFProducer/plugins/PFCandidateChecker.cc @@ -24,12 +24,8 @@ class PFCandidateChecker : public edm::stream::EDAnalyzer<> { public: explicit PFCandidateChecker(const edm::ParameterSet&); - ~PFCandidateChecker() override; - void analyze(const edm::Event&, const edm::EventSetup&) override; - void beginRun(const edm::Run& r, const edm::EventSetup& c) override; - private: void printJets(const reco::PFJetCollection& pfJetsReco, const reco::PFJetCollection& pfJetsReReco) const; @@ -96,10 +92,6 @@ PFCandidateChecker::PFCandidateChecker(const edm::ParameterSet& iConfig) { << inputTagPFCandidatesReReco_; } -PFCandidateChecker::~PFCandidateChecker() {} - -void PFCandidateChecker::beginRun(const edm::Run& run, const edm::EventSetup& es) {} - void PFCandidateChecker::analyze(const Event& iEvent, const EventSetup& iSetup) { LogDebug("PFCandidateChecker") << "START event: " << iEvent.id().event() << " in run " << iEvent.id().run() << endl; diff --git a/RecoParticleFlow/PFProducer/plugins/PFPhotonTranslator.cc b/RecoParticleFlow/PFProducer/plugins/PFPhotonTranslator.cc deleted file mode 100644 index f128e0dbe6a6a..0000000000000 --- a/RecoParticleFlow/PFProducer/plugins/PFPhotonTranslator.cc +++ /dev/null @@ -1,1100 +0,0 @@ -#include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "CommonTools/ParticleFlow/interface/PFClusterWidthAlgo.h" -#include 
"DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" -#include "DataFormats/EgammaReco/interface/PreshowerCluster.h" -#include "DataFormats/EgammaReco/interface/SuperCluster.h" -#include "DataFormats/EgammaCandidates/interface/PhotonCore.h" -#include "DataFormats/EgammaCandidates/interface/Photon.h" -#include "DataFormats/VertexReco/interface/Vertex.h" -#include "DataFormats/ParticleFlowReco/interface/PFBlockElement.h" -#include "DataFormats/Math/interface/Vector3D.h" -#include "DataFormats/Math/interface/LorentzVector.h" -#include "RecoEcal/EgammaCoreTools/interface/Mustache.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" -#include "FWCore/Framework/interface/MakerMacros.h" -#include "DataFormats/Common/interface/ValueMap.h" -#include "DataFormats/EgammaReco/interface/BasicCluster.h" -#include "DataFormats/ParticleFlowReco/interface/PFCluster.h" -#include "DataFormats/Common/interface/Handle.h" -#include "DataFormats/ParticleFlowCandidate/interface/PFCandidatePhotonExtra.h" -#include "DataFormats/ParticleFlowReco/interface/PFBlock.h" -#include "CondFormats/EcalObjects/interface/EcalMustacheSCParameters.h" -#include "CondFormats/DataRecord/interface/EcalMustacheSCParametersRcd.h" - -class CaloGeometry; -class CaloTopology; -class DetId; -namespace edm { - class EventSetup; -} // namespace edm - -class PFPhotonTranslator : public edm::stream::EDProducer<> { -public: - explicit PFPhotonTranslator(const edm::ParameterSet &); - ~PFPhotonTranslator() override; - - void produce(edm::Event &, const edm::EventSetup &) override; - - typedef std::vector > > IsolationValueMaps; - -private: - // to retrieve the collection from the event - bool fetchCandidateCollection(edm::Handle &c, - const edm::InputTag &tag, - const edm::Event &iEvent) const; - - // makes a basic cluster from PFBlockElement and add it to the collection ; the corrected energy is taken - // from the PFCandidate - void createBasicCluster(const reco::PFBlockElement &, - 
reco::BasicClusterCollection &basicClusters, - std::vector &, - const reco::PFCandidate &coCandidate) const; - // makes a preshower cluster from of PFBlockElement and add it to the collection - void createPreshowerCluster(const reco::PFBlockElement &PFBE, - reco::PreshowerClusterCollection &preshowerClusters, - unsigned plane) const; - - // create the basic cluster Ptr - void createBasicClusterPtrs(const edm::OrphanHandle &basicClustersHandle); - - // create the preshower cluster Refs - void createPreshowerClusterPtrs(const edm::OrphanHandle &preshowerClustersHandle); - - // make a super cluster from its ingredients and add it to the collection - void createSuperClusters(const reco::PFCandidateCollection &, reco::SuperClusterCollection &superClusters) const; - - void createOneLegConversions(const edm::OrphanHandle &superClustersHandle, - reco::ConversionCollection &oneLegConversions); - - //create photon cores - void createPhotonCores(const edm::OrphanHandle &superClustersHandle, - const edm::OrphanHandle &oneLegConversionHandle, - reco::PhotonCoreCollection &photonCores); - - void createPhotons(reco::VertexCollection &vertexCollection, - edm::Handle &egPhotons, - const edm::OrphanHandle &photonCoresHandle, - const IsolationValueMaps &isolationValues, - reco::PhotonCollection &photons); - - const reco::PFCandidate &correspondingDaughterCandidate(const reco::PFCandidate &cand, - const reco::PFBlockElement &pfbe) const; - - edm::InputTag inputTagPFCandidates_; - std::vector inputTagIsoVals_; - std::string PFBasicClusterCollection_; - std::string PFPreshowerClusterCollection_; - std::string PFSuperClusterCollection_; - std::string PFPhotonCoreCollection_; - std::string PFPhotonCollection_; - std::string PFConversionCollection_; - std::string EGPhotonCollection_; - std::string vertexProducer_; - edm::InputTag barrelEcalHits_; - edm::InputTag endcapEcalHits_; - edm::InputTag hcalTowers_; - double hOverEConeSize_; - - // the collection of basic clusters associated to a 
photon - std::vector basicClusters_; - // the correcsponding PFCluster ref - std::vector > pfClusters_; - // the collection of preshower clusters associated to a photon - std::vector preshowerClusters_; - // the super cluster collection (actually only one) associated to a photon - std::vector superClusters_; - // the references to the basic clusters associated to a photon - std::vector basicClusterPtr_; - // the references to the basic clusters associated to a photon - std::vector preshowerClusterPtr_; - // keep track of the index of the PF Candidate - std::vector photPFCandidateIndex_; - // the list of candidatePtr - std::vector CandidatePtr_; - // the e/g SC associated - std::vector egSCRef_; - // the e/g photon associated - std::vector egPhotonRef_; - // the PF MVA and regression - std::vector pfPhotonMva_; - std::vector energyRegression_; - std::vector energyRegressionError_; - - //Vector of vector of Conversions Refs - std::vector pfConv_; - std::vector > pfSingleLegConv_; - std::vector > pfSingleLegConvMva_; - - std::vector conv1legPFCandidateIndex_; - std::vector conv2legPFCandidateIndex_; - - edm::ESHandle theCaloTopo_; - edm::ESHandle theCaloGeom_; - - // Mustache SC parameters - edm::ESGetToken ecalMustacheSCParametersToken_; - const EcalMustacheSCParameters *mustacheSCParams_; - - bool emptyIsOk_; -}; - -DEFINE_FWK_MODULE(PFPhotonTranslator); - -using namespace edm; -using namespace std; -using namespace reco; - -typedef math::XYZTLorentzVector LorentzVector; -typedef math::XYZPoint Point; -typedef math::XYZVector Vector; - -PFPhotonTranslator::PFPhotonTranslator(const edm::ParameterSet &iConfig) { - //std::cout << "PFPhotonTranslator" << std::endl; - - inputTagPFCandidates_ = iConfig.getParameter("PFCandidate"); - - edm::ParameterSet isoVals = iConfig.getParameter("isolationValues"); - inputTagIsoVals_.push_back(isoVals.getParameter("pfChargedHadrons")); - inputTagIsoVals_.push_back(isoVals.getParameter("pfPhotons")); - 
inputTagIsoVals_.push_back(isoVals.getParameter("pfNeutralHadrons")); - - PFBasicClusterCollection_ = iConfig.getParameter("PFBasicClusters"); - PFPreshowerClusterCollection_ = iConfig.getParameter("PFPreshowerClusters"); - PFSuperClusterCollection_ = iConfig.getParameter("PFSuperClusters"); - PFConversionCollection_ = iConfig.getParameter("PFConversionCollection"); - PFPhotonCoreCollection_ = iConfig.getParameter("PFPhotonCores"); - PFPhotonCollection_ = iConfig.getParameter("PFPhotons"); - - EGPhotonCollection_ = iConfig.getParameter("EGPhotons"); - - vertexProducer_ = iConfig.getParameter("primaryVertexProducer"); - - barrelEcalHits_ = iConfig.getParameter("barrelEcalHits"); - endcapEcalHits_ = iConfig.getParameter("endcapEcalHits"); - - hcalTowers_ = iConfig.getParameter("hcalTowers"); - hOverEConeSize_ = iConfig.getParameter("hOverEConeSize"); - - if (iConfig.exists("emptyIsOk")) - emptyIsOk_ = iConfig.getParameter("emptyIsOk"); - else - emptyIsOk_ = false; - - ecalMustacheSCParametersToken_ = esConsumes(); - - produces(PFBasicClusterCollection_); - produces(PFPreshowerClusterCollection_); - produces(PFSuperClusterCollection_); - produces(PFPhotonCoreCollection_); - produces(PFPhotonCollection_); - produces(PFConversionCollection_); -} - -PFPhotonTranslator::~PFPhotonTranslator() {} - -void PFPhotonTranslator::produce(edm::Event &iEvent, const edm::EventSetup &iSetup) { - //cout << "NEW EVENT"<(); - - auto psClusters_p = std::make_unique(); - - /* - auto SingleLeg_p = std::make_unique(); - */ - - reco::SuperClusterCollection outputSuperClusterCollection; - reco::ConversionCollection outputOneLegConversionCollection; - reco::PhotonCoreCollection outputPhotonCoreCollection; - reco::PhotonCollection outputPhotonCollection; - - outputSuperClusterCollection.clear(); - outputOneLegConversionCollection.clear(); - outputPhotonCoreCollection.clear(); - outputPhotonCollection.clear(); - - edm::Handle pfCandidates; - bool status = fetchCandidateCollection(pfCandidates, 
inputTagPFCandidates_, iEvent); - - edm::Handle egPhotons; - iEvent.getByLabel(EGPhotonCollection_, egPhotons); - - Handle vertexHandle; - - IsolationValueMaps isolationValues(inputTagIsoVals_.size()); - for (size_t j = 0; j < inputTagIsoVals_.size(); ++j) { - iEvent.getByLabel(inputTagIsoVals_[j], isolationValues[j]); - } - - // clear the vectors - photPFCandidateIndex_.clear(); - basicClusters_.clear(); - pfClusters_.clear(); - preshowerClusters_.clear(); - superClusters_.clear(); - basicClusterPtr_.clear(); - preshowerClusterPtr_.clear(); - CandidatePtr_.clear(); - egSCRef_.clear(); - egPhotonRef_.clear(); - pfPhotonMva_.clear(); - energyRegression_.clear(); - energyRegressionError_.clear(); - pfConv_.clear(); - pfSingleLegConv_.clear(); - pfSingleLegConvMva_.clear(); - conv1legPFCandidateIndex_.clear(); - conv2legPFCandidateIndex_.clear(); - - // loop on the candidates - //CC@@ - // we need first to create AND put the SuperCluster, - // basic clusters and presh clusters collection - // in order to get a working Handle - unsigned ncand = (status) ? 
pfCandidates->size() : 0; - - unsigned iphot = 0; - unsigned iconv1leg = 0; - unsigned iconv2leg = 0; - - for (unsigned i = 0; i < ncand; ++i) { - const reco::PFCandidate &cand = (*pfCandidates)[i]; - if (cand.particleId() != reco::PFCandidate::gamma) - continue; - //cout << "cand.mva_nothing_gamma()="< 0.001) //Found PFPhoton with PFPhoton Extras saved - { - //cout << "NEW PHOTON" << endl; - - //std::cout << "nDoubleLegConv="<conversionRef().size()<conversionRef().empty()) { - pfConv_.push_back(reco::ConversionRefVector()); - - const reco::ConversionRefVector &doubleLegConvColl = cand.photonExtraRef()->conversionRef(); - for (unsigned int iconv = 0; iconv < doubleLegConvColl.size(); iconv++) { - pfConv_[iconv2leg].push_back(doubleLegConvColl[iconv]); - } - - conv2legPFCandidateIndex_.push_back(iconv2leg); - iconv2leg++; - } else - conv2legPFCandidateIndex_.push_back(-1); - - const std::vector &singleLegConvColl = cand.photonExtraRef()->singleLegConvTrackRef(); - const std::vector &singleLegConvCollMva = cand.photonExtraRef()->singleLegConvMva(); - - //std::cout << "nSingleLegConv=" <()); - pfSingleLegConvMva_.push_back(std::vector()); - - //cout << "nTracks="<< singleLegConvColl.size()<begin(); gamIter != egPhotons->end(); ++gamIter) { - if (cand.superClusterRef() == gamIter->superCluster()) { - reco::PhotonRef PhotRef(reco::PhotonRef(egPhotons, iegphot)); - egPhotonRef_.push_back(PhotRef); - } - iegphot++; - } - - //std::cout << "Cand elements in blocks : " << cand.elementsInBlocks().size() << std::endl; - - for (unsigned iele = 0; iele < cand.elementsInBlocks().size(); ++iele) { - // first get the block - reco::PFBlockRef blockRef = cand.elementsInBlocks()[iele].first; - // - unsigned elementIndex = cand.elementsInBlocks()[iele].second; - // check it actually exists - if (blockRef.isNull()) - continue; - - // then get the elements of the block - const edm::OwnVector &elements = (*blockRef).elements(); - - const reco::PFBlockElement &pfbe(elements[elementIndex]); 
- // The first ECAL element should be the cluster associated to the GSF; defined as the seed - if (pfbe.type() == reco::PFBlockElement::ECAL) { - //std::cout << "BlockElement ECAL" << std::endl; - // the Brem photons are saved as daughter PFCandidate; this - // is convenient to access the corrected energy - // std::cout << " Found candidate " << correspondingDaughterCandidate(coCandidate,pfbe) << " " << coCandidate << std::endl; - createBasicCluster(pfbe, basicClusters_[iphot], pfClusters_[iphot], correspondingDaughterCandidate(cand, pfbe)); - } - if (pfbe.type() == reco::PFBlockElement::PS1) { - //std::cout << "BlockElement PS1" << std::endl; - createPreshowerCluster(pfbe, preshowerClusters_[iphot], 1); - } - if (pfbe.type() == reco::PFBlockElement::PS2) { - //std::cout << "BlockElement PS2" << std::endl; - createPreshowerCluster(pfbe, preshowerClusters_[iphot], 2); - } - - } // loop on the elements - - // save the basic clusters - basicClusters_p->insert(basicClusters_p->end(), basicClusters_[iphot].begin(), basicClusters_[iphot].end()); - // save the preshower clusters - psClusters_p->insert(psClusters_p->end(), preshowerClusters_[iphot].begin(), preshowerClusters_[iphot].end()); - - ++iphot; - - } // loop on PFCandidates - - //Save the basic clusters and get an handle as to be able to create valid Refs (thanks to Claude) - // std::cout << " Number of basic clusters " << basicClusters_p->size() << std::endl; - const edm::OrphanHandle bcRefProd = - iEvent.put(std::move(basicClusters_p), PFBasicClusterCollection_); - - //preshower clusters - const edm::OrphanHandle psRefProd = - iEvent.put(std::move(psClusters_p), PFPreshowerClusterCollection_); - - // now that the Basic clusters are in the event, the Ref can be created - createBasicClusterPtrs(bcRefProd); - // now that the preshower clusters are in the event, the Ref can be created - createPreshowerClusterPtrs(psRefProd); - - // and now the Super cluster can be created with valid references - //if(status) 
createSuperClusters(*pfCandidates,*superClusters_p); - if (status) - createSuperClusters(*pfCandidates, outputSuperClusterCollection); - - //std::cout << "nb superclusters in collection : "<(outputSuperClusterCollection); - const edm::OrphanHandle scRefProd = - iEvent.put(std::move(superClusters_p), PFSuperClusterCollection_); - - /* - int ipho=0; - for (reco::SuperClusterCollection::const_iterator gamIter = scRefProd->begin(); gamIter != scRefProd->end(); ++gamIter){ - std::cout << "SC i="<(outputPhotonCoreCollection); - //std::cout << "photon core collection put in unique_ptr"< pcRefProd = - iEvent.put(std::move(photonCores_p), PFPhotonCoreCollection_); - - //std::cout << "photon core have been put in the event"<begin(); gamIter != pcRefProd->end(); ++gamIter){ - std::cout << "PhotonCore i="< barrelHitHandle; - EcalRecHitCollection barrelRecHits; - iEvent.getByLabel(barrelEcalHits_, barrelHitHandle); - if (!barrelHitHandle.isValid()) { - edm::LogError("PhotonProducer") << "Error! Can't get the product "< endcapHitHandle; - iEvent.getByLabel(endcapEcalHits_, endcapHitHandle); - EcalRecHitCollection endcapRecHits; - if (!endcapHitHandle.isValid()) { - edm::LogError("PhotonProducer") << "Error! 
Can't get the product "<().get(theCaloGeom_); - - edm::ESHandle pTopology; - iSetup.get().get(theCaloTopo_); - const CaloTopology *topology = theCaloTopo_.product(); - - // get Hcal rechits collection - Handle hcalTowersHandle; - iEvent.getByLabel(hcalTowers_, hcalTowersHandle); - */ - - //create photon collection - //if(status) createPhotons(vertexCollection, pcRefProd, topology, &barrelRecHits, &endcapRecHits, hcalRecHitsHandle, isolationValues, outputPhotonCollection); - if (status) - createPhotons(vertexCollection, egPhotons, pcRefProd, isolationValues, outputPhotonCollection); - - // Put the photons in the event - auto photons_p = std::make_unique(outputPhotonCollection); - //std::cout << "photon collection put in unique_ptr"< photonRefProd = iEvent.put(std::move(photons_p), PFPhotonCollection_); - //std::cout << "photons have been put in the event"<begin(); gamIter != photonRefProd->end(); ++gamIter){ - std::cout << "Photon i="< the energy is not corrected -// It should be possible to get the corrected energy (including the associated PS energy) -// from the PFCandidate daugthers ; Needs some work -void PFPhotonTranslator::createBasicCluster(const reco::PFBlockElement &PFBE, - reco::BasicClusterCollection &basicClusters, - std::vector &pfClusters, - const reco::PFCandidate &coCandidate) const { - const reco::PFClusterRef &myPFClusterRef = PFBE.clusterRef(); - if (myPFClusterRef.isNull()) - return; - - const reco::PFCluster &myPFCluster(*myPFClusterRef); - pfClusters.push_back(&myPFCluster); - //std::cout << " Creating BC " << myPFCluster.energy() << " " << coCandidate.ecalEnergy() <<" "<< coCandidate.rawEcalEnergy() <energy(), myPFClusterRef->position(), myPFClusterRef->hitsAndFractions(), plane)); -} - -void PFPhotonTranslator::createBasicClusterPtrs( - const edm::OrphanHandle &basicClustersHandle) { - unsigned size = photPFCandidateIndex_.size(); - unsigned basicClusterCounter = 0; - basicClusterPtr_.resize(size); - - for (unsigned iphot = 0; iphot < size; 
++iphot) // loop on tracks - { - unsigned nbc = basicClusters_[iphot].size(); - for (unsigned ibc = 0; ibc < nbc; ++ibc) // loop on basic clusters - { - // std::cout << "Track "<< iGSF << " ref " << basicClusterCounter << std::endl; - reco::CaloClusterPtr bcPtr(basicClustersHandle, basicClusterCounter); - basicClusterPtr_[iphot].push_back(bcPtr); - ++basicClusterCounter; - } - } -} - -void PFPhotonTranslator::createPreshowerClusterPtrs( - const edm::OrphanHandle &preshowerClustersHandle) { - unsigned size = photPFCandidateIndex_.size(); - unsigned psClusterCounter = 0; - preshowerClusterPtr_.resize(size); - - for (unsigned iphot = 0; iphot < size; ++iphot) // loop on tracks - { - unsigned nbc = preshowerClusters_[iphot].size(); - for (unsigned ibc = 0; ibc < nbc; ++ibc) // loop on basic clusters - { - // std::cout << "Track "<< iGSF << " ref " << basicClusterCounter << std::endl; - reco::CaloClusterPtr psPtr(preshowerClustersHandle, psClusterCounter); - preshowerClusterPtr_[iphot].push_back(psPtr); - ++psClusterCounter; - } - } -} - -void PFPhotonTranslator::createSuperClusters(const reco::PFCandidateCollection &pfCand, - reco::SuperClusterCollection &superClusters) const { - unsigned nphot = photPFCandidateIndex_.size(); - for (unsigned iphot = 0; iphot < nphot; ++iphot) { - //cout << "SC iphot=" << iphot << endl; - - // Computes energy position a la e/gamma - double sclusterE = 0; - double posX = 0.; - double posY = 0.; - double posZ = 0.; - - unsigned nbasics = basicClusters_[iphot].size(); - for (unsigned ibc = 0; ibc < nbasics; ++ibc) { - //cout << "BC in SC : iphot="< > &v1 = basicClusters_[iphot][ibc].hitsAndFractions(); - // std::cout << " Number of cells " << v1.size() << std::endl; - for (std::vector >::const_iterator diIt = v1.begin(); diIt != v1.end(); ++diIt) { - // std::cout << " Adding DetId " << (diIt->first).rawId() << " " << diIt->second << std::endl; - mySuperCluster.addHitAndFraction(diIt->first, diIt->second); - } // loop over rechits - } - - 
unsigned nps = preshowerClusterPtr_[iphot].size(); - for (unsigned ips = 0; ips < nps; ++ips) { - mySuperCluster.addPreshowerCluster(preshowerClusterPtr_[iphot][ips]); - } - - // Set the preshower energy - mySuperCluster.setPreshowerEnergy(pfCand[photPFCandidateIndex_[iphot]].pS1Energy() + - pfCand[photPFCandidateIndex_[iphot]].pS2Energy()); - - // Set the cluster width - mySuperCluster.setEtaWidth(pfwidth.pflowEtaWidth()); - mySuperCluster.setPhiWidth(pfwidth.pflowPhiWidth()); - // Force the computation of rawEnergy_ of the reco::SuperCluster - mySuperCluster.rawEnergy(); - - //cout << "SC energy="<< mySuperCluster.energy()< &superClustersHandle, - reco::ConversionCollection &oneLegConversions) { - //std::cout << "createOneLegConversions" << std::endl; - - unsigned nphot = photPFCandidateIndex_.size(); - for (unsigned iphot = 0; iphot < nphot; ++iphot) { - //if (conv1legPFCandidateIndex_[iphot]==-1) cout << "No OneLegConversions to add"< -1) { - for (unsigned iConv = 0; iConv < pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]].size(); iConv++) { - reco::CaloClusterPtrVector scPtrVec; - std::vector matchingBC; - math::Error<3>::type error; - const reco::Vertex *convVtx = - new reco::Vertex(pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerPosition(), error); - - //cout << "Vtx x="<x() << " y="<< convVtx->y()<<" z="<z()<< endl; - //cout << "VtxError x=" << convVtx->xError() << endl; - - std::vector OneLegConvVector; - OneLegConvVector.push_back(pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]); - - std::vector tr = OneLegConvVector; - std::vector trackPositionAtEcalVec; - std::vector innPointVec; - std::vector trackPinVec; - std::vector trackPoutVec; - math::XYZPointF trackPositionAtEcal( - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->outerPosition().X(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->outerPosition().Y(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->outerPosition().Z()); - 
math::XYZPointF innPoint(pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerPosition().X(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerPosition().Y(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerPosition().Z()); - math::XYZVectorF trackPin(pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerMomentum().X(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerMomentum().Y(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->innerMomentum().Z()); - math::XYZVectorF trackPout(pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->outerMomentum().X(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->outerMomentum().Y(), - pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->outerMomentum().Z()); - float DCA = pfSingleLegConv_[conv1legPFCandidateIndex_[iphot]][iConv]->d0(); - trackPositionAtEcalVec.push_back(trackPositionAtEcal); - innPointVec.push_back(innPoint); - trackPinVec.push_back(trackPin); - trackPoutVec.push_back(trackPout); - std::vector OneLegMvaVector; - reco::Conversion myOneLegConversion(scPtrVec, - OneLegConvVector, - trackPositionAtEcalVec, - *convVtx, - matchingBC, - DCA, - innPointVec, - trackPinVec, - trackPoutVec, - pfSingleLegConvMva_[conv1legPFCandidateIndex_[iphot]][iConv], - reco::Conversion::pflow); - OneLegMvaVector.push_back(pfSingleLegConvMva_[conv1legPFCandidateIndex_[iphot]][iConv]); - myOneLegConversion.setOneLegMVA(OneLegMvaVector); - //reco::Conversion myOneLegConversion(scPtrVec, - //OneLegConvVector, *convVtx, reco::Conversion::pflow); - - /* - std::cout << "One leg conversion created" << endl; - std::vector > convtracks = myOneLegConversion.tracks(); - const std::vector mvalist = myOneLegConversion.oneLegMVA(); - - cout << "nTracks="<< convtracks.size()<pt(); - std::cout << "Track pt="<< convtracks[itk]->pt() << std::endl; - std::cout << "Track mva="<< mvalist[itk] << std::endl; - } - */ - 
oneLegConversions.push_back(myOneLegConversion); - - //cout << "OneLegConv added"< &superClustersHandle, - const edm::OrphanHandle &oneLegConversionHandle, - reco::PhotonCoreCollection &photonCores) { - //std::cout << "createPhotonCores" << std::endl; - - unsigned nphot = photPFCandidateIndex_.size(); - - unsigned i1legtot = 0; - - for (unsigned iphot = 0; iphot < nphot; ++iphot) { - //std::cout << "iphot="<electronPixelSeeds(); - for (unsigned iseed = 0; iseed < pixelSeeds.size(); iseed++) { - myPhotonCore.addElectronPixelSeed(pixelSeeds[iseed]); - } - - //cout << "PhotonCores : SC OK" << endl; - - //cout << "conv1legPFCandidateIndex_[iphot]="< &egPhotons, - const edm::OrphanHandle &photonCoresHandle, - const IsolationValueMaps &isolationValues, - reco::PhotonCollection &photons) { - //cout << "createPhotons" << endl; - - unsigned nphot = photPFCandidateIndex_.size(); - - for (unsigned iphot = 0; iphot < nphot; ++iphot) { - //std::cout << "iphot="<position(); - //std::cout << "vtx made" << std::endl; - - math::XYZVector direction = PCref->parentSuperCluster()->position() - vtx; - - //It could be that pfSC energy gives not the best resolution : use smaller agregates for some cases ? 
- math::XYZVector P3 = direction.unit() * PCref->parentSuperCluster()->energy(); - LorentzVector P4(P3.x(), P3.y(), P3.z(), PCref->parentSuperCluster()->energy()); - - reco::Photon myPhoton(P4, PCref->parentSuperCluster()->position(), PCref, vtx); - //cout << "photon created"<e1x5(); - showerShape.e2x5 = egPhotonRef_[iphot]->e2x5(); - showerShape.e3x3 = egPhotonRef_[iphot]->e3x3(); - showerShape.e5x5 = egPhotonRef_[iphot]->e5x5(); - showerShape.maxEnergyXtal = egPhotonRef_[iphot]->maxEnergyXtal(); - showerShape.sigmaEtaEta = egPhotonRef_[iphot]->sigmaEtaEta(); - showerShape.sigmaIetaIeta = egPhotonRef_[iphot]->sigmaIetaIeta(); - for (uint id = 0; id < showerShape.hcalOverEcal.size(); ++id) { - showerShape.hcalOverEcal[id] = egPhotonRef_[iphot]->hcalOverEcal(id + 1); - } - myPhoton.setShowerShapeVariables(showerShape); - - saturationInfo.nSaturatedXtals = egPhotonRef_[iphot]->nSaturatedXtals(); - saturationInfo.isSeedSaturated = egPhotonRef_[iphot]->isSeedSaturated(); - myPhoton.setSaturationInfo(saturationInfo); - - fiducialFlags.isEB = egPhotonRef_[iphot]->isEB(); - fiducialFlags.isEE = egPhotonRef_[iphot]->isEE(); - fiducialFlags.isEBEtaGap = egPhotonRef_[iphot]->isEBEtaGap(); - fiducialFlags.isEBPhiGap = egPhotonRef_[iphot]->isEBPhiGap(); - fiducialFlags.isEERingGap = egPhotonRef_[iphot]->isEERingGap(); - fiducialFlags.isEEDeeGap = egPhotonRef_[iphot]->isEEDeeGap(); - fiducialFlags.isEBEEGap = egPhotonRef_[iphot]->isEBEEGap(); - myPhoton.setFiducialVolumeFlags(fiducialFlags); - - isolationVariables03.ecalRecHitSumEt = egPhotonRef_[iphot]->ecalRecHitSumEtConeDR03(); - for (uint id = 0; id < isolationVariables03.hcalRecHitSumEt.size(); ++id) { - isolationVariables03.hcalRecHitSumEt[id] = egPhotonRef_[iphot]->hcalTowerSumEtConeDR03(id + 1); - } - isolationVariables03.trkSumPtSolidCone = egPhotonRef_[iphot]->trkSumPtSolidConeDR03(); - isolationVariables03.trkSumPtHollowCone = egPhotonRef_[iphot]->trkSumPtHollowConeDR03(); - isolationVariables03.nTrkSolidCone = 
egPhotonRef_[iphot]->nTrkSolidConeDR03(); - isolationVariables03.nTrkHollowCone = egPhotonRef_[iphot]->nTrkHollowConeDR03(); - isolationVariables04.ecalRecHitSumEt = egPhotonRef_[iphot]->ecalRecHitSumEtConeDR04(); - for (uint id = 0; id < isolationVariables04.hcalRecHitSumEt.size(); ++id) { - isolationVariables04.hcalRecHitSumEt[id] = egPhotonRef_[iphot]->hcalTowerSumEtConeDR04(id + 1); - } - isolationVariables04.trkSumPtSolidCone = egPhotonRef_[iphot]->trkSumPtSolidConeDR04(); - isolationVariables04.trkSumPtHollowCone = egPhotonRef_[iphot]->trkSumPtHollowConeDR04(); - isolationVariables04.nTrkSolidCone = egPhotonRef_[iphot]->nTrkSolidConeDR04(); - isolationVariables04.nTrkHollowCone = egPhotonRef_[iphot]->nTrkHollowConeDR04(); - myPhoton.setIsolationVariables(isolationVariables04, isolationVariables03); - - reco::Photon::PflowIsolationVariables myPFIso; - myPFIso.chargedHadronIso = (*isolationValues[0])[CandidatePtr_[iphot]]; - myPFIso.photonIso = (*isolationValues[1])[CandidatePtr_[iphot]]; - myPFIso.neutralHadronIso = (*isolationValues[2])[CandidatePtr_[iphot]]; - myPhoton.setPflowIsolationVariables(myPFIso); - - reco::Photon::PflowIDVariables myPFVariables; - - reco::Mustache myMustache(mustacheSCParams_); - myMustache.MustacheID( - *(myPhoton.parentSuperCluster()), myPFVariables.nClusterOutsideMustache, myPFVariables.etOutsideMustache); - myPFVariables.mva = pfPhotonMva_[iphot]; - myPhoton.setPflowIDVariables(myPFVariables); - - //cout << "chargedHadronIso="<elementsInBlocks().size() != 1) { - // std::cout << " Daughter with " << myPFCandidate.elementsInBlocks().size()<< " element in block " << std::endl; - return cand; - } - if (myPFCandidate->elementsInBlocks()[0].second == refindex) { - // std::cout << " Found it " << cand << std::endl; - return *myPFCandidate; - } - } - return cand; -} diff --git a/RecoParticleFlow/PFProducer/plugins/importers/EGPhotonImporter.cc b/RecoParticleFlow/PFProducer/plugins/importers/EGPhotonImporter.cc deleted file mode 100644 
index 846628a65a091..0000000000000 --- a/RecoParticleFlow/PFProducer/plugins/importers/EGPhotonImporter.cc +++ /dev/null @@ -1,104 +0,0 @@ -#include "RecoParticleFlow/PFProducer/interface/BlockElementImporterBase.h" -#include "RecoParticleFlow/PFProducer/interface/PhotonSelectorAlgo.h" -#include "DataFormats/ParticleFlowReco/interface/PFCluster.h" -#include "DataFormats/ParticleFlowReco/interface/PFBlockElementSuperCluster.h" -#include "DataFormats/EgammaCandidates/interface/Photon.h" -#include "DataFormats/EgammaReco/interface/SuperCluster.h" -#include "RecoParticleFlow/PFProducer/interface/PFBlockElementSCEqual.h" - -#include - -#include - -class EGPhotonImporter : public BlockElementImporterBase { -public: - enum SelectionChoices { SeparateDetectorIso, CombinedDetectorIso }; - - EGPhotonImporter(const edm::ParameterSet&, edm::ConsumesCollector&); - - void importToBlock(const edm::Event&, ElementList&) const override; - -private: - edm::EDGetTokenT _src; - const std::unordered_map _selectionTypes; - SelectionChoices _selectionChoice; - std::unique_ptr _selector; - bool _superClustersArePF; -}; - -DEFINE_EDM_PLUGIN(BlockElementImporterFactory, EGPhotonImporter, "EGPhotonImporter"); - -EGPhotonImporter::EGPhotonImporter(const edm::ParameterSet& conf, edm::ConsumesCollector& cc) - : BlockElementImporterBase(conf, cc), - _src(cc.consumes(conf.getParameter("source"))), - _selectionTypes({{"SeparateDetectorIso", EGPhotonImporter::SeparateDetectorIso}, - {"CombinedDetectorIso", EGPhotonImporter::CombinedDetectorIso}}), - _superClustersArePF(conf.getParameter("superClustersArePF")) { - const std::string& selChoice = conf.getParameter("SelectionChoice"); - _selectionChoice = _selectionTypes.at(selChoice); - const edm::ParameterSet& selDef = conf.getParameterSet("SelectionDefinition"); - const float minEt = selDef.getParameter("minEt"); - const float trackIso_const = selDef.getParameter("trackIsoConstTerm"); - const float trackIso_slope = 
selDef.getParameter("trackIsoSlopeTerm"); - const float ecalIso_const = selDef.getParameter("ecalIsoConstTerm"); - const float ecalIso_slope = selDef.getParameter("ecalIsoSlopeTerm"); - const float hcalIso_const = selDef.getParameter("hcalIsoConstTerm"); - const float hcalIso_slope = selDef.getParameter("hcalIsoSlopeTerm"); - const float hoe = selDef.getParameter("HoverE"); - const float loose_hoe = selDef.getParameter("LooseHoverE"); - const float combIso = selDef.getParameter("combIsoConstTerm"); - _selector = std::make_unique((float)_selectionChoice, - minEt, - trackIso_const, - trackIso_slope, - ecalIso_const, - ecalIso_slope, - hcalIso_const, - hcalIso_slope, - hoe, - combIso, - loose_hoe); -} - -void EGPhotonImporter::importToBlock(const edm::Event& e, BlockElementImporterBase::ElementList& elems) const { - typedef BlockElementImporterBase::ElementList::value_type ElementType; - auto photons = e.getHandle(_src); - elems.reserve(elems.size() + photons->size()); - // setup our elements so that all the SCs are grouped together - auto SCs_end = std::partition( - elems.begin(), elems.end(), [](const ElementType& a) { return a->type() == reco::PFBlockElement::SC; }); - //now add the photons - auto bphoton = photons->cbegin(); - auto ephoton = photons->cend(); - reco::PFBlockElementSuperCluster* scbe = nullptr; - reco::PhotonRef phoref; - for (auto photon = bphoton; photon != ephoton; ++photon) { - if (_selector->passPhotonSelection(*photon)) { - phoref = reco::PhotonRef(photons, std::distance(bphoton, photon)); - const reco::SuperClusterRef& scref = photon->superCluster(); - PFBlockElementSCEqual myEqual(scref); - auto sc_elem = std::find_if(elems.begin(), SCs_end, myEqual); - if (sc_elem != SCs_end) { - scbe = static_cast(sc_elem->get()); - scbe->setFromPhoton(true); - scbe->setPhotonRef(phoref); - scbe->setTrackIso(photon->trkSumPtHollowConeDR04()); - scbe->setEcalIso(photon->ecalRecHitSumEtConeDR04()); - scbe->setHcalIso(photon->hcalTowerSumEtConeDR04()); - 
scbe->setHoE(photon->hadronicOverEm()); - } else { - scbe = new reco::PFBlockElementSuperCluster(scref); - scbe->setFromPhoton(true); - scbe->setFromPFSuperCluster(_superClustersArePF); - scbe->setPhotonRef(phoref); - scbe->setTrackIso(photon->trkSumPtHollowConeDR04()); - scbe->setEcalIso(photon->ecalRecHitSumEtConeDR04()); - scbe->setHcalIso(photon->hcalTowerSumEtConeDR04()); - scbe->setHoE(photon->hadronicOverEm()); - SCs_end = elems.insert(SCs_end, ElementType(scbe)); - ++SCs_end; // point to element *after* the new one - } - } - } // loop on photons - elems.shrink_to_fit(); -} diff --git a/RecoParticleFlow/PFProducer/plugins/importers/SuperClusterImporter.cc b/RecoParticleFlow/PFProducer/plugins/importers/SuperClusterImporter.cc index f4d2b9bcb4e80..ef91bd52881da 100644 --- a/RecoParticleFlow/PFProducer/plugins/importers/SuperClusterImporter.cc +++ b/RecoParticleFlow/PFProducer/plugins/importers/SuperClusterImporter.cc @@ -5,8 +5,16 @@ #include "DataFormats/EgammaReco/interface/SuperCluster.h" #include "RecoParticleFlow/PFProducer/interface/PFBlockElementSCEqual.h" #include "Geometry/Records/interface/CaloGeometryRecord.h" -// for single tower H/E -#include "RecoEgamma/EgammaIsolationAlgos/interface/EgammaHadTower.h" +#include "RecoEgamma/EgammaIsolationAlgos/interface/EgammaHcalIsolation.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include "CondTools/Hcal/interface/HcalPFCutsHandler.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "DataFormats/HcalRecHit/interface/HcalRecHitCollections.h" +#include "RecoLocalCalo/HcalRecAlgos/interface/HcalSeverityLevelComputer.h" +#include "RecoLocalCalo/HcalRecAlgos/interface/HcalSeverityLevelComputerRcd.h" +#include "CondFormats/HcalObjects/interface/HcalChannelQuality.h" +#include "CondFormats/DataRecord/interface/HcalChannelQualityRcd.h" //quick pT for superclusters inline double ptFast(const double energy, const 
math::XYZPoint& position, const math::XYZPoint& origin) { @@ -28,13 +36,26 @@ class SuperClusterImporter : public BlockElementImporterBase { private: edm::EDGetTokenT _srcEB, _srcEE; - edm::EDGetTokenT _srcTowers; const double _maxHoverE, _pTbyPass, _minSCPt; + const edm::EDGetTokenT hbheRecHitsTag_; + const int maxSeverityHB_; + const int maxSeverityHE_; + bool cutsFromDB; CaloTowerConstituentsMap const* towerMap_; + CaloGeometry const* caloGeom_; + HcalTopology const* hcalTopo_; + HcalChannelQuality const* hcalChannelQual_; + HcalSeverityLevelComputer const* hcalSev_; bool _superClustersArePF; static const math::XYZPoint _zero; const edm::ESGetToken _ctmapToken; + const edm::ESGetToken caloGeometryToken_; + const edm::ESGetToken hcalTopologyToken_; + const edm::ESGetToken hcalChannelQualityToken_; + const edm::ESGetToken hcalSevLvlComputerToken_; + edm::ESGetToken hcalCutsToken_; + HcalPFCuts const* hcalCuts = nullptr; }; const math::XYZPoint SuperClusterImporter::_zero = math::XYZPoint(0, 0, 0); @@ -45,19 +66,39 @@ SuperClusterImporter::SuperClusterImporter(const edm::ParameterSet& conf, edm::C : BlockElementImporterBase(conf, cc), _srcEB(cc.consumes(conf.getParameter("source_eb"))), _srcEE(cc.consumes(conf.getParameter("source_ee"))), - _srcTowers(cc.consumes(conf.getParameter("source_towers"))), _maxHoverE(conf.getParameter("maximumHoverE")), _pTbyPass(conf.getParameter("minPTforBypass")), _minSCPt(conf.getParameter("minSuperClusterPt")), + hbheRecHitsTag_(cc.consumes(conf.getParameter("hbheRecHitsTag"))), + maxSeverityHB_(conf.getParameter("maxSeverityHB")), + maxSeverityHE_(conf.getParameter("maxSeverityHE")), + cutsFromDB(conf.getParameter("usePFThresholdsFromDB")), _superClustersArePF(conf.getParameter("superClustersArePF")), - _ctmapToken(cc.esConsumes()) {} + _ctmapToken(cc.esConsumes()), + caloGeometryToken_{cc.esConsumes()}, + hcalTopologyToken_{cc.esConsumes()}, + hcalChannelQualityToken_{cc.esConsumes(edm::ESInputTag("", "withTopo"))}, + 
hcalSevLvlComputerToken_{cc.esConsumes()} { + if (cutsFromDB) { + hcalCutsToken_ = cc.esConsumes( + edm::ESInputTag("", "withTopo")); + } +} -void SuperClusterImporter::updateEventSetup(const edm::EventSetup& es) { towerMap_ = &es.getData(_ctmapToken); } +void SuperClusterImporter::updateEventSetup(const edm::EventSetup& es) { + towerMap_ = &es.getData(_ctmapToken); + if (cutsFromDB) { + hcalCuts = &es.getData(hcalCutsToken_); + } + caloGeom_ = &es.getData(caloGeometryToken_); + hcalTopo_ = &es.getData(hcalTopologyToken_); + hcalChannelQual_ = &es.getData(hcalChannelQualityToken_); + hcalSev_ = &es.getData(hcalSevLvlComputerToken_); +} void SuperClusterImporter::importToBlock(const edm::Event& e, BlockElementImporterBase::ElementList& elems) const { auto eb_scs = e.getHandle(_srcEB); auto ee_scs = e.getHandle(_srcEE); - auto const& towers = e.get(_srcTowers); elems.reserve(elems.size() + eb_scs->size() + ee_scs->size()); // setup our elements so that all the SCs are grouped together auto SCs_end = @@ -67,14 +108,30 @@ void SuperClusterImporter::importToBlock(const edm::Event& e, BlockElementImport auto esc = eb_scs->cend(); reco::PFBlockElementSuperCluster* scbe = nullptr; reco::SuperClusterRef scref; + + EgammaHcalIsolation thisHcalVar_ = EgammaHcalIsolation(EgammaHcalIsolation::InclusionRule::isBehindClusterSeed, + 0, //outercone + EgammaHcalIsolation::InclusionRule::withinConeAroundCluster, + 0, //innercone + {{0, 0, 0, 0}}, + {{0, 0, 0, 0}}, + maxSeverityHB_, + {{0, 0, 0, 0, 0, 0, 0}}, + {{0, 0, 0, 0, 0, 0, 0}}, + maxSeverityHE_, + e.get(hbheRecHitsTag_), + caloGeom_, + hcalTopo_, + hcalChannelQual_, + hcalSev_, + towerMap_); + for (auto sc = bsc; sc != esc; ++sc) { scref = reco::SuperClusterRef(eb_scs, std::distance(bsc, sc)); PFBlockElementSCEqual myEqual(scref); auto sc_elem = std::find_if(elems.begin(), SCs_end, myEqual); const double scpT = ptFast(sc->energy(), sc->position(), _zero); - const auto towersBehindCluster = egamma::towersOf(*sc, *towerMap_); - 
const double H_tower = - (egamma::depth1HcalESum(towersBehindCluster, towers) + egamma::depth2HcalESum(towersBehindCluster, towers)); + const double H_tower = thisHcalVar_.getHcalESumBc(scref.get(), 0, hcalCuts); const double HoverE = H_tower / sc->energy(); if (sc_elem == SCs_end && scpT > _minSCPt && (scpT > _pTbyPass || HoverE < _maxHoverE)) { scbe = new reco::PFBlockElementSuperCluster(scref); @@ -91,9 +148,7 @@ void SuperClusterImporter::importToBlock(const edm::Event& e, BlockElementImport PFBlockElementSCEqual myEqual(scref); auto sc_elem = std::find_if(elems.begin(), SCs_end, myEqual); const double scpT = ptFast(sc->energy(), sc->position(), _zero); - const auto towersBehindCluster = egamma::towersOf(*sc, *towerMap_); - const double H_tower = - (egamma::depth1HcalESum(towersBehindCluster, towers) + egamma::depth2HcalESum(towersBehindCluster, towers)); + const double H_tower = thisHcalVar_.getHcalESumBc(scref.get(), 0, hcalCuts); const double HoverE = H_tower / sc->energy(); if (sc_elem == SCs_end && scpT > _minSCPt && (scpT > _pTbyPass || HoverE < _maxHoverE)) { scbe = new reco::PFBlockElementSuperCluster(scref); diff --git a/RecoParticleFlow/PFProducer/python/particleFlowBlock_cfi.py b/RecoParticleFlow/PFProducer/python/particleFlowBlock_cfi.py index b491a3ffbcc6e..82eca42e7f077 100644 --- a/RecoParticleFlow/PFProducer/python/particleFlowBlock_cfi.py +++ b/RecoParticleFlow/PFProducer/python/particleFlowBlock_cfi.py @@ -24,10 +24,13 @@ cms.PSet( importerName = cms.string("SuperClusterImporter"), source_eb = cms.InputTag("particleFlowSuperClusterECAL:particleFlowSuperClusterECALBarrel"), source_ee = cms.InputTag("particleFlowSuperClusterECAL:particleFlowSuperClusterECALEndcapWithPreshower"), - source_towers = cms.InputTag("towerMaker"), maximumHoverE = cms.double(0.5), minSuperClusterPt = cms.double(10.0), minPTforBypass = cms.double(100.0), + hbheRecHitsTag = cms.InputTag('hbhereco'), + maxSeverityHB = cms.int32(9), + maxSeverityHE = cms.int32(9), + 
usePFThresholdsFromDB = cms.bool(False), superClustersArePF = cms.bool(True) ), cms.PSet( importerName = cms.string("ConversionTrackImporter"), source = cms.InputTag("pfConversions"), @@ -246,3 +249,8 @@ def _findIndicesByModule(name): particleFlowBlock, elementImporters = _addTimingLayer ) + +#--- Use DB conditions for cuts&seeds for Run3 and phase2 +from Configuration.Eras.Modifier_hcalPfCutsFromDB_cff import hcalPfCutsFromDB +hcalPfCutsFromDB.toModify( _scImporter, + usePFThresholdsFromDB = True) diff --git a/RecoParticleFlow/PFProducer/src/PhotonSelectorAlgo.cc b/RecoParticleFlow/PFProducer/src/PhotonSelectorAlgo.cc deleted file mode 100644 index 48e22a8b4a0aa..0000000000000 --- a/RecoParticleFlow/PFProducer/src/PhotonSelectorAlgo.cc +++ /dev/null @@ -1,66 +0,0 @@ -// -// Original Authors: Nicholas Wardle, Florian Beaudette -// -#include "RecoParticleFlow/PFProducer/interface/PhotonSelectorAlgo.h" - -PhotonSelectorAlgo::PhotonSelectorAlgo(float choice, - float c_Et, - float c_iso_track_a, - float c_iso_track_b, - float c_iso_ecal_a, - float c_iso_ecal_b, - float c_iso_hcal_a, - float c_iso_hcal_b, - float c_hoe, - float comb_iso, - float loose_hoe) - : choice_(choice), - c_Et_(c_Et), - c_iso_track_a_(c_iso_track_a), - c_iso_track_b_(c_iso_track_b), - c_iso_ecal_a_(c_iso_ecal_a), - c_iso_ecal_b_(c_iso_ecal_b), - c_iso_hcal_a_(c_iso_hcal_a), - c_iso_hcal_b_(c_iso_hcal_b), - c_hoe_(c_hoe), - comb_iso_(comb_iso), - loose_hoe_(loose_hoe) { - ; -} - -bool PhotonSelectorAlgo::passPhotonSelection(const reco::Photon& photon) const { - // Photon ET - float photonPt = photon.pt(); - if (photonPt < c_Et_) - return false; - if (choice_ < 0.1) //EGM Loose - { - //std::cout<<"Cuts:"< c_hoe_) - return false; - - // Track iso - if (photon.trkSumPtHollowConeDR04() > c_iso_track_a_ + c_iso_track_b_ * photonPt) - return false; - - // ECAL iso - if (photon.ecalRecHitSumEtConeDR04() > c_iso_ecal_a_ + c_iso_ecal_b_ * photonPt) - return false; - - // HCAL iso - if 
(photon.hcalTowerSumEtConeDR04() > c_iso_hcal_a_ + c_iso_hcal_b_ * photonPt) - return false; - } - if (choice_ > 0.99) { - //std::cout<<"Cuts "< loose_hoe_) - return false; - //Isolation variables in 0.3 cone combined - if (photon.trkSumPtHollowConeDR03() + photon.ecalRecHitSumEtConeDR03() + photon.hcalTowerSumEtConeDR03() > - comb_iso_) - return false; - } - - return true; -} diff --git a/RecoParticleFlow/PFRecHitProducer/interface/PFRecHitTopologyRecord.h b/RecoParticleFlow/PFRecHitProducer/interface/PFRecHitTopologyRecord.h index 78e731ac9eaa6..d539b47ca9ce3 100644 --- a/RecoParticleFlow/PFRecHitProducer/interface/PFRecHitTopologyRecord.h +++ b/RecoParticleFlow/PFRecHitProducer/interface/PFRecHitTopologyRecord.h @@ -5,10 +5,11 @@ #include "FWCore/Framework/interface/EventSetupRecordImplementation.h" #include "Geometry/Records/interface/CaloGeometryRecord.h" #include "Geometry/Records/interface/HcalRecNumberingRecord.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" class PFRecHitHCALTopologyRecord : public edm::eventsetup::DependentRecordImplementation< PFRecHitHCALTopologyRecord, - edm::mpl::Vector> {}; + edm::mpl::Vector> {}; class PFRecHitECALTopologyRecord : public edm::eventsetup::DependentRecordImplementation + + diff --git a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.dev.cc b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.dev.cc index b9287a1f45787..ef18ebc5ecc93 100644 --- a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.dev.cc +++ b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.dev.cc @@ -16,6 +16,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { template >> ALPAKA_FN_ACC void operator()(const TAcc& acc, const typename CAL::ParameterType::ConstView params, + const typename CAL::TopologyTypeDevice::ConstView topology, const typename CAL::CaloRecHitSoATypeDevice::ConstView recHits, reco::PFRecHitDeviceCollection::View pfRecHits, uint32_t* 
__restrict__ denseId2pfRecHit, @@ -23,7 +24,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { // Strided loop over CaloRecHits for (int32_t i : cms::alpakatools::elements_with_stride(acc, recHits.metadata().size())) { // Check energy thresholds/quality cuts (specialised for HCAL/ECAL) - if (!applyCuts(recHits[i], params)) + if (!applyCuts(recHits[i], params, topology)) continue; // Use atomic operation to determine index of the PFRecHit to be constructed @@ -40,7 +41,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { } ALPAKA_FN_ACC static bool applyCuts(const typename CAL::CaloRecHitSoATypeDevice::ConstView::const_element rh, - const typename CAL::ParameterType::ConstView params); + const typename CAL::ParameterType::ConstView params, + const typename CAL::TopologyTypeDevice::ConstView topology); ALPAKA_FN_ACC static void constructPFRecHit( reco::PFRecHitDeviceCollection::View::element pfrh, @@ -50,26 +52,33 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { template <> ALPAKA_FN_ACC bool PFRecHitProducerKernelConstruct::applyCuts( const typename HCAL::CaloRecHitSoATypeDevice::ConstView::const_element rh, - const HCAL::ParameterType::ConstView params) { + const HCAL::ParameterType::ConstView params, + const HCAL::TopologyTypeDevice::ConstView topology) { // Reject HCAL recHits below enery threshold float threshold = 9999.; const uint32_t detId = rh.detId(); const uint32_t depth = HCAL::getDepth(detId); const uint32_t subdet = getSubdet(detId); - if (subdet == HcalBarrel) { - threshold = params.energyThresholds()[depth - 1]; - } else if (subdet == HcalEndcap) { - threshold = params.energyThresholds()[depth - 1 + HCAL::kMaxDepthHB]; + if (topology.cutsFromDB()) { + threshold = topology.noiseThreshold()[HCAL::detId2denseId(detId)]; } else { - printf("Rechit with detId %u has invalid subdetector %u!\n", detId, subdet); - return false; + if (subdet == HcalBarrel) { + threshold = params.energyThresholds()[depth - 1]; + } else if (subdet == HcalEndcap) { + threshold = 
params.energyThresholds()[depth - 1 + HCAL::kMaxDepthHB]; + } else { + printf("Rechit with detId %u has invalid subdetector %u!\n", detId, subdet); + return false; + } } return rh.energy() >= threshold; } template <> ALPAKA_FN_ACC bool PFRecHitProducerKernelConstruct::applyCuts( - const ECAL::CaloRecHitSoATypeDevice::ConstView::const_element rh, const ECAL::ParameterType::ConstView params) { + const ECAL::CaloRecHitSoATypeDevice::ConstView::const_element rh, + const ECAL::ParameterType::ConstView params, + const ECAL::TopologyTypeDevice::ConstView topology) { // Reject ECAL recHits below energy threshold if (rh.energy() < params.energyThresholds()[ECAL::detId2denseId(rh.detId())]) return false; @@ -88,6 +97,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { reco::PFRecHitDeviceCollection::View::element pfrh, const HCAL::CaloRecHitSoATypeDevice::ConstView::const_element rh) { pfrh.detId() = rh.detId(); + pfrh.denseId() = HCAL::detId2denseId(rh.detId()); pfrh.energy() = rh.energy(); pfrh.time() = rh.time(); pfrh.depth() = HCAL::getDepth(pfrh.detId()); @@ -105,6 +115,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { reco::PFRecHitDeviceCollection::View::element pfrh, const ECAL::CaloRecHitSoATypeDevice::ConstView::const_element rh) { pfrh.detId() = rh.detId(); + pfrh.denseId() = ECAL::detId2denseId(rh.detId()); pfrh.energy() = rh.energy(); pfrh.time() = rh.time(); pfrh.depth() = 1; @@ -168,11 +179,13 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { void PFRecHitProducerKernel::processRecHits(Queue& queue, const typename CAL::CaloRecHitSoATypeDevice& recHits, const typename CAL::ParameterType& params, + const typename CAL::TopologyTypeDevice& topology, reco::PFRecHitDeviceCollection& pfRecHits) { alpaka::exec(queue, work_div_, PFRecHitProducerKernelConstruct{}, params.view(), + topology.view(), recHits.view(), pfRecHits.view(), denseId2pfRecHit_.data(), diff --git a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.h 
b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.h index 37638a2370060..ffaef6b0ad748 100644 --- a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.h +++ b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitProducerKernel.h @@ -17,6 +17,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { void processRecHits(Queue& queue, const typename CAL::CaloRecHitSoATypeDevice& recHits, const typename CAL::ParameterType& params, + const typename CAL::TopologyTypeDevice& topology, reco::PFRecHitDeviceCollection& pfRecHits); // Run kernel: Associate topology information (position, neighbours) diff --git a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitSoAProducer.cc b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitSoAProducer.cc index a53ce4f23eed4..8297b6359eaf5 100644 --- a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitSoAProducer.cc +++ b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitSoAProducer.cc @@ -41,7 +41,7 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { PFRecHitProducerKernel kernel{event.queue(), num_recHits}; for (const auto& token : recHitsToken_) - kernel.processRecHits(event.queue(), event.get(token.first), setup.getData(token.second), pfRecHits); + kernel.processRecHits(event.queue(), event.get(token.first), setup.getData(token.second), topology, pfRecHits); kernel.associateTopologyInfo(event.queue(), topology, pfRecHits); if (synchronise_) diff --git a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitTopologyESProducer.cc b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitTopologyESProducer.cc index c751202f45347..f94db2aecc362 100644 --- a/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitTopologyESProducer.cc +++ b/RecoParticleFlow/PFRecHitProducer/plugins/alpaka/PFRecHitTopologyESProducer.cc @@ -5,6 +5,8 @@ #include #include "DataFormats/EcalDetId/interface/EcalSubdetector.h" +#include "CondFormats/DataRecord/interface/HcalPFCutsRcd.h" +#include 
"CondTools/Hcal/interface/HcalPFCutsHandler.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ParameterSetDescription.h" @@ -24,15 +26,22 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { template class PFRecHitTopologyESProducer : public ESProducer { public: - PFRecHitTopologyESProducer(edm::ParameterSet const& iConfig) : ESProducer(iConfig) { + PFRecHitTopologyESProducer(edm::ParameterSet const& iConfig) + : ESProducer(iConfig), cutsFromDB_(iConfig.getParameter("usePFThresholdsFromDB")) { auto cc = setWhatProduced(this); geomToken_ = cc.consumes(); - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { hcalToken_ = cc.consumes(); + hcalCutsToken_ = cc.consumes(); + } } static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; + if constexpr (std::is_same_v) + desc.add("usePFThresholdsFromDB", true); + else // only needs to be true for HBHE + desc.add("usePFThresholdsFromDB", false); descriptions.addWithDefaultLabel(desc); } @@ -40,7 +49,6 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { const auto& geom = iRecord.get(geomToken_); auto product = std::make_unique(CAL::kSize, cms::alpakatools::host()); auto view = product->view(); - const int calEnums[2] = {CAL::kSubdetectorBarrelId, CAL::kSubdetectorEndcapId}; for (const auto subdet : calEnums) { // Construct topology @@ -61,6 +69,20 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { const uint32_t denseId = CAL::detId2denseId(detId); assert(denseId < CAL::kSize); + // Fill SoA members with HCAL PF Thresholds from GT + if constexpr (std::is_same_v) { + view.cutsFromDB() = false; + if (cutsFromDB_) { + view.cutsFromDB() = true; + const HcalPFCuts& pfCuts = iRecord.get(hcalCutsToken_); + const HcalTopology& htopo = iRecord.get(hcalToken_); + std::unique_ptr prod = std::make_unique(pfCuts); + prod->setTopo(&htopo); + view.noiseThreshold(denseId) = 
prod->getValues(detId.rawId())->noiseThreshold(); + view.seedThreshold(denseId) = prod->getValues(detId.rawId())->seedThreshold(); + } + } + const GlobalPoint pos = geo->getGeometry(detId)->getPosition(); view.positionX(denseId) = pos.x(); view.positionY(denseId) = pos.y(); @@ -119,6 +141,8 @@ namespace ALPAKA_ACCELERATOR_NAMESPACE { private: edm::ESGetToken geomToken_; edm::ESGetToken hcalToken_; + edm::ESGetToken hcalCutsToken_; + const bool cutsFromDB_; // specialised for HCAL/ECAL, because non-nearest neighbours are defined differently uint32_t getNeighbourDetId(const uint32_t detId, const uint32_t direction, const CaloSubdetectorTopology& topo); diff --git a/RecoTauTag/HLTProducers/src/L2TauTagNNProducerAlpaka.cc b/RecoTauTag/HLTProducers/src/L2TauTagNNProducerAlpaka.cc new file mode 100644 index 0000000000000..9772366c6b22e --- /dev/null +++ b/RecoTauTag/HLTProducers/src/L2TauTagNNProducerAlpaka.cc @@ -0,0 +1,822 @@ +/* + * \class L2TauTagProducer + * + * L2Tau identification using Convolutional NN. 
+ * + * \author Valeria D'Amante, Università di Siena and INFN Pisa + * Konstantin Androsov, EPFL and ETHZ +*/ +#include +#include +#include +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "DataFormats/Math/interface/deltaR.h" +#include "DataFormats/Common/interface/Handle.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/isFinite.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "PhysicsTools/TensorFlow/interface/TensorFlow.h" +#include "Geometry/CaloGeometry/interface/CaloCellGeometry.h" +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "Geometry/CaloTopology/interface/HcalTopology.h" +#include "Geometry/Records/interface/CaloGeometryRecord.h" +#include "DataFormats/CaloRecHit/interface/CaloRecHit.h" +#include "DataFormats/EcalRecHit/interface/EcalRecHit.h" +#include "DataFormats/EcalRecHit/interface/EcalRecHitCollections.h" +#include "DataFormats/EcalDetId/interface/EcalDetIdCollections.h" +#include "DataFormats/HcalDetId/interface/HcalDetId.h" +#include "DataFormats/HcalRecHit/interface/HBHERecHit.h" +#include "DataFormats/HcalRecHit/interface/HcalRecHitDefs.h" +#include "DataFormats/HcalRecHit/interface/HFRecHit.h" +#include "DataFormats/HcalRecHit/interface/HORecHit.h" +#include "DataFormats/HLTReco/interface/TriggerTypeDefs.h" +#include "DataFormats/HLTReco/interface/TriggerFilterObjectWithRefs.h" +#include "TrackingTools/TrajectoryParametrization/interface/CurvilinearTrajectoryError.h" +#include "RecoTracker/PixelTrackFitting/interface/FitUtils.h" +#include 
"TrackingTools/TrajectoryParametrization/interface/GlobalTrajectoryParameters.h" +#include "DataFormats/TrackReco/interface/HitPattern.h" +#include "TrackingTools/AnalyticalJacobians/interface/JacobianLocalToCurvilinear.h" +#include "DataFormats/TrajectoryState/interface/LocalTrajectoryParameters.h" +#include "DataFormats/GeometrySurface/interface/Plane.h" +#include "DataFormats/BeamSpot/interface/BeamSpot.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" + +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" + +namespace L2TauTagNNv1 { + constexpr int nCellEta = 5; + constexpr int nCellPhi = 5; + constexpr int nVars = 31; + constexpr float dR_max = 0.5; + enum class NNInputs { + nVertices = 0, + l1Tau_pt, + l1Tau_eta, + l1Tau_hwIso, + EcalEnergySum, + EcalSize, + EcalEnergyStdDev, + EcalDeltaEta, + EcalDeltaPhi, + EcalChi2, + EcalEnergySumForPositiveChi2, + EcalSizeForPositiveChi2, + HcalEnergySum, + HcalSize, + HcalEnergyStdDev, + HcalDeltaEta, + HcalDeltaPhi, + HcalChi2, + HcalEnergySumForPositiveChi2, + HcalSizeForPositiveChi2, + PatatrackPtSum, + PatatrackSize, + PatatrackSizeWithVertex, + PatatrackPtSumWithVertex, + PatatrackChargeSum, + PatatrackDeltaEta, + PatatrackDeltaPhi, + PatatrackChi2OverNdof, + PatatrackNdof, + PatatrackDxy, + PatatrackDz + }; + + const std::map varNameMap = { + {NNInputs::nVertices, "nVertices"}, + {NNInputs::l1Tau_pt, "l1Tau_pt"}, + {NNInputs::l1Tau_eta, "l1Tau_eta"}, + {NNInputs::l1Tau_hwIso, "l1Tau_hwIso"}, + {NNInputs::EcalEnergySum, "EcalEnergySum"}, + {NNInputs::EcalSize, "EcalSize"}, + {NNInputs::EcalEnergyStdDev, "EcalEnergyStdDev"}, + {NNInputs::EcalDeltaEta, "EcalDeltaEta"}, + {NNInputs::EcalDeltaPhi, "EcalDeltaPhi"}, + {NNInputs::EcalChi2, "EcalChi2"}, + {NNInputs::EcalEnergySumForPositiveChi2, 
"EcalEnergySumForPositiveChi2"}, + {NNInputs::EcalSizeForPositiveChi2, "EcalSizeForPositiveChi2"}, + {NNInputs::HcalEnergySum, "HcalEnergySum"}, + {NNInputs::HcalSize, "HcalSize"}, + {NNInputs::HcalEnergyStdDev, "HcalEnergyStdDev"}, + {NNInputs::HcalDeltaEta, "HcalDeltaEta"}, + {NNInputs::HcalDeltaPhi, "HcalDeltaPhi"}, + {NNInputs::HcalChi2, "HcalChi2"}, + {NNInputs::HcalEnergySumForPositiveChi2, "HcalEnergySumForPositiveChi2"}, + {NNInputs::HcalSizeForPositiveChi2, "HcalSizeForPositiveChi2"}, + {NNInputs::PatatrackPtSum, "PatatrackPtSum"}, + {NNInputs::PatatrackSize, "PatatrackSize"}, + {NNInputs::PatatrackSizeWithVertex, "PatatrackSizeWithVertex"}, + {NNInputs::PatatrackPtSumWithVertex, "PatatrackPtSumWithVertex"}, + {NNInputs::PatatrackChargeSum, "PatatrackChargeSum"}, + {NNInputs::PatatrackDeltaEta, "PatatrackDeltaEta"}, + {NNInputs::PatatrackDeltaPhi, "PatatrackDeltaPhi"}, + {NNInputs::PatatrackChi2OverNdof, "PatatrackChi2OverNdof"}, + {NNInputs::PatatrackNdof, "PatatrackNdof"}, + {NNInputs::PatatrackDxy, "PatatrackDxy"}, + {NNInputs::PatatrackDz, "PatatrackDz"}}; +} // namespace L2TauTagNNv1 +namespace { + inline float& getCellImpl( + tensorflow::Tensor& cellGridMatrix, int tau_idx, int phi_idx, int eta_idx, L2TauTagNNv1::NNInputs NNInput_idx) { + return cellGridMatrix.tensor()(tau_idx, phi_idx, eta_idx, static_cast(NNInput_idx)); + } +} // namespace +struct normDictElement { + float mean; + float std; + float min; + float max; +}; + +struct L2TauNNProducerAlpakaCacheData { + L2TauNNProducerAlpakaCacheData() : graphDef(nullptr), session(nullptr) {} + tensorflow::GraphDef* graphDef; + tensorflow::Session* session; + std::vector normVec; +}; + +class L2TauNNProducerAlpaka : public edm::stream::EDProducer> { +public: + using TracksHost = pixelTrack::TracksHostPhase1; + + struct caloRecHitCollections { + const HBHERecHitCollection* hbhe; + const HORecHitCollection* ho; + const EcalRecHitCollection* eb; + const EcalRecHitCollection* ee; + const CaloGeometry* 
geometry; + }; + + struct InputDescTau { + std::string CollectionName; + edm::EDGetTokenT inputToken_; + }; + + static constexpr float dR2_max = L2TauTagNNv1::dR_max * L2TauTagNNv1::dR_max; + static constexpr float dEta_width = 2 * L2TauTagNNv1::dR_max / static_cast(L2TauTagNNv1::nCellEta); + static constexpr float dPhi_width = 2 * L2TauTagNNv1::dR_max / static_cast(L2TauTagNNv1::nCellPhi); + + explicit L2TauNNProducerAlpaka(const edm::ParameterSet&, const L2TauNNProducerAlpakaCacheData*); + static void fillDescriptions(edm::ConfigurationDescriptions&); + static std::unique_ptr initializeGlobalCache(const edm::ParameterSet&); + static void globalEndJob(L2TauNNProducerAlpakaCacheData*); + +private: + void checknan(tensorflow::Tensor& tensor, int debugLevel); + void standardizeTensor(tensorflow::Tensor& tensor); + std::vector getTauScore(const tensorflow::Tensor& cellGridMatrix); + void produce(edm::Event& event, const edm::EventSetup& eventsetup) override; + void fillL1TauVars(tensorflow::Tensor& cellGridMatrix, const std::vector& allTaus); + void fillCaloRecHits(tensorflow::Tensor& cellGridMatrix, + const std::vector& allTaus, + const caloRecHitCollections& caloRecHits); + void fillPatatracks(tensorflow::Tensor& cellGridMatrix, + const std::vector& allTaus, + const TracksHost& patatracks_tsoa, + const ZVertexHost& patavtx_soa, + const reco::BeamSpot& beamspot, + const MagneticField* magfi); + void selectGoodTracksAndVertices(const ZVertexHost& patavtx_soa, + const TracksHost& patatracks_tsoa, + std::vector& trkGood, + std::vector& vtxGood); + std::pair impactParameter(int it, + const TracksHost& patatracks_tsoa, + float patatrackPhi, + const reco::BeamSpot& beamspot, + const MagneticField* magfi); + template + std::tuple getEtaPhiIndices(const VPos& position, const LVec& tau_p4); + template + std::tuple getEtaPhiIndices(float eta, float phi, const LVec& tau_p4); + +private: + const int debugLevel_; + const edm::EDGetTokenT tauTriggerToken_; + std::vector 
L1TauDesc_; + const edm::EDGetTokenT hbheToken_; + const edm::EDGetTokenT hoToken_; + const edm::EDGetTokenT ebToken_; + const edm::EDGetTokenT eeToken_; + const edm::ESGetToken geometryToken_; + const edm::ESGetToken bFieldToken_; + const edm::EDGetTokenT pataVerticesToken_; + const edm::EDGetTokenT pataTracksToken_; + const edm::EDGetTokenT beamSpotToken_; + const unsigned int maxVtx_; + const float fractionSumPt2_; + const float minSumPt2_; + const float trackPtMin_; + const float trackPtMax_; + const float trackChi2Max_; + std::string inputTensorName_; + std::string outputTensorName_; + const L2TauNNProducerAlpakaCacheData* L2cacheData_; +}; + +std::unique_ptr L2TauNNProducerAlpaka::initializeGlobalCache( + const edm::ParameterSet& cfg) { + std::unique_ptr cacheData = std::make_unique(); + cacheData->normVec.reserve(L2TauTagNNv1::nVars); + + auto const graphPath = edm::FileInPath(cfg.getParameter("graphPath")).fullPath(); + + cacheData->graphDef = tensorflow::loadGraphDef(graphPath); + cacheData->session = tensorflow::createSession(cacheData->graphDef); + + tensorflow::setLogging("2"); + + boost::property_tree::ptree loadPtreeRoot; + auto const normalizationDict = edm::FileInPath(cfg.getParameter("normalizationDict")).fullPath(); + boost::property_tree::read_json(normalizationDict, loadPtreeRoot); + for (const auto& [key, val] : L2TauTagNNv1::varNameMap) { + boost::property_tree::ptree var = loadPtreeRoot.get_child(val); + normDictElement current_element; + current_element.mean = var.get_child("mean").get_value(); + current_element.std = var.get_child("std").get_value(); + current_element.min = var.get_child("min").get_value(); + current_element.max = var.get_child("max").get_value(); + cacheData->normVec.push_back(current_element); + } + return cacheData; +} +void L2TauNNProducerAlpaka::globalEndJob(L2TauNNProducerAlpakaCacheData* cacheData) { + if (cacheData->graphDef != nullptr) { + delete cacheData->graphDef; + } + 
tensorflow::closeSession(cacheData->session); +} +void L2TauNNProducerAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("debugLevel", 0)->setComment("set debug level for printing out info"); + edm::ParameterSetDescription l1TausPset; + l1TausPset.add("L1CollectionName", "DoubleTau")->setComment("Name of collections"); + l1TausPset.add("L1TauTrigger", edm::InputTag("hltL1sDoubleTauBigOR")) + ->setComment("Which trigger should the L1 Taus collection pass"); + edm::ParameterSet l1TausPSetDefault; + l1TausPSetDefault.addParameter("L1CollectionName", "DoubleTau"); + l1TausPSetDefault.addParameter("L1TauTrigger", edm::InputTag("hltL1sDoubleTauBigOR")); + desc.addVPSet("L1Taus", l1TausPset, {l1TausPSetDefault}); + desc.add("hbheInput", edm::InputTag("hltHbhereco"))->setComment("HBHE recHit collection"); + desc.add("hoInput", edm::InputTag("hltHoreco"))->setComment("HO recHit Collection"); + desc.add("ebInput", edm::InputTag("hltEcalRecHit:EcalRecHitsEB"))->setComment("EB recHit Collection"); + desc.add("eeInput", edm::InputTag("hltEcalRecHit:EcalRecHitsEE"))->setComment("EE recHit Collection"); + desc.add("pataVertices", edm::InputTag("hltPixelVerticesSoA")) + ->setComment("patatrack vertices collection"); + desc.add("pataTracks", edm::InputTag("hltPixelTracksSoA"))->setComment("patatrack collection"); + desc.add("BeamSpot", edm::InputTag("hltOnlineBeamSpot"))->setComment("BeamSpot Collection"); + desc.add("maxVtx", 100)->setComment("max output collection size (number of accepted vertices)"); + desc.add("fractionSumPt2", 0.3)->setComment("threshold on sumPt2 fraction of the leading vertex"); + desc.add("minSumPt2", 0.)->setComment("min sumPt2"); + desc.add("track_pt_min", 1.0)->setComment("min track p_T"); + desc.add("track_pt_max", 10.0)->setComment("max track p_T"); + desc.add("track_chi2_max", 99999.)->setComment("max track chi2"); + desc.add("graphPath", 
"RecoTauTag/TrainingFiles/data/L2TauNNTag/L2TauTag_Run3v1.pb") + ->setComment("path to the saved CNN"); + desc.add("normalizationDict", "RecoTauTag/TrainingFiles/data/L2TauNNTag/NormalizationDict.json") + ->setComment("path to the dictionary for variable standardization"); + descriptions.addWithDefaultLabel(desc); +} + +L2TauNNProducerAlpaka::L2TauNNProducerAlpaka(const edm::ParameterSet& cfg, + const L2TauNNProducerAlpakaCacheData* cacheData) + : debugLevel_(cfg.getParameter("debugLevel")), + hbheToken_(consumes(cfg.getParameter("hbheInput"))), + hoToken_(consumes(cfg.getParameter("hoInput"))), + ebToken_(consumes(cfg.getParameter("ebInput"))), + eeToken_(consumes(cfg.getParameter("eeInput"))), + geometryToken_(esConsumes()), + bFieldToken_(esConsumes()), + pataVerticesToken_(consumes(cfg.getParameter("pataVertices"))), + pataTracksToken_(consumes(cfg.getParameter("pataTracks"))), + beamSpotToken_(consumes(cfg.getParameter("BeamSpot"))), + maxVtx_(cfg.getParameter("maxVtx")), + fractionSumPt2_(cfg.getParameter("fractionSumPt2")), + minSumPt2_(cfg.getParameter("minSumPt2")), + trackPtMin_(cfg.getParameter("track_pt_min")), + trackPtMax_(cfg.getParameter("track_pt_max")), + trackChi2Max_(cfg.getParameter("track_chi2_max")) { + if (cacheData->graphDef == nullptr) { + throw cms::Exception("InvalidCacheData") << "Invalid Cache Data."; + } + inputTensorName_ = cacheData->graphDef->node(0).name(); + outputTensorName_ = cacheData->graphDef->node(cacheData->graphDef->node_size() - 1).name(); + L2cacheData_ = cacheData; + std::vector L1TauCollections = cfg.getParameter>("L1Taus"); + L1TauDesc_.reserve(L1TauCollections.size()); + for (const auto& l1TauInput : L1TauCollections) { + InputDescTau toInsert; + toInsert.CollectionName = l1TauInput.getParameter("L1CollectionName"); + toInsert.inputToken_ = + consumes(l1TauInput.getParameter("L1TauTrigger")); + L1TauDesc_.push_back(toInsert); + } + for (const auto& desc : L1TauDesc_) + produces>(desc.CollectionName); +} + +void 
L2TauNNProducerAlpaka::checknan(tensorflow::Tensor& tensor, int debugLevel) { + using NNInputs = L2TauTagNNv1::NNInputs; + std::vector tensor_shape(tensor.shape().dims()); + for (int d = 0; d < tensor.shape().dims(); d++) { + tensor_shape.at(d) = tensor.shape().dim_size(d); + } + if (tensor_shape.size() != 4) { + throw cms::Exception("InvalidTensor") << "Tensor shape does not have 4 dimensions!"; + } + for (int tau_idx = 0; tau_idx < tensor_shape.at(0); tau_idx++) { + for (int phi_idx = 0; phi_idx < tensor_shape.at(1); phi_idx++) { + for (int eta_idx = 0; eta_idx < tensor_shape.at(2); eta_idx++) { + for (int var_idx = 0; var_idx < tensor_shape.at(3); var_idx++) { + auto getCell = [&](NNInputs input) -> float& { + return getCellImpl(tensor, tau_idx, phi_idx, eta_idx, input); + }; + auto nonstd_var = getCell(static_cast(var_idx)); + if (edm::isNotFinite(nonstd_var)) { + edm::LogWarning("InputVar") << "var is nan \nvar name= " + << L2TauTagNNv1::varNameMap.at(static_cast(var_idx)) + << "\t var_idx = " << var_idx << "\t eta_idx = " << eta_idx + << "\t phi_idx = " << phi_idx << "\t tau_idx = " << tau_idx; + if (debugLevel > 2) { + edm::LogWarning("InputVar") << "other vars in same cell \n"; + if (var_idx + 1 < tensor_shape.at(3)) + edm::LogWarning("InputVar") << L2TauTagNNv1::varNameMap.at(static_cast(var_idx + 1)) + << "\t = " << getCell(static_cast(var_idx + 1)); + if (var_idx + 2 < tensor_shape.at(3)) + edm::LogWarning("InputVar") << L2TauTagNNv1::varNameMap.at(static_cast(var_idx + 2)) + << "\t = " << getCell(static_cast(var_idx + 2)); + if (var_idx + 3 < tensor_shape.at(3)) + edm::LogWarning("InputVar") << L2TauTagNNv1::varNameMap.at(static_cast(var_idx + 3)) + << "\t = " << getCell(static_cast(var_idx + 3)); + if (var_idx + 4 < tensor_shape.at(3)) + edm::LogWarning("InputVar") << L2TauTagNNv1::varNameMap.at(static_cast(var_idx + 4)) + << "\t = " << getCell(static_cast(var_idx + 4)); + } + } + } + } + } + } +} + +void 
L2TauNNProducerAlpaka::standardizeTensor(tensorflow::Tensor& tensor) { + using NNInputs = L2TauTagNNv1::NNInputs; + std::vector tensor_shape(tensor.shape().dims()); + for (int d = 0; d < tensor.shape().dims(); d++) { + tensor_shape.at(d) = tensor.shape().dim_size(d); + } + if (tensor_shape.size() != 4) { + throw cms::Exception("InvalidTensor") << "Tensor shape does not have 4 dimensions!"; + } + for (int tau_idx = 0; tau_idx < tensor_shape.at(0); tau_idx++) { + for (int phi_idx = 0; phi_idx < tensor_shape.at(1); phi_idx++) { + for (int eta_idx = 0; eta_idx < tensor_shape.at(2); eta_idx++) { + for (int var_idx = 0; var_idx < tensor_shape.at(3); var_idx++) { + auto getCell = [&](NNInputs input) -> float& { + return getCellImpl(tensor, tau_idx, phi_idx, eta_idx, input); + }; + float mean = L2cacheData_->normVec.at(var_idx).mean; + float std = L2cacheData_->normVec.at(var_idx).std; + float min = L2cacheData_->normVec.at(var_idx).min; + float max = L2cacheData_->normVec.at(var_idx).max; + float nonstd_var = getCell(static_cast(var_idx)); + float std_var = static_cast((nonstd_var - mean) / std); + if (std_var > max) { + std_var = static_cast(max); + } else if (std_var < min) { + std_var = static_cast(min); + } + getCell(static_cast(var_idx)) = std_var; + } + } + } + } +} + +void L2TauNNProducerAlpaka::fillL1TauVars(tensorflow::Tensor& cellGridMatrix, const std::vector& allTaus) { + using NNInputs = L2TauTagNNv1::NNInputs; + + const int nTaus = allTaus.size(); + for (int tau_idx = 0; tau_idx < nTaus; tau_idx++) { + for (int eta_idx = 0; eta_idx < L2TauTagNNv1::nCellEta; eta_idx++) { + for (int phi_idx = 0; phi_idx < L2TauTagNNv1::nCellPhi; phi_idx++) { + auto getCell = [&](NNInputs input) -> float& { + return getCellImpl(cellGridMatrix, tau_idx, phi_idx, eta_idx, input); + }; + getCell(NNInputs::l1Tau_pt) = allTaus[tau_idx]->pt(); + getCell(NNInputs::l1Tau_eta) = allTaus[tau_idx]->eta(); + getCell(NNInputs::l1Tau_hwIso) = allTaus[tau_idx]->hwIso(); + } + } + } +} + 
+template +std::tuple L2TauNNProducerAlpaka::getEtaPhiIndices(float eta, float phi, const LVec& tau_p4) { + const float deta = eta - tau_p4.eta(); + const float dphi = reco::deltaPhi(phi, tau_p4.phi()); + const int eta_idx = static_cast(floor((deta + L2TauTagNNv1::dR_max) / dEta_width)); + const int phi_idx = static_cast(floor((dphi + L2TauTagNNv1::dR_max) / dPhi_width)); + return std::make_tuple(deta, dphi, eta_idx, phi_idx); +} + +template +std::tuple L2TauNNProducerAlpaka::getEtaPhiIndices(const VPos& position, const LVec& tau_p4) { + return getEtaPhiIndices(position.eta(), position.phi(), tau_p4); +} + +void L2TauNNProducerAlpaka::fillCaloRecHits(tensorflow::Tensor& cellGridMatrix, + const std::vector& allTaus, + const caloRecHitCollections& caloRecHits) { + using NNInputs = L2TauTagNNv1::NNInputs; + + const int nTaus = allTaus.size(); + float deta, dphi; + int eta_idx = 0; + int phi_idx = 0; + int tau_idx = 0; + + auto getCell = [&](NNInputs input) -> float& { + return getCellImpl(cellGridMatrix, tau_idx, phi_idx, eta_idx, input); + }; + for (tau_idx = 0; tau_idx < nTaus; tau_idx++) { + // calorechit_EE + for (const auto& caloRecHit_ee : *caloRecHits.ee) { + if (caloRecHit_ee.energy() <= 0) + continue; + const auto& position = caloRecHits.geometry->getGeometry(caloRecHit_ee.id())->getPosition(); + const float eeCalEn = caloRecHit_ee.energy(); + const float eeCalChi2 = caloRecHit_ee.chi2(); + if (reco::deltaR2(position, allTaus[tau_idx]->polarP4()) < dR2_max) { + std::tie(deta, dphi, eta_idx, phi_idx) = getEtaPhiIndices(position, allTaus[tau_idx]->polarP4()); + getCell(NNInputs::EcalEnergySum) += eeCalEn; + getCell(NNInputs::EcalSize) += 1.; + getCell(NNInputs::EcalEnergyStdDev) += eeCalEn * eeCalEn; + getCell(NNInputs::EcalDeltaEta) += deta * eeCalEn; + getCell(NNInputs::EcalDeltaPhi) += dphi * eeCalEn; + if (eeCalChi2 >= 0) { + getCell(NNInputs::EcalChi2) += eeCalChi2 * eeCalEn; + getCell(NNInputs::EcalEnergySumForPositiveChi2) += eeCalEn; + 
getCell(NNInputs::EcalSizeForPositiveChi2) += 1.; + } + } + } + + // calorechit_EB + for (const auto& caloRecHit_eb : *caloRecHits.eb) { + if (caloRecHit_eb.energy() <= 0) + continue; + const auto& position = caloRecHits.geometry->getGeometry(caloRecHit_eb.id())->getPosition(); + const float ebCalEn = caloRecHit_eb.energy(); + const float ebCalChi2 = caloRecHit_eb.chi2(); + if (reco::deltaR2(position, allTaus[tau_idx]->polarP4()) < dR2_max) { + std::tie(deta, dphi, eta_idx, phi_idx) = getEtaPhiIndices(position, allTaus[tau_idx]->polarP4()); + getCell(NNInputs::EcalEnergySum) += ebCalEn; + getCell(NNInputs::EcalSize) += 1.; + getCell(NNInputs::EcalEnergyStdDev) += ebCalEn * ebCalEn; + getCell(NNInputs::EcalDeltaEta) += deta * ebCalEn; + getCell(NNInputs::EcalDeltaPhi) += dphi * ebCalEn; + if (ebCalChi2 >= 0) { + getCell(NNInputs::EcalChi2) += ebCalChi2 * ebCalEn; + getCell(NNInputs::EcalEnergySumForPositiveChi2) += ebCalEn; + getCell(NNInputs::EcalSizeForPositiveChi2) += 1.; + } + } + } + + // calorechit_HBHE + for (const auto& caloRecHit_hbhe : *caloRecHits.hbhe) { + if (caloRecHit_hbhe.energy() <= 0) + continue; + const auto& position = caloRecHits.geometry->getGeometry(caloRecHit_hbhe.id())->getPosition(); + const float hbheCalEn = caloRecHit_hbhe.energy(); + const float hbheCalChi2 = caloRecHit_hbhe.chi2(); + if (reco::deltaR2(position, allTaus[tau_idx]->polarP4()) < dR2_max) { + std::tie(deta, dphi, eta_idx, phi_idx) = getEtaPhiIndices(position, allTaus[tau_idx]->polarP4()); + getCell(NNInputs::HcalEnergySum) += hbheCalEn; + getCell(NNInputs::HcalEnergyStdDev) += hbheCalEn * hbheCalEn; + getCell(NNInputs::HcalSize) += 1.; + getCell(NNInputs::HcalDeltaEta) += deta * hbheCalEn; + getCell(NNInputs::HcalDeltaPhi) += dphi * hbheCalEn; + if (hbheCalChi2 >= 0) { + getCell(NNInputs::HcalChi2) += hbheCalChi2 * hbheCalEn; + getCell(NNInputs::HcalEnergySumForPositiveChi2) += hbheCalEn; + getCell(NNInputs::HcalSizeForPositiveChi2) += 1.; + } + } + } + + // calorechit_HO + 
for (const auto& caloRecHit_ho : *caloRecHits.ho) { + if (caloRecHit_ho.energy() <= 0) + continue; + const auto& position = caloRecHits.geometry->getGeometry(caloRecHit_ho.id())->getPosition(); + const float hoCalEn = caloRecHit_ho.energy(); + if (reco::deltaR2(position, allTaus[tau_idx]->polarP4()) < dR2_max) { + std::tie(deta, dphi, eta_idx, phi_idx) = getEtaPhiIndices(position, allTaus[tau_idx]->polarP4()); + getCell(NNInputs::HcalEnergySum) += hoCalEn; + getCell(NNInputs::HcalEnergyStdDev) += hoCalEn * hoCalEn; + getCell(NNInputs::HcalSize) += 1.; + getCell(NNInputs::HcalDeltaEta) += deta * hoCalEn; + getCell(NNInputs::HcalDeltaPhi) += dphi * hoCalEn; + } + } + + // normalize to sum and define stdDev + for (eta_idx = 0; eta_idx < L2TauTagNNv1::nCellEta; eta_idx++) { + for (phi_idx = 0; phi_idx < L2TauTagNNv1::nCellPhi; phi_idx++) { + /* normalize eCal vars*/ + if (getCell(NNInputs::EcalEnergySum) > 0.) { + getCell(NNInputs::EcalDeltaEta) /= getCell(NNInputs::EcalEnergySum); + getCell(NNInputs::EcalDeltaPhi) /= getCell(NNInputs::EcalEnergySum); + } + if (getCell(NNInputs::EcalEnergySumForPositiveChi2) > 0.) { + getCell(NNInputs::EcalChi2) /= getCell(NNInputs::EcalEnergySumForPositiveChi2); + } + if (getCell(NNInputs::EcalSize) > 1.) { + // (stdDev - (enSum*enSum)/size) / (size-1) + getCell(NNInputs::EcalEnergyStdDev) = + (getCell(NNInputs::EcalEnergyStdDev) - + (getCell(NNInputs::EcalEnergySum) * getCell(NNInputs::EcalEnergySum)) / getCell(NNInputs::EcalSize)) / + (getCell(NNInputs::EcalSize) - 1); + } else { + getCell(NNInputs::EcalEnergyStdDev) = 0.; + } + /* normalize hCal Vars */ + if (getCell(NNInputs::HcalEnergySum) > 0.) { + getCell(NNInputs::HcalDeltaEta) /= getCell(NNInputs::HcalEnergySum); + getCell(NNInputs::HcalDeltaPhi) /= getCell(NNInputs::HcalEnergySum); + } + if (getCell(NNInputs::HcalEnergySumForPositiveChi2) > 0.) { + getCell(NNInputs::HcalChi2) /= getCell(NNInputs::HcalEnergySumForPositiveChi2); + } + if (getCell(NNInputs::HcalSize) > 1.) 
{ + // (stdDev - (enSum*enSum)/size) / (size-1) + getCell(NNInputs::HcalEnergyStdDev) = + (getCell(NNInputs::HcalEnergyStdDev) - + (getCell(NNInputs::HcalEnergySum) * getCell(NNInputs::HcalEnergySum)) / getCell(NNInputs::HcalSize)) / + (getCell(NNInputs::HcalSize) - 1); + } else { + getCell(NNInputs::HcalEnergyStdDev) = 0.; + } + } + } + } +} + +void L2TauNNProducerAlpaka::selectGoodTracksAndVertices(const ZVertexHost& patavtx_soa, + const TracksHost& patatracks_tsoa, + std::vector& trkGood, + std::vector& vtxGood) { + using patatrackHelpers = TracksUtilities; + const auto maxTracks = patatracks_tsoa.view().metadata().size(); + const int nv = patavtx_soa.view().nvFinal(); + trkGood.clear(); + trkGood.reserve(maxTracks); + vtxGood.clear(); + vtxGood.reserve(nv); + auto const* quality = patatracks_tsoa.view().quality(); + + // No need to sort either as the algorithms is just using the max (not even the location, just the max value of pt2sum). + std::vector pTSquaredSum(nv, 0); + std::vector nTrkAssociated(nv, 0); + + for (int32_t trk_idx = 0; trk_idx < maxTracks; ++trk_idx) { + auto nHits = patatrackHelpers::nHits(patatracks_tsoa.view(), trk_idx); + if (nHits == 0) { + break; + } + int vtx_ass_to_track = patavtx_soa.view()[trk_idx].idv(); + if (vtx_ass_to_track >= 0 && vtx_ass_to_track < nv) { + auto patatrackPt = patatracks_tsoa.view()[trk_idx].pt(); + ++nTrkAssociated[vtx_ass_to_track]; + if (patatrackPt >= trackPtMin_ && patatracks_tsoa.const_view()[trk_idx].chi2() <= trackChi2Max_) { + patatrackPt = std::min(patatrackPt, trackPtMax_); + pTSquaredSum[vtx_ass_to_track] += patatrackPt * patatrackPt; + } + } + if (nHits > 0 and quality[trk_idx] >= pixelTrack::Quality::loose) { + trkGood.push_back(trk_idx); + } + } + if (nv > 0) { + const auto minFOM_fromFrac = (*std::max_element(pTSquaredSum.begin(), pTSquaredSum.end())) * fractionSumPt2_; + for (int j = nv - 1; j >= 0 && vtxGood.size() < maxVtx_; --j) { + auto vtx_idx = patavtx_soa.view()[j].sortInd(); + 
assert(vtx_idx < nv); + if (nTrkAssociated[vtx_idx] >= 2 && pTSquaredSum[vtx_idx] >= minFOM_fromFrac && + pTSquaredSum[vtx_idx] > minSumPt2_) { + vtxGood.push_back(vtx_idx); + } + } + } +} + +std::pair L2TauNNProducerAlpaka::impactParameter(int it, + const TracksHost& patatracks_tsoa, + float patatrackPhi, + const reco::BeamSpot& beamspot, + const MagneticField* magfi) { + /* dxy and dz */ + riemannFit::Vector5d ipar, opar; + riemannFit::Matrix5d icov, ocov; + TracksUtilities::copyToDense(patatracks_tsoa.view(), ipar, icov, it); + riemannFit::transformToPerigeePlane(ipar, icov, opar, ocov); + LocalTrajectoryParameters lpar(opar(0), opar(1), opar(2), opar(3), opar(4), 1.); + float sp = std::sin(patatrackPhi); + float cp = std::cos(patatrackPhi); + Surface::RotationType Rotation(sp, -cp, 0, 0, 0, -1.f, cp, sp, 0); + GlobalPoint BeamSpotPoint(beamspot.x0(), beamspot.y0(), beamspot.z0()); + Plane impPointPlane(BeamSpotPoint, Rotation); + GlobalTrajectoryParameters gp( + impPointPlane.toGlobal(lpar.position()), impPointPlane.toGlobal(lpar.momentum()), lpar.charge(), magfi); + GlobalPoint vv = gp.position(); + math::XYZPoint pos(vv.x(), vv.y(), vv.z()); + GlobalVector pp = gp.momentum(); + math::XYZVector mom(pp.x(), pp.y(), pp.z()); + auto lambda = M_PI_2 - pp.theta(); + auto phi = pp.phi(); + float patatrackDxy = -vv.x() * std::sin(phi) + vv.y() * std::cos(phi); + float patatrackDz = + (vv.z() * std::cos(lambda) - (vv.x() * std::cos(phi) + vv.y() * std::sin(phi)) * std::sin(lambda)) / + std::cos(lambda); + return std::make_pair(patatrackDxy, patatrackDz); +} + +void L2TauNNProducerAlpaka::fillPatatracks(tensorflow::Tensor& cellGridMatrix, + const std::vector& allTaus, + const TracksHost& patatracks_tsoa, + const ZVertexHost& patavtx_soa, + const reco::BeamSpot& beamspot, + const MagneticField* magfi) { + using NNInputs = L2TauTagNNv1::NNInputs; + using patatrackHelpers = TracksUtilities; + float deta, dphi; + int eta_idx = 0; + int phi_idx = 0; + int tau_idx = 0; + + 
auto getCell = [&](NNInputs input) -> float& { + return getCellImpl(cellGridMatrix, tau_idx, phi_idx, eta_idx, input); + }; + + std::vector trkGood; + std::vector vtxGood; + + selectGoodTracksAndVertices(patavtx_soa, patatracks_tsoa, trkGood, vtxGood); + + const int nTaus = allTaus.size(); + for (tau_idx = 0; tau_idx < nTaus; tau_idx++) { + const float tauEta = allTaus[tau_idx]->eta(); + const float tauPhi = allTaus[tau_idx]->phi(); + + for (const auto it : trkGood) { + const float patatrackPt = patatracks_tsoa.const_view()[it].pt(); + if (patatrackPt <= 0) + continue; + const float patatrackPhi = reco::phi(patatracks_tsoa.const_view(), it); + const float patatrackEta = patatracks_tsoa.const_view()[it].eta(); + const float patatrackCharge = reco::charge(patatracks_tsoa.const_view(), it); + const float patatrackChi2OverNdof = patatracks_tsoa.view()[it].chi2(); + const auto nHits = patatrackHelpers::nHits(patatracks_tsoa.const_view(), it); + if (nHits <= 0) + continue; + const int patatrackNdof = 2 * std::min(6, nHits) - 5; + + const int vtx_idx_assTrk = patavtx_soa.view()[it].idv(); + if (reco::deltaR2(patatrackEta, patatrackPhi, tauEta, tauPhi) < dR2_max) { + std::tie(deta, dphi, eta_idx, phi_idx) = + getEtaPhiIndices(patatrackEta, patatrackPhi, allTaus[tau_idx]->polarP4()); + getCell(NNInputs::PatatrackPtSum) += patatrackPt; + getCell(NNInputs::PatatrackSize) += 1.; + getCell(NNInputs::PatatrackChargeSum) += patatrackCharge; + getCell(NNInputs::PatatrackDeltaEta) += deta * patatrackPt; + getCell(NNInputs::PatatrackDeltaPhi) += dphi * patatrackPt; + getCell(NNInputs::PatatrackChi2OverNdof) += patatrackChi2OverNdof * patatrackPt; + getCell(NNInputs::PatatrackNdof) += patatrackNdof * patatrackPt; + std::pair impactParameters = impactParameter(it, patatracks_tsoa, patatrackPhi, beamspot, magfi); + getCell(NNInputs::PatatrackDxy) += impactParameters.first * patatrackPt; + getCell(NNInputs::PatatrackDz) += impactParameters.second * patatrackPt; + if 
((std::find(vtxGood.begin(), vtxGood.end(), vtx_idx_assTrk) != vtxGood.end())) { + getCell(NNInputs::PatatrackPtSumWithVertex) += patatrackPt; + getCell(NNInputs::PatatrackSizeWithVertex) += 1.; + } + } + } + + // normalize to sum and define stdDev + for (eta_idx = 0; eta_idx < L2TauTagNNv1::nCellEta; eta_idx++) { + for (phi_idx = 0; phi_idx < L2TauTagNNv1::nCellPhi; phi_idx++) { + getCell(NNInputs::nVertices) = vtxGood.size(); + if (getCell(NNInputs::PatatrackPtSum) > 0.) { + getCell(NNInputs::PatatrackDeltaEta) /= getCell(NNInputs::PatatrackPtSum); + getCell(NNInputs::PatatrackDeltaPhi) /= getCell(NNInputs::PatatrackPtSum); + getCell(NNInputs::PatatrackChi2OverNdof) /= getCell(NNInputs::PatatrackPtSum); + getCell(NNInputs::PatatrackNdof) /= getCell(NNInputs::PatatrackPtSum); + getCell(NNInputs::PatatrackDxy) /= getCell(NNInputs::PatatrackPtSum); + getCell(NNInputs::PatatrackDz) /= getCell(NNInputs::PatatrackPtSum); + } + } + } + } +} + +std::vector L2TauNNProducerAlpaka::getTauScore(const tensorflow::Tensor& cellGridMatrix) { + std::vector pred_tensor; + tensorflow::run(L2cacheData_->session, {{inputTensorName_, cellGridMatrix}}, {outputTensorName_}, &pred_tensor); + const int nTau = cellGridMatrix.shape().dim_size(0); + std::vector pred_vector(nTau); + for (int tau_idx = 0; tau_idx < nTau; ++tau_idx) { + pred_vector[tau_idx] = pred_tensor[0].matrix()(tau_idx, 0); + } + + return pred_vector; +} + +void L2TauNNProducerAlpaka::produce(edm::Event& event, const edm::EventSetup& eventsetup) { + std::vector> TauCollectionMap(L1TauDesc_.size()); + l1t::TauVectorRef allTaus; + + for (size_t inp_idx = 0; inp_idx < L1TauDesc_.size(); inp_idx++) { + l1t::TauVectorRef l1Taus; + auto const& l1TriggeredTaus = event.get(L1TauDesc_[inp_idx].inputToken_); + l1TriggeredTaus.getObjects(trigger::TriggerL1Tau, l1Taus); + TauCollectionMap.at(inp_idx).resize(l1Taus.size()); + + for (size_t l1_idx = 0; l1_idx < l1Taus.size(); l1_idx++) { + size_t tau_idx; + const auto iter = 
std::find(allTaus.begin(), allTaus.end(), l1Taus[l1_idx]); + if (iter != allTaus.end()) { + tau_idx = std::distance(allTaus.begin(), iter); + } else { + allTaus.push_back(l1Taus[l1_idx]); + tau_idx = allTaus.size() - 1; + } + TauCollectionMap.at(inp_idx).at(l1_idx) = tau_idx; + } + } + const auto ebCal = event.getHandle(ebToken_); + const auto eeCal = event.getHandle(eeToken_); + const auto hbhe = event.getHandle(hbheToken_); + const auto ho = event.getHandle(hoToken_); + auto const& patatracks_SoA = event.get(pataTracksToken_); + auto const& vertices_SoA = event.get(pataVerticesToken_); + const auto bsHandle = event.getHandle(beamSpotToken_); + + auto const fieldESH = eventsetup.getHandle(bFieldToken_); + auto const geometry = eventsetup.getHandle(geometryToken_); + + caloRecHitCollections caloRecHits; + caloRecHits.hbhe = &*hbhe; + caloRecHits.ho = &*ho; + caloRecHits.eb = &*ebCal; + caloRecHits.ee = &*eeCal; + caloRecHits.geometry = &*geometry; + + const int nTaus = allTaus.size(); + tensorflow::Tensor cellGridMatrix(tensorflow::DT_FLOAT, + {nTaus, L2TauTagNNv1::nCellEta, L2TauTagNNv1::nCellPhi, L2TauTagNNv1::nVars}); + const int n_inputs = nTaus * L2TauTagNNv1::nCellEta * L2TauTagNNv1::nCellPhi * L2TauTagNNv1::nVars; + for (int input_idx = 0; input_idx < n_inputs; ++input_idx) { + cellGridMatrix.flat()(input_idx) = 0; + } + fillL1TauVars(cellGridMatrix, allTaus); + + fillCaloRecHits(cellGridMatrix, allTaus, caloRecHits); + + fillPatatracks(cellGridMatrix, allTaus, patatracks_SoA, vertices_SoA, *bsHandle, fieldESH.product()); + + standardizeTensor(cellGridMatrix); + + if (debugLevel_ > 0) { + checknan(cellGridMatrix, debugLevel_); + } + + std::vector tau_score = getTauScore(cellGridMatrix); + + for (size_t inp_idx = 0; inp_idx < L1TauDesc_.size(); inp_idx++) { + const size_t nTau = TauCollectionMap[inp_idx].size(); + auto tau_tags = std::make_unique>(nTau); + for (size_t tau_pos = 0; tau_pos < nTau; ++tau_pos) { + const auto tau_idx = 
TauCollectionMap[inp_idx][tau_pos]; + if (debugLevel_ > 0) { + edm::LogInfo("DebugInfo") << event.id().event() << " \t " << (allTaus[tau_idx])->pt() << " \t " + << tau_score.at(tau_idx) << std::endl; + } + (*tau_tags)[tau_pos] = tau_score.at(tau_idx); + } + event.put(std::move(tau_tags), L1TauDesc_[inp_idx].CollectionName); + } +} +//define this as a plug-in +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(L2TauNNProducerAlpaka); diff --git a/RecoTauTag/RecoTau/interface/DeepTauIdBase.h b/RecoTauTag/RecoTau/interface/DeepTauIdBase.h index c9d3ebbe85067..95e940afdfe67 100644 --- a/RecoTauTag/RecoTau/interface/DeepTauIdBase.h +++ b/RecoTauTag/RecoTau/interface/DeepTauIdBase.h @@ -1298,10 +1298,14 @@ class DeepTauIdBase : public Producer { if (sub_version_ == 1) get(dnn::footprintCorrection) = sp.scale(tau_funcs.getFootprintCorrectiondR03(tau, tau_ref), tauInputs_indices_[dnn::footprintCorrection]); - else if (sub_version_ == 5) - get(dnn::footprintCorrection) = - sp.scale(tau_funcs.getFootprintCorrection(tau, tau_ref), tauInputs_indices_[dnn::footprintCorrection]); - + else if (sub_version_ == 5) { + if (is_online_) + get(dnn::footprintCorrection) = + sp.scale(tau_funcs.getFootprintCorrectiondR03(tau, tau_ref), tauInputs_indices_[dnn::footprintCorrection]); + else + get(dnn::footprintCorrection) = + sp.scale(tau_funcs.getFootprintCorrection(tau, tau_ref), tauInputs_indices_[dnn::footprintCorrection]); + } get(dnn::neutralIsoPtSum) = sp.scale(tau_funcs.getNeutralIsoPtSum(tau, tau_ref), tauInputs_indices_[dnn::neutralIsoPtSum]); get(dnn::neutralIsoPtSumWeight_over_neutralIsoPtSum) = diff --git a/RecoTracker/CkfPattern/interface/BaseCkfTrajectoryBuilder.h b/RecoTracker/CkfPattern/interface/BaseCkfTrajectoryBuilder.h index e6e03cb8f4fea..789ffe3aa3d5a 100644 --- a/RecoTracker/CkfPattern/interface/BaseCkfTrajectoryBuilder.h +++ b/RecoTracker/CkfPattern/interface/BaseCkfTrajectoryBuilder.h @@ -42,7 +42,6 @@ namespace edm { class ConsumesCollector; } 
-#include "TrackingTools/PatternTools/interface/bqueue.h" #include "RecoTracker/CkfPattern/interface/PrintoutHelper.h" #include diff --git a/RecoTracker/Configuration/python/RecoPixelVertexing_cff.py b/RecoTracker/Configuration/python/RecoPixelVertexing_cff.py index c08a0987d3f59..f5ba3ad7df1da 100644 --- a/RecoTracker/Configuration/python/RecoPixelVertexing_cff.py +++ b/RecoTracker/Configuration/python/RecoPixelVertexing_cff.py @@ -1,4 +1,5 @@ import FWCore.ParameterSet.Config as cms +from HeterogeneousCore.AlpakaCore.functions import * from HeterogeneousCore.CUDACore.SwitchProducerCUDA import SwitchProducerCUDA from RecoTracker.PixelTrackFitting.PixelTracks_cff import * @@ -98,6 +99,31 @@ pixelVerticesTask.copy() )) +## pixel vertex reconstruction with Alpaka + +# pixel vertex SoA producer with alpaka on the device +from RecoTracker.PixelVertexFinding.pixelVertexProducerAlpakaPhase1_cfi import pixelVertexProducerAlpakaPhase1 as _pixelVerticesAlpakaPhase1 +from RecoTracker.PixelVertexFinding.pixelVertexProducerAlpakaPhase2_cfi import pixelVertexProducerAlpakaPhase2 as _pixelVerticesAlpakaPhase2 +pixelVerticesAlpaka = _pixelVerticesAlpakaPhase1.clone() +phase2_tracker.toReplaceWith(pixelVerticesAlpaka,_pixelVerticesAlpakaPhase2.clone()) + +from RecoTracker.PixelVertexFinding.pixelVertexFromSoAAlpaka_cfi import pixelVertexFromSoAAlpaka as _pixelVertexFromSoAAlpaka +alpaka.toReplaceWith(pixelVertices, _pixelVertexFromSoAAlpaka.clone()) + +# pixel vertex SoA producer with alpaka on the cpu, for validation +pixelVerticesAlpakaSerial = makeSerialClone(pixelVerticesAlpaka, + pixelTrackSrc = 'pixelTracksAlpakaSerial' +) + +alpaka.toReplaceWith(pixelVerticesTask, cms.Task( + # Build the pixel vertices in SoA format with alpaka on the device + pixelVerticesAlpaka, + # Build the pixel vertices in SoA format with alpaka on the cpu (if requested by the validation) + pixelVerticesAlpakaSerial, + # Convert the pixel vertices from SoA format (on the host) to the legacy format + 
pixelVertices +)) + # Tasks and Sequences recopixelvertexingTask = cms.Task( pixelTracksTask, diff --git a/RecoTracker/Configuration/python/RecoTracker_EventContent_cff.py b/RecoTracker/Configuration/python/RecoTracker_EventContent_cff.py index 641ee50a8a9eb..5137c1f736c6e 100644 --- a/RecoTracker/Configuration/python/RecoTracker_EventContent_cff.py +++ b/RecoTracker/Configuration/python/RecoTracker_EventContent_cff.py @@ -18,8 +18,9 @@ ) ) #HI-specific products: needed in AOD, propagate to more inclusive tiers as well +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA -pp_on_AA.toModify( RecoTrackerAOD.outputCommands, +(pp_on_AA | run3_upc).toModify( RecoTrackerAOD.outputCommands, func=lambda outputCommands: outputCommands.extend(['keep recoTracks_hiConformalPixelTracks_*_*']) ) #RECO content @@ -38,7 +39,7 @@ ) ) RecoTrackerRECO.outputCommands.extend(RecoTrackerAOD.outputCommands) -pp_on_AA.toModify( RecoTrackerRECO.outputCommands, +(pp_on_AA | run3_upc).toModify( RecoTrackerRECO.outputCommands, func=lambda outputCommands: outputCommands.extend([ 'keep recoTrackExtras_hiConformalPixelTracks_*_*', 'keep TrackingRecHitsOwned_hiConformalPixelTracks_*_*' diff --git a/RecoTracker/Configuration/python/customizePixelOnlyForProfiling.py b/RecoTracker/Configuration/python/customizePixelOnlyForProfiling.py index 3d121a8736f8e..55a02f83f913c 100644 --- a/RecoTracker/Configuration/python/customizePixelOnlyForProfiling.py +++ b/RecoTracker/Configuration/python/customizePixelOnlyForProfiling.py @@ -3,10 +3,12 @@ # Customise the Pixel-only reconstruction to run on GPU # # Run the unpacker, clustering, ntuplets, track fit and vertex reconstruction on GPU. 
+# CUDA and Alpaka co-living here for the moment + def customizePixelOnlyForProfilingGPUOnly(process): process.consumer = cms.EDAnalyzer("GenericConsumer", - eventProducts = cms.untracked.vstring('pixelTracksCUDA', 'pixelVerticesCUDA') + eventProducts = cms.untracked.vstring('pixelTracksCUDA', 'pixelVerticesCUDA', '*DeviceProduct_pixelTracksAlpaka_*_*', '*DeviceProduct_pixelVerticesAlpaka_*_*') ) process.consume_step = cms.EndPath(process.consumer) @@ -25,10 +27,8 @@ def customizePixelOnlyForProfilingGPUOnly(process): # tracks and vertices on the CPU in SoA format, without conversion to legacy format. def customizePixelOnlyForProfilingGPUWithHostCopy(process): - #? process.siPixelRecHitSoAFromLegacy.convertToLegacy = False - process.consumer = cms.EDAnalyzer("GenericConsumer", - eventProducts = cms.untracked.vstring('pixelTracksSoA', 'pixelVerticesSoA') + eventProducts = cms.untracked.vstring('pixelTracksSoA', 'pixelVerticesSoA', 'pixelTracksAlpaka', 'pixelVerticesAlpaka') ) process.consume_step = cms.EndPath(process.consumer) diff --git a/RecoTracker/ConversionSeedGenerators/plugins/SeedGeneratorFromTrackingParticleEDProducer.cc b/RecoTracker/ConversionSeedGenerators/plugins/SeedGeneratorFromTrackingParticleEDProducer.cc deleted file mode 100644 index c344461f05ba0..0000000000000 --- a/RecoTracker/ConversionSeedGenerators/plugins/SeedGeneratorFromTrackingParticleEDProducer.cc +++ /dev/null @@ -1,80 +0,0 @@ -/*#include "FWCore/Framework/interface/EDProducer.h" -#include "FWCore/Utilities/interface/InputTag.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/Framework/interface/Event.h" -#include "DataFormats/Common/interface/Handle.h" -#include "DataFormats/TrackReco/interface/Track.h" -#include "DataFormats/TrajectorySeed/interface/TrajectorySeedCollection.h" -#include "RecoTracker/TkSeedGenerator/interface/SeedFromProtoTrack.h" -#include "RecoTracker/TkSeedingLayers/interface/SeedingHitSet.h" -#include 
"RecoTracker/TkSeedGenerator/interface/SeedFromConsecutiveHitsCreator.h" -#include "TrackingTools/TransientTrackingRecHit/interface/TransientTrackingRecHitBuilder.h" -#include "TrackingTools/Records/interface/TransientRecHitRecord.h" -#include "RecoTracker/TkTrackingRegions/interface/GlobalTrackingRegion.h" -#include "TrackingTools/TransientTrackingRecHit/interface/TransientTrackingRecHit.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "DataFormats/TrackReco/interface/TrackFwd.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" -#include - -class SeedGeneratorFromTrackingParticleEDProducer : public edm::EDProducer { -public: - SeedGeneratorFromTrackingParticleEDProducer(const edm::ParameterSet& cfg); - virtual ~SeedGeneratorFromTrackingParticleEDProducer(){} - virtual void produce(edm::Event& ev, const edm::EventSetup& es); -private: - edm::ParameterSet theConfig; - edm::InputTag theInputCollectionTag; -}; - -using namespace edm; -using namespace reco; - -template T sqr( T t) {return t*t;} -typedef TransientTrackingRecHit::ConstRecHitPointer Hit; - -struct HitLessByRadius { bool operator() (const Hit& h1, const Hit & h2) { return h1->globalPosition().perp2() < h2->globalPosition().perp2(); } }; - -SeedGeneratorFromTrackingParticleEDProducer::SeedGeneratorFromTrackingParticleEDProducer(const ParameterSet& cfg) - : theConfig(cfg), theInputCollectionTag(cfg.getParameter("InputCollection")) -{ - produces(); -} - -void SeedGeneratorFromTrackingParticleEDProducer::produce(edm::Event& ev, const edm::EventSetup& es) -{ - auto result = std::make_unique(); - Handle trks; - ev.getByLabel(theInputCollectionTag, trks); - - const TrackCollection &protos = *(trks.product()); - - for (TrackCollection::const_iterator it=protos.begin(); it!= protos.end(); ++it) { - const Track & proto = (*it); - - if (theConfig.getParameter("useProtoTrackKinematics")) { - SeedFromProtoTrack seedFromProtoTrack( proto, es); - 
if (seedFromProtoTrack.isValid()) (*result).push_back( seedFromProtoTrack.trajectorySeed() ); - } else { - edm::ESHandle ttrhbESH; - std::string builderName = theConfig.getParameter("TTRHBuilder"); - es.get().get(builderName,ttrhbESH); - std::vector hits; - for (unsigned int iHit = 0, nHits = proto.recHitsSize(); iHit < nHits; ++iHit) { - TrackingRecHitRef refHit = proto.recHit(iHit); - if(refHit->isValid()) hits.push_back(ttrhbESH->build( &(*refHit) )); - sort(hits.begin(), hits.end(), HitLessByRadius()); - } - if (hits.size() >= 2) { - GlobalPoint vtx(proto.vertex().x(), proto.vertex().y(), proto.vertex().z()); - double mom_perp = sqrt(proto.momentum().x()*proto.momentum().x()+proto.momentum().y()*proto.momentum().y()); - GlobalTrackingRegion region(mom_perp, vtx, 0.2, 0.2); - SeedFromConsecutiveHitsCreator().trajectorySeed(*result, SeedingHitSet(hits), region, es); - } - } - } - - ev.put(std::move(result)); -} -*/ diff --git a/RecoTracker/FinalTrackSelectors/plugins/SingleLongTrackProducer.cc b/RecoTracker/FinalTrackSelectors/plugins/SingleLongTrackProducer.cc new file mode 100644 index 0000000000000..8430385347ae4 --- /dev/null +++ b/RecoTracker/FinalTrackSelectors/plugins/SingleLongTrackProducer.cc @@ -0,0 +1,245 @@ +// user includes +#include "DataFormats/Math/interface/deltaR.h" +#include "DataFormats/MuonReco/interface/Muon.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/VertexReco/interface/Vertex.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" + +// ROOT includes +#include "TLorentzVector.h" + +class SingleLongTrackProducer : public edm::stream::EDProducer<> { +public: + explicit SingleLongTrackProducer(const 
edm::ParameterSet &); + ~SingleLongTrackProducer() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + +private: + void produce(edm::Event &, const edm::EventSetup &) override; + + const edm::EDGetTokenT> tracksToken; + const edm::EDGetTokenT> muonsToken; + const edm::EDGetTokenT PrimVtxToken; + + const int minNumberOfLayers; + const double matchInDr; + const bool onlyValidHits; + const bool debug; + const double minPt; + const double maxEta; + const double maxDxy; + const double maxDz; +}; + +SingleLongTrackProducer::SingleLongTrackProducer(const edm::ParameterSet &iConfig) + : tracksToken{consumes(iConfig.getParameter("allTracks"))}, + muonsToken{consumes>(iConfig.getParameter("matchMuons"))}, + PrimVtxToken{consumes(iConfig.getParameter("PrimaryVertex"))}, + minNumberOfLayers{iConfig.getParameter("minNumberOfLayers")}, + matchInDr{iConfig.getParameter("requiredDr")}, + onlyValidHits{iConfig.getParameter("onlyValidHits")}, + debug{iConfig.getParameter("debug")}, + minPt{iConfig.getParameter("minPt")}, + maxEta{iConfig.getParameter("maxEta")}, + maxDxy{iConfig.getParameter("maxDxy")}, + maxDz{iConfig.getParameter("maxDz")} { + produces("").setBranchAlias(""); +} + +void SingleLongTrackProducer::produce(edm::Event &iEvent, const edm::EventSetup &iSetup) { + using namespace edm; + + // register output collection: + std::unique_ptr goodTracks(new reco::TrackCollection); + + // register input collections: + const auto &tracks = iEvent.getHandle(tracksToken); + if (!tracks.isValid()) { + edm::LogError("SingleLongTrackProducer") + << "Input track collection is not valid.\n Returning empty output track collection."; + iEvent.put(std::move(goodTracks), ""); + return; + } + + const auto &muons = iEvent.getHandle(muonsToken); + if (!muons.isValid() && matchInDr > 0.) 
{ + edm::LogError("SingleLongTrackProducer") + << "Input muon collection is not valid.\n Returning empty output track collection."; + iEvent.put(std::move(goodTracks), ""); + return; + } + + const auto &vertices = iEvent.getHandle(PrimVtxToken); + if (!vertices.isValid()) { + edm::LogError("SingleLongTrackProducer") + << "Input vertex collection is not valid.\n Returning empty output track collection."; + iEvent.put(std::move(goodTracks), ""); + return; + } + + const reco::Vertex vtx = vertices->at(0); + + // Preselection of long quality tracks + std::vector selTracks; + reco::Track bestTrack; + unsigned int tMuon = 0; + double fitProb = 100; + + for (const auto &track : *tracks) { + const reco::HitPattern &hitpattern = track.hitPattern(); + double dR2min = 100.; + double chiNdof = track.normalizedChi2(); + double dxy = std::abs(track.dxy(vtx.position())); + double dz = std::abs(track.dz(vtx.position())); + + if (track.pt() < minPt) + continue; + + if (std::abs(track.eta()) > maxEta) + continue; + + if (hitpattern.trackerLayersWithMeasurement() < minNumberOfLayers) + continue; + + // Long track needs to be close to a good muon (only if requested) + if (matchInDr > 0.) 
{ + for (const auto &m : *muons) { + if (m.isTrackerMuon()) { + tMuon++; + reco::Track matchedTrack = *(m.innerTrack()); + // match to general track in deltaR + double dr2 = reco::deltaR2(track, matchedTrack); + if (dr2 < dR2min) + dR2min = dr2; + } + } + // matchInDr here is defined positive + if (dR2min >= matchInDr * matchInDr) + continue; + } + // do vertex consistency: + bool vertex_match = dxy < maxDxy && dz < maxDz; + if (!(vertex_match)) + continue; + if (track.validFraction() < 1.0) + continue; + // only save the track with the smallest chiNdof + if (chiNdof < fitProb) { + fitProb = chiNdof; + bestTrack = track; + bestTrack.setExtra(track.extra()); + } + if (debug) + edm::LogPrint("SingleLongTrackProducer") << " deltaR2 (general) track to matched Track: " << dR2min; + if (debug) + edm::LogPrint("SingleLongTrackProducer") << "chi2Ndof:" << chiNdof << " best Track: " << fitProb; + } + + selTracks.push_back(bestTrack); + + if (debug) + edm::LogPrint("SingleLongTrackProducer") + << " number of Tracker Muons: " << tMuon << ", thereof " << selTracks.size() << " tracks passed preselection."; + + // check hits validity in preselected tracks + bool hitIsNotValid{false}; + + for (const auto &track : selTracks) { + reco::HitPattern hitpattern = track.hitPattern(); + int deref{0}; + + // this checks track recHits + try { // (Un)Comment this line with /* to (not) allow for events with not valid hits + auto hb = track.recHitsBegin(); + + for (unsigned int h = 0; h < track.recHitsSize(); h++) { + auto recHit = *(hb + h); + auto const &hit = *recHit; + + if (onlyValidHits && !hit.isValid()) { + hitIsNotValid = true; + continue; + } + } + } catch (cms::Exception const &e) { + deref += 1; + if (debug) + std::cerr << e.explainSelf() << std::endl; + } + + if (hitIsNotValid == true) + break; // (Un)Comment this line with */ to (not) allow for events with not valid hits + + int deref2{0}; + + // this checks track hitPattern hits + try { + auto hb = track.recHitsBegin(); + + for 
(unsigned int h = 0; h < track.recHitsSize(); h++) { + uint32_t pHit = hitpattern.getHitPattern(reco::HitPattern::TRACK_HITS, h); + + auto recHit = *(hb + h); + auto const &hit = *recHit; + + if (onlyValidHits && !hit.isValid()) { + if (debug) + edm::LogPrint("SingleLongTrackProducer") << "hit not valid: " << h; + continue; + } + + // loop over the hits of the track. + if (onlyValidHits && !(hitpattern.validHitFilter(pHit))) { + if (debug) + edm::LogPrint("SingleLongTrackProducer") << "hit not valid: " << h; + continue; + } + } + goodTracks->push_back(track); + } catch (cms::Exception const &e) { + deref2 += 1; + if (debug) + std::cerr << e.explainSelf() << std::endl; + } + + if (debug) + edm::LogPrint("SingleLongTrackProducer") + << "found tracks with " << deref << "missing valid hits and " << deref2 << " missing hit pattern"; + } + + if (debug) { + auto const &moduleType = moduleDescription().moduleName(); + auto const &moduleLabel = moduleDescription().moduleLabel(); + edm::LogPrint("SingleLongTrackProducer") << "[" << moduleType << "] (" << moduleLabel << ") " + << " output track size: " << goodTracks.get()->size(); + } + + // save track collection in event: + iEvent.put(std::move(goodTracks), ""); +} + +void SingleLongTrackProducer::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + desc.add("allTracks", edm::InputTag("generalTracks"))->setComment("input track collection"); + desc.add("matchMuons", edm::InputTag("earlyMuons"))->setComment("input muon collection for matching"); + desc.add("PrimaryVertex", edm::InputTag("offlinePrimaryVertices")) + ->setComment("input primary vertex collection"); + desc.add("minNumberOfLayers", 10)->setComment("minimum number of layers"); + desc.add("requiredDr", 0.01)->setComment("matching muons deltaR. 
If negative do not match"); + desc.add("onlyValidHits", true)->setComment("use only valid hits"); + desc.add("debug", false)->setComment("verbose?"); + desc.add("minPt", 15.0)->setComment("minimum pT"); + desc.add("maxEta", 2.2)->setComment("maximum pseudorapidity (absolute value)"); + desc.add("maxDxy", 0.02)->setComment("maximum transverse impact parameter"); + desc.add("maxDz", 0.5)->setComment("maximum longitudinal impact parameter"); + descriptions.addWithDefaultLabel(desc); +} + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(SingleLongTrackProducer); diff --git a/RecoTracker/FinalTrackSelectors/plugins/TrackerTrackHitFilter.cc b/RecoTracker/FinalTrackSelectors/plugins/TrackerTrackHitFilter.cc index c6341cd9d19a2..c03ecb6c92ebb 100644 --- a/RecoTracker/FinalTrackSelectors/plugins/TrackerTrackHitFilter.cc +++ b/RecoTracker/FinalTrackSelectors/plugins/TrackerTrackHitFilter.cc @@ -62,6 +62,9 @@ * minimumHits = Minimum hits that the output TrackCandidate must have to be saved * replaceWithInactiveHits = instead of discarding hits, replace them with a invalid "inactive" hits, * so multiple scattering is accounted for correctly. + * truncateTracks = determines if recHits collection is to be truncated to provide tracks with + * layersRemaining number of layers after refitting + * layersRemaining = number of tracker layers with measurement remaining after truncating track * stripFrontInvalidHits = strip invalid hits at the beginning of the track * stripBackInvalidHits = strip invalid hits at the end of the track * stripAllInvalidHits = remove ALL invald hits (might be a problem for multiple scattering, use with care!) 
@@ -87,6 +90,8 @@ namespace reco { const Trajectory *itt, std::vector &hits); void produceFromTrack(const edm::EventSetup &iSetup, const Track *itt, std::vector &hits); + unsigned int getSequLayer(const reco::Track &tk, unsigned int prevSequLayers, std::vector isNotValidVec); + bool isFirstValidHitInLayer(const reco::Track &tk, std::vector isNotValidVec); static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); @@ -125,6 +130,9 @@ namespace reco { size_t minimumHits_; + unsigned int layersRemaining_; + bool truncateTracks_; + bool replaceWithInactiveHits_; bool stripFrontInvalidHits_; bool stripBackInvalidHits_; @@ -301,6 +309,8 @@ namespace reco { TrackerTrackHitFilter::TrackerTrackHitFilter(const edm::ParameterSet &iConfig) : src_(iConfig.getParameter("src")), minimumHits_(iConfig.getParameter("minimumHits")), + layersRemaining_(iConfig.getParameter("layersRemaining")), + truncateTracks_(iConfig.getParameter("truncateTracks")), replaceWithInactiveHits_(iConfig.getParameter("replaceWithInactiveHits")), stripFrontInvalidHits_(iConfig.getParameter("stripFrontInvalidHits")), stripBackInvalidHits_(iConfig.getParameter("stripBackInvalidHits")), @@ -561,6 +571,54 @@ namespace reco { iEvent.put(std::move(output)); } + bool TrackerTrackHitFilter::isFirstValidHitInLayer(const reco::Track &tk, std::vector isNotValidVec) { + reco::HitPattern hp = tk.hitPattern(); + + int vecSize = static_cast(isNotValidVec.size()); + // If hit is not valid, it will not count as a tracker layer with measurement -> don't increase sequLayers + if (isNotValidVec[vecSize - 1]) + return false; + + // If very first valid layer -> increase sequLayers + if (vecSize == 1) + return true; + + uint32_t pHit = hp.getHitPattern(reco::HitPattern::TRACK_HITS, vecSize - 1); + uint32_t thisLayer = hp.getLayer(pHit); + uint32_t thisSubStruct = hp.getSubStructure(pHit); + + // This loop compares the previous hits substructure and layer with current hit. 
If hits in the same layer + // and substructure and previous hit is valid, skip layer. If previous hit is not valid, even if in same layer + // and substructure, check the previous previous hit. Repeat process for every previous hit until reaching + // a valid hit or different layer/substructure + for (int j = 0; j < vecSize; ++j) { + if (vecSize > (j + 1)) { + uint32_t nHit = hp.getHitPattern(reco::HitPattern::TRACK_HITS, vecSize - (j + 2)); + uint32_t prevLayer = hp.getLayer(nHit); + uint32_t prevSubStruct = hp.getSubStructure(nHit); + if ((thisLayer == prevLayer) && (thisSubStruct == prevSubStruct)) { + if (isNotValidVec[vecSize - (j + 2)] == false) { + return false; + } + } else { + return true; + } + } else { + return true; + } + } + return false; + } + + unsigned int TrackerTrackHitFilter::getSequLayer(const reco::Track &tk, + unsigned int prevSequLayers, + std::vector isNotValidVec) { + unsigned int sequLayers = 0; + sequLayers = isFirstValidHitInLayer(tk, isNotValidVec) ? prevSequLayers + 1 : prevSequLayers; + + return sequLayers; + } + TrackCandidate TrackerTrackHitFilter::makeCandidate(const reco::Track &tk, std::vector::iterator hitsBegin, std::vector::iterator hitsEnd) { @@ -586,8 +644,29 @@ namespace reco { TrajectorySeed seed(state, TrackCandidate::RecHitContainer(), pdir); TrackCandidate::RecHitContainer ownHits; ownHits.reserve(hitsEnd - hitsBegin); + const reco::HitPattern &hp = tk.hitPattern(); + unsigned int sequLayers = 0; + std::vector isNotValidVec; + isNotValidVec.clear(); + bool breakHitLoop = false; + if (int(int(hp.numberOfValidHits()) - int(hp.numberOfAllHits(reco::HitPattern::TRACK_HITS))) != 0) { + breakHitLoop = true; + } for (; hitsBegin != hitsEnd; ++hitsBegin) { - //if(! 
(*hitsBegin)->isValid() ) std::cout<<"Putting in the trackcandidate an INVALID HIT !"<isValid()) { + isNotValidVec.push_back(true); + } else { + isNotValidVec.push_back(false); + } + sequLayers = getSequLayer(tk, sequLayers, isNotValidVec); + if (sequLayers > layersRemaining_) + break; + } ownHits.push_back(*hitsBegin); } @@ -1040,6 +1119,12 @@ namespace reco { ->setComment( " instead of removing hits replace them with inactive hits, so you still consider the multiple " "scattering"); + desc.add("truncateTracks", false) + ->setComment( + "determines if recHits collection is to be truncated to provide tracks with layersRemaining number of " + "layers after refitting"); + desc.add("layersRemaining", 8) + ->setComment("number of tracker layers with measurement remaining after truncating track"); desc.add("stripFrontInvalidHits", false) ->setComment("strip invalid & inactive hits from any end of the track"); desc.add("stripBackInvalidHits", false) diff --git a/RecoTracker/FinalTrackSelectors/python/SingleLongTrackProducer_cfi.py b/RecoTracker/FinalTrackSelectors/python/SingleLongTrackProducer_cfi.py new file mode 100644 index 0000000000000..1f97050cea9ba --- /dev/null +++ b/RecoTracker/FinalTrackSelectors/python/SingleLongTrackProducer_cfi.py @@ -0,0 +1,16 @@ +import FWCore.ParameterSet.Config as cms + +from RecoTracker.FinalTrackSelectors.singleLongTrackProducer_cfi import singleLongTrackProducer + +SingleLongTrackProducer = singleLongTrackProducer.clone( + allTracks = "generalTracks", + matchMuons = "earlyMuons", + requiredDr= 0.01, + minNumberOfLayers = 10, + onlyValidHits = True, + debug = False, + minPt = 15.0, + maxEta = 2.2, + maxDxy = 0.02, + maxDz = 0.5, + PrimaryVertex = "offlinePrimaryVertices") diff --git a/RecoTracker/FinalTrackSelectors/python/displacedRegionalStepInputTracks_cfi.py b/RecoTracker/FinalTrackSelectors/python/displacedRegionalStepInputTracks_cfi.py index cb0d1dc22b5e0..4571a68e2f11f 100644 --- 
a/RecoTracker/FinalTrackSelectors/python/displacedRegionalStepInputTracks_cfi.py +++ b/RecoTracker/FinalTrackSelectors/python/displacedRegionalStepInputTracks_cfi.py @@ -15,3 +15,9 @@ "muonSeededTracksOutInClassifier" ] ) +from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA +from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017 +(pp_on_AA | pp_on_XeXe_2017).toModify(displacedRegionalStepInputTracks, + trackProducers = [], + inputClassifiers = [] +) diff --git a/RecoTracker/FinalTrackSelectors/python/trackSelectionTf_cfi.py b/RecoTracker/FinalTrackSelectors/python/trackSelectionTf_cfi.py index f982016a447dd..73d16f88b0fa3 100644 --- a/RecoTracker/FinalTrackSelectors/python/trackSelectionTf_cfi.py +++ b/RecoTracker/FinalTrackSelectors/python/trackSelectionTf_cfi.py @@ -7,5 +7,9 @@ ComponentName = "trackSelectionTfPLess", FileName = "RecoTracker/FinalTrackSelectors/data/TrackTfClassifier/MkFitPixelLessOnly_Run3_12_5_0_pre5.pb" ) +trackSelectionTfDisplacedRegional = _tfGraphDefProducer.clone( + ComponentName = "trackSelectionTfDisplacedRegional", + FileName = "RecoTracker/FinalTrackSelectors/data/TrackTfClassifier/MkFitDisplacedRegionalOnly_Run3_12_5_0_pre5.pb" +) diff --git a/RecoTracker/IterativeTracking/python/DisplacedRegionalStep_cff.py b/RecoTracker/IterativeTracking/python/DisplacedRegionalStep_cff.py index 3ea10ea22e565..438e33ceadc30 100644 --- a/RecoTracker/IterativeTracking/python/DisplacedRegionalStep_cff.py +++ b/RecoTracker/IterativeTracking/python/DisplacedRegionalStep_cff.py @@ -453,6 +453,7 @@ from RecoTracker.FinalTrackSelectors.trackSelectionTf_cfi import * from RecoTracker.FinalTrackSelectors.trackSelectionTf_CKF_cfi import * trackdnn.toReplaceWith(displacedRegionalStep, trackTfClassifier.clone( + mva = dict(tfDnnLabel = 'trackSelectionTfDisplacedRegional'), src = 'displacedRegionalStepTracks', qualityCuts = qualityCutDictionary.DisplacedRegionalStep.value() )) diff --git 
a/RecoTracker/IterativeTracking/python/JetCoreRegionalStep_cff.py b/RecoTracker/IterativeTracking/python/JetCoreRegionalStep_cff.py index 491604c1f2ff6..aa2ed52be9cf5 100644 --- a/RecoTracker/IterativeTracking/python/JetCoreRegionalStep_cff.py +++ b/RecoTracker/IterativeTracking/python/JetCoreRegionalStep_cff.py @@ -12,8 +12,8 @@ # run only if there are high pT jets jetsForCoreTracking = cms.EDFilter('CandPtrSelector', src = cms.InputTag('ak4CaloJetsForTrk'), cut = cms.string('pt > 100 && abs(eta) < 2.5'), filter = cms.bool(False)) -jetsForCoreTrackingBarrel = jetsForCoreTracking.clone( cut = 'pt > 100 && abs(eta) < 2.5' ) -jetsForCoreTrackingEndcap = jetsForCoreTracking.clone( cut = 'pt > 100 && abs(eta) > 1.4 && abs(eta) < 2.5' ) +jetsForCoreTrackingBarrel = jetsForCoreTracking.clone( cut = 'pt > 100 && abs(eta) < 1.4' ) +jetsForCoreTrackingEndcap = jetsForCoreTracking.clone( cut = 'pt > 100 && abs(eta) < 2.5' ) # care only at tracks from main PV firstStepGoodPrimaryVertices = cms.EDFilter('PrimaryVertexObjectFilter', @@ -158,7 +158,7 @@ maxPtForLooperReconstruction = 0.0) jetCoreRegionalStepBarrelTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilderIterativeDefault.clone( trajectoryFilter = dict(refToPSet_ = 'jetCoreRegionalStepBarrelTrajectoryFilter'), - maxCand = 50, + maxCand = 30, estimator = 'jetCoreRegionalStepChi2Est', keepOriginalIfRebuildFails = True, lockHits = False, @@ -168,6 +168,7 @@ maxPtForLooperReconstruction = cms.double(0.0)) jetCoreRegionalStepEndcapTrajectoryBuilder = jetCoreRegionalStepTrajectoryBuilder.clone( trajectoryFilter = cms.PSet(refToPSet_ = cms.string('jetCoreRegionalStepEndcapTrajectoryFilter')), + maxCand = 30, #clustersToSkip = cms.InputTag('jetCoreRegionalStepClusters'), ) trackingNoLoopers.toModify(jetCoreRegionalStepEndcapTrajectoryBuilder, @@ -240,7 +241,9 @@ copyExtras = True ) fastSim.toReplaceWith(jetCoreRegionalStepTracks,_fastSim_jetCoreRegionalStepTracks) - +from 
Configuration.ProcessModifiers.seedingDeepCore_cff import seedingDeepCore +(seedingDeepCore & fastSim).toReplaceWith(jetCoreRegionalStepBarrelTracks,_fastSim_jetCoreRegionalStepTracks) +(seedingDeepCore & fastSim).toReplaceWith(jetCoreRegionalStepEndcapTracks,_fastSim_jetCoreRegionalStepTracks) # Final selection from RecoTracker.FinalTrackSelectors.TrackCutClassifier_cff import * @@ -334,10 +337,8 @@ jetCoreRegionalStepEndcap) -from Configuration.ProcessModifiers.seedingDeepCore_cff import seedingDeepCore - from RecoTracker.FinalTrackSelectors.TrackCollectionMerger_cfi import * -seedingDeepCore.toReplaceWith(jetCoreRegionalStepTracks, TrackCollectionMerger.clone( +(seedingDeepCore & ~fastSim).toReplaceWith(jetCoreRegionalStepTracks, TrackCollectionMerger.clone( trackProducers = ["jetCoreRegionalStepBarrelTracks", "jetCoreRegionalStepEndcapTracks",], inputClassifiers = ["jetCoreRegionalStepBarrel", @@ -354,6 +355,12 @@ cms.Task(jetCoreRegionalStepTracks,jetCoreRegionalStep) )) +# short-circuit tracking parts for fastsim fastSim.toReplaceWith(JetCoreRegionalStepTask, - cms.Task(jetCoreRegionalStepTracks, - jetCoreRegionalStep)) + cms.Task(jetCoreRegionalStepTracks, + jetCoreRegionalStep)) +(seedingDeepCore & fastSim).toReplaceWith(JetCoreRegionalStepTask, + cms.Task(jetCoreRegionalStepBarrelTracks, jetCoreRegionalStepEndcapTracks, + jetCoreRegionalStepTracks, + jetCoreRegionalStepBarrel, jetCoreRegionalStepEndcap, + jetCoreRegionalStep)) diff --git a/RecoTracker/IterativeTracking/python/PixelPairStep_cff.py b/RecoTracker/IterativeTracking/python/PixelPairStep_cff.py index e0760a6565df8..81238cde7d34c 100644 --- a/RecoTracker/IterativeTracking/python/PixelPairStep_cff.py +++ b/RecoTracker/IterativeTracking/python/PixelPairStep_cff.py @@ -92,6 +92,8 @@ originRadius = 0.2, fixedError = 4. 
)) +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +(highBetaStar_2018 & run3_upc).toModify(pixelPairStepTrackingRegions,RegionPSet = dict(originRadius = 0.015)) fastSim.toModify(pixelPairStepTrackingRegions, RegionPSet=dict(VertexCollection = 'firstStepPrimaryVerticesBeforeMixing')) # SEEDS diff --git a/RecoTracker/MeasurementDet/plugins/TkPixelMeasurementDet.cc b/RecoTracker/MeasurementDet/plugins/TkPixelMeasurementDet.cc index 2ac7459194582..9fe741653302d 100644 --- a/RecoTracker/MeasurementDet/plugins/TkPixelMeasurementDet.cc +++ b/RecoTracker/MeasurementDet/plugins/TkPixelMeasurementDet.cc @@ -153,19 +153,20 @@ bool TkPixelMeasurementDet::hasBadComponents(const TrajectoryStateOnSurface& tso return false; auto lp = tsos.localPosition(); - auto le = tsos.localError().positionError(); + float lexx = tsos.hasError() ? tsos.localError().positionError().xx() : 0.0f; + float leyy = tsos.hasError() ? tsos.localError().positionError().yy() : 0.0f; for (auto const& broc : badRocPositions_) { auto dx = std::abs(broc.x() - lp.x()) - theRocWidth; auto dy = std::abs(broc.y() - lp.y()) - theRocHeight; if ((dx <= 0.f) & (dy <= 0.f)) return true; - if ((dx * dx < 9.f * le.xx()) && (dy * dy < 9.f * le.yy())) + if ((dx * dx < 9.f * lexx) && (dy * dy < 9.f * leyy)) return true; } if (badFEDChannelPositions == nullptr) return false; - float dx = 3.f * std::sqrt(le.xx()) + theRocWidth, dy = 3.f * std::sqrt(le.yy()) + theRocHeight; + float dx = 3.f * std::sqrt(lexx) + theRocWidth, dy = 3.f * std::sqrt(leyy) + theRocHeight; for (auto const& p : *badFEDChannelPositions) { if (lp.x() > (p.first.x() - dx) && lp.x() < (p.second.x() + dx) && lp.y() > (p.first.y() - dy) && lp.y() < (p.second.y() + dy)) { diff --git a/RecoTracker/MkFit/plugins/MkFitProducer.cc b/RecoTracker/MkFit/plugins/MkFitProducer.cc index b66a2294db6e0..0c0ec3634f84e 100644 --- a/RecoTracker/MkFit/plugins/MkFitProducer.cc +++ b/RecoTracker/MkFit/plugins/MkFitProducer.cc @@ -32,7 +32,7 @@ class 
MkFitProducer : public edm::global::EDProducer> { public: explicit MkFitProducer(edm::ParameterSet const& iConfig); - ~MkFitProducer() override = default; + ~MkFitProducer() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); @@ -101,6 +101,8 @@ MkFitProducer::MkFitProducer(edm::ParameterSet const& iConfig) mkfit::MkBuilderWrapper::populate(); } +MkFitProducer::~MkFitProducer() { mkfit::MkBuilderWrapper::clear(); } + void MkFitProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; diff --git a/RecoTracker/MkFitCMS/standalone/Shell.cc b/RecoTracker/MkFitCMS/standalone/Shell.cc index ad4fa12c00827..4f378ec0adf82 100644 --- a/RecoTracker/MkFitCMS/standalone/Shell.cc +++ b/RecoTracker/MkFitCMS/standalone/Shell.cc @@ -325,7 +325,9 @@ namespace mkfit { reco tracks labels are seed indices. seed labels are sim track indices -- - mkfit labels are seed indices in given iteration after cleaning (at seed load-time) + mkfit labels are seed indices in given iteration after cleaning (at seed load-time). + This is no longer true -- was done like that in branch where this code originated from. + It seems the label is the same as seed label. 
*/ int Shell::LabelFromHits(Track &t, bool replace, float good_frac) { diff --git a/RecoTracker/MkFitCore/interface/IterationConfig.h b/RecoTracker/MkFitCore/interface/IterationConfig.h index 33bc8dabf658d..5504d2476e504 100644 --- a/RecoTracker/MkFitCore/interface/IterationConfig.h +++ b/RecoTracker/MkFitCore/interface/IterationConfig.h @@ -90,6 +90,8 @@ namespace mkfit { float chi2Cut_min = 15.0; float chi2CutOverlap = 3.5; float pTCutOverlap = 0.0; + bool recheckOverlap = false; + bool useHitSelectionV2 = false; //quality filter params int minHitsQF = 4; diff --git a/RecoTracker/MkFitCore/interface/MkBuilder.h b/RecoTracker/MkFitCore/interface/MkBuilder.h index c68b3e7bf80e6..bf0335d31bc7a 100644 --- a/RecoTracker/MkFitCore/interface/MkBuilder.h +++ b/RecoTracker/MkFitCore/interface/MkBuilder.h @@ -38,6 +38,7 @@ namespace mkfit { static std::unique_ptr make_builder(bool silent = true); static void populate(); + static void clear(); int total_cands() const; std::pair max_hits_layer(const EventOfHits &eoh) const; diff --git a/RecoTracker/MkFitCore/interface/MkBuilderWrapper.h b/RecoTracker/MkFitCore/interface/MkBuilderWrapper.h index 63e16b3c0e759..fcc5c83e70ee5 100644 --- a/RecoTracker/MkFitCore/interface/MkBuilderWrapper.h +++ b/RecoTracker/MkFitCore/interface/MkBuilderWrapper.h @@ -20,6 +20,7 @@ namespace mkfit { MkBuilder& get() { return *builder_; } static void populate(); + static void clear(); private: std::unique_ptr builder_; diff --git a/RecoTracker/MkFitCore/interface/Track.h b/RecoTracker/MkFitCore/interface/Track.h index a0aaa772fd728..bdb489e4de869 100644 --- a/RecoTracker/MkFitCore/interface/Track.h +++ b/RecoTracker/MkFitCore/interface/Track.h @@ -393,8 +393,6 @@ namespace mkfit { Track(int charge, const SVector3& position, const SVector3& momentum, const SMatrixSym66& errors, float chi2) : TrackBase(charge, position, momentum, errors, chi2) {} - Track(const Track& t) : TrackBase(t), hitsOnTrk_(t.hitsOnTrk_) {} - // This function is very 
inefficient, use only for debug and validation! HitVec hitsVector(const std::vector& globalHitVec) const { HitVec hitsVec; diff --git a/RecoTracker/MkFitCore/interface/TrackStructures.h b/RecoTracker/MkFitCore/interface/TrackStructures.h index 736fbe8847e55..5c7912b8ea7c1 100644 --- a/RecoTracker/MkFitCore/interface/TrackStructures.h +++ b/RecoTracker/MkFitCore/interface/TrackStructures.h @@ -224,6 +224,7 @@ namespace mkfit { } void addHitIdx(int hitIdx, int hitLyr, float chi2); + bool popOverlap(); HoTNode& refLastHoTNode(); // for filling up overlap info const HoTNode& refLastHoTNode() const; // for dump traversal @@ -564,6 +565,25 @@ namespace mkfit { } } + inline bool TrackCand::popOverlap() { + auto popHitIdx = getLastHitIdx(); + auto popHitLyr = getLastHitLyr(); + auto popPrev = refLastHoTNode().m_prev_idx; + auto popChi2 = refLastHoTNode().m_chi2; + // sanity checks first, then just shift lastHitIdx_ to popPrev + if (lastHitIdx_ == 0 || popHitIdx < 0) + return false; + auto prevHitLyr = m_comb_candidate->hot(popPrev).layer; + auto prevHitIdx = m_comb_candidate->hot(popPrev).index; + if (popHitLyr != prevHitLyr || prevHitIdx < 0) + return false; + lastHitIdx_ = popPrev; + + --nFoundHits_; + chi2_ -= popChi2; + --nOverlapHits_; + return true; + } //============================================================================== class EventOfCombCandidates { diff --git a/RecoTracker/MkFitCore/src/CandCloner.cc b/RecoTracker/MkFitCore/src/CandCloner.cc index 253e639a20b2e..78d2ead9495d3 100644 --- a/RecoTracker/MkFitCore/src/CandCloner.cc +++ b/RecoTracker/MkFitCore/src/CandCloner.cc @@ -24,11 +24,13 @@ namespace mkfit { void CandCloner::begin_eta_bin(EventOfCombCandidates *e_o_ccs, std::vector *update_list, + std::vector *overlap_list, std::vector> *extra_cands, int start_seed, int n_seeds) { mp_event_of_comb_candidates = e_o_ccs; mp_kalman_update_list = update_list; + mp_kalman_overlap_list = overlap_list; mp_extra_cands = extra_cands; m_start_seed = start_seed; 
m_n_seeds = n_seeds; @@ -50,6 +52,7 @@ namespace mkfit { m_idx_max_prev = 0; mp_kalman_update_list->clear(); + mp_kalman_overlap_list->clear(); #ifdef CC_TIME_LAYER t_lay = dtime(); @@ -193,14 +196,21 @@ namespace mkfit { break; if (h2a.hitIdx >= 0) { - mp_kalman_update_list->emplace_back(UpdateIndices(m_start_seed + is, n_pushed, h2a.hitIdx)); - - // set the overlap if we have it and and pT > pTCutOverlap + // set the overlap if we have it and pT > pTCutOverlap HitMatch *hm; if (tc.pT() > mp_iteration_params->pTCutOverlap && (hm = ccand[h2a.trkIdx].findOverlap(h2a.hitIdx, h2a.module))) { - tc.addHitIdx(hm->m_hit_idx, m_layer, 0); - tc.incOverlapCount(); + if (mp_iteration_params->recheckOverlap) { + // Special overlap_list if the overlap hit needs to be re-checked after primary update. + mp_kalman_overlap_list->emplace_back( + UpdateIndices(m_start_seed + is, n_pushed, h2a.hitIdx, hm->m_hit_idx)); + } else { + tc.addHitIdx(hm->m_hit_idx, m_layer, 0); + tc.incOverlapCount(); + mp_kalman_update_list->emplace_back(UpdateIndices(m_start_seed + is, n_pushed, h2a.hitIdx, -1)); + } + } else { + mp_kalman_update_list->emplace_back(UpdateIndices(m_start_seed + is, n_pushed, h2a.hitIdx, -1)); } } diff --git a/RecoTracker/MkFitCore/src/CandCloner.h b/RecoTracker/MkFitCore/src/CandCloner.h index 0466811aa0520..fbb4a29bf69fd 100644 --- a/RecoTracker/MkFitCore/src/CandCloner.h +++ b/RecoTracker/MkFitCore/src/CandCloner.h @@ -25,6 +25,7 @@ namespace mkfit { void begin_eta_bin(EventOfCombCandidates *e_o_ccs, std::vector *update_list, + std::vector *overlap_list, std::vector> *extra_cands, int start_seed, int n_seeds); @@ -56,7 +57,7 @@ namespace mkfit { const IterationParams *mp_iteration_params = nullptr; EventOfCombCandidates *mp_event_of_comb_candidates; - std::vector *mp_kalman_update_list; + std::vector *mp_kalman_update_list, *mp_kalman_overlap_list; std::vector> *mp_extra_cands; #if defined(CC_TIME_ETA) or defined(CC_TIME_LAYER) diff --git 
a/RecoTracker/MkFitCore/src/FindingFoos.h b/RecoTracker/MkFitCore/src/FindingFoos.h index 939d141947b12..a02bff4e2c541 100644 --- a/RecoTracker/MkFitCore/src/FindingFoos.h +++ b/RecoTracker/MkFitCore/src/FindingFoos.h @@ -16,6 +16,10 @@ namespace mkfit { const MPlexLS &, const MPlexLV &, MPlexQI &, const MPlexHS &, const MPlexHV &, MPlexLS &, MPlexLV &, MPlexQI &, \ const int, const PropagationFlags &, const bool +#define COMPUTE_CHI2_AND_UPDATE_ARGS \ + const MPlexLS &, const MPlexLV &, MPlexQI &, const MPlexHS &, const MPlexHV &, MPlexQF &, MPlexLS &, MPlexLV &, \ + MPlexQI &, const int, const PropagationFlags, const bool + class FindingFoos { public: void (*m_compute_chi2_foo)(COMPUTE_CHI2_ARGS); diff --git a/RecoTracker/MkFitCore/src/HitStructures.cc b/RecoTracker/MkFitCore/src/HitStructures.cc index da2afc2f60b0f..91d9cebdeb60a 100644 --- a/RecoTracker/MkFitCore/src/HitStructures.cc +++ b/RecoTracker/MkFitCore/src/HitStructures.cc @@ -91,6 +91,9 @@ namespace mkfit { m_hit_infos.reserve(m_n_hits); } + // Factor to get from hit sigma to half-length in q direction. + const float hl_fac = is_pixel() ? 3.0f : std::sqrt(3.0f); + for (unsigned int i = 0; i < m_n_hits; ++i) { const Hit &h = hitv[i]; @@ -100,13 +103,12 @@ namespace mkfit { m_binnor.register_entry_safe(phi, q); if (Config::usePhiQArrays) { - const float sqrt3 = std::sqrt(3); float half_length, qbar; if (m_is_barrel) { - half_length = sqrt3 * std::sqrt(h.ezz()); + half_length = hl_fac * std::sqrt(h.ezz()); qbar = h.r(); } else { - half_length = sqrt3 * std::sqrt(h.exx() + h.eyy()); + half_length = hl_fac * std::sqrt(h.exx() + h.eyy()); qbar = h.z(); } hinfos.emplace_back(HitInfo({phi, q, half_length, qbar})); @@ -168,13 +170,14 @@ namespace mkfit { m_binnor.register_entry_safe(phi, q); if (Config::usePhiQArrays) { - const float sqrt3 = std::sqrt(3); + // Factor to get from hit sigma to half-length in q direction. + const float hl_fac = is_pixel() ? 
3.0f : std::sqrt(3.0f); float half_length, qbar; if (m_is_barrel) { - half_length = sqrt3 * std::sqrt(h.ezz()); + half_length = hl_fac * std::sqrt(h.ezz()); qbar = h.r(); } else { - half_length = sqrt3 * std::sqrt(h.exx() + h.eyy()); + half_length = hl_fac * std::sqrt(h.exx() + h.eyy()); qbar = h.z(); } m_hit_infos.emplace_back(HitInfo({phi, q, half_length, qbar})); diff --git a/RecoTracker/MkFitCore/src/IterationConfig.cc b/RecoTracker/MkFitCore/src/IterationConfig.cc index 60e9066d89da0..b4b5e612584a9 100644 --- a/RecoTracker/MkFitCore/src/IterationConfig.cc +++ b/RecoTracker/MkFitCore/src/IterationConfig.cc @@ -61,6 +61,8 @@ namespace mkfit { /* float */ chi2Cut_min, /* float */ chi2CutOverlap, /* float */ pTCutOverlap, + /* bool */ recheckOverlap, + /* bool */ useHitSelectionV2, /* int */ minHitsQF, /* float */ minPtCut, /* unsigned int */ maxClusterSize) diff --git a/RecoTracker/MkFitCore/src/KalmanUtilsMPlex.cc b/RecoTracker/MkFitCore/src/KalmanUtilsMPlex.cc index 481ad42150fdc..01729ebed4610 100644 --- a/RecoTracker/MkFitCore/src/KalmanUtilsMPlex.cc +++ b/RecoTracker/MkFitCore/src/KalmanUtilsMPlex.cc @@ -741,7 +741,7 @@ namespace mkfit { KFO_Update_Params | KFO_Local_Cov, psErr, psPar, msErr, msPar, outErr, outPar, dummy_chi2, N_proc); } for (int n = 0; n < NN; ++n) { - if (outPar.At(n, 3, 0) < 0) { + if (n < N_proc && outPar.At(n, 3, 0) < 0) { Chg.At(n, 0, 0) = -Chg.At(n, 0, 0); outPar.At(n, 3, 0) = -outPar.At(n, 3, 0); } @@ -777,7 +777,11 @@ namespace mkfit { MPlexQF msRad; #pragma omp simd for (int n = 0; n < NN; ++n) { - msRad.At(n, 0, 0) = std::hypot(msPar.constAt(n, 0, 0), msPar.constAt(n, 1, 0)); + if (n < N_proc) { + msRad.At(n, 0, 0) = std::hypot(msPar.constAt(n, 0, 0), msPar.constAt(n, 1, 0)); + } else { + msRad.At(n, 0, 0) = 0.0f; + } } propagateHelixToRMPlex(psErr, psPar, inChg, msRad, propErr, propPar, outFailFlag, N_proc, propFlags); @@ -843,9 +847,14 @@ namespace mkfit { MPlexQF rotT00; MPlexQF rotT01; for (int n = 0; n < NN; ++n) { - const 
float r = std::hypot(msPar.constAt(n, 0, 0), msPar.constAt(n, 1, 0)); - rotT00.At(n, 0, 0) = -(msPar.constAt(n, 1, 0) + psPar.constAt(n, 1, 0)) / (2 * r); - rotT01.At(n, 0, 0) = (msPar.constAt(n, 0, 0) + psPar.constAt(n, 0, 0)) / (2 * r); + if (n < N_proc) { + const float r = std::hypot(msPar.constAt(n, 0, 0), msPar.constAt(n, 1, 0)); + rotT00.At(n, 0, 0) = -(msPar.constAt(n, 1, 0) + psPar.constAt(n, 1, 0)) / (2 * r); + rotT01.At(n, 0, 0) = (msPar.constAt(n, 0, 0) + psPar.constAt(n, 0, 0)) / (2 * r); + } else { + rotT00.At(n, 0, 0) = 0.0f; + rotT01.At(n, 0, 0) = 0.0f; + } } MPlexHV res_glo; //position residual in global coordinates @@ -1359,7 +1368,7 @@ namespace mkfit { kalmanOperationEndcap(KFO_Update_Params, psErr, psPar, msErr, msPar, outErr, outPar, dummy_chi2, N_proc); } for (int n = 0; n < NN; ++n) { - if (outPar.At(n, 3, 0) < 0) { + if (n < N_proc && outPar.At(n, 3, 0) < 0) { Chg.At(n, 0, 0) = -Chg.At(n, 0, 0); outPar.At(n, 3, 0) = -outPar.At(n, 3, 0); } diff --git a/RecoTracker/MkFitCore/src/Matriplex/Matriplex.h b/RecoTracker/MkFitCore/src/Matriplex/Matriplex.h index 712afd240789b..b5e979b7d6e87 100644 --- a/RecoTracker/MkFitCore/src/Matriplex/Matriplex.h +++ b/RecoTracker/MkFitCore/src/Matriplex/Matriplex.h @@ -56,11 +56,6 @@ namespace Matriplex { T& operator()(idx_t n, idx_t i, idx_t j) { return fArray[(i * D2 + j) * N + n]; } const T& operator()(idx_t n, idx_t i, idx_t j) const { return fArray[(i * D2 + j) * N + n]; } - Matriplex& operator=(const Matriplex& m) { - memcpy(fArray, m.fArray, sizeof(T) * kTotSize); - return *this; - } - Matriplex& operator=(T t) { for (idx_t i = 0; i < kTotSize; ++i) fArray[i] = t; @@ -115,6 +110,24 @@ namespace Matriplex { return *this; } + Matriplex operator-() { + Matriplex t; + for (idx_t i = 0; i < kTotSize; ++i) + t.fArray[i] = -fArray[i]; + return t; + } + + Matriplex& abs(const Matriplex& a) { + for (idx_t i = 0; i < kTotSize; ++i) + fArray[i] = std::abs(a.fArray[i]); + return *this; + } + Matriplex& abs() { + for 
(idx_t i = 0; i < kTotSize; ++i) + fArray[i] = std::abs(fArray[i]); + return *this; + } + Matriplex& sqrt(const Matriplex& a) { for (idx_t i = 0; i < kTotSize; ++i) fArray[i] = std::sqrt(a.fArray[i]); @@ -401,6 +414,12 @@ namespace Matriplex { return t; } + template + MPlex abs(const MPlex& a) { + MPlex t; + return t.abs(a); + } + template MPlex sqrt(const MPlex& a) { MPlex t; @@ -410,7 +429,7 @@ namespace Matriplex { template MPlex sqr(const MPlex& a) { MPlex t; - return t.sqrt(a); + return t.sqr(a); } template diff --git a/RecoTracker/MkFitCore/src/Matriplex/MatriplexSym.h b/RecoTracker/MkFitCore/src/Matriplex/MatriplexSym.h index 151f616402440..e3b1a133f0daf 100644 --- a/RecoTracker/MkFitCore/src/Matriplex/MatriplexSym.h +++ b/RecoTracker/MkFitCore/src/Matriplex/MatriplexSym.h @@ -78,6 +78,8 @@ namespace Matriplex { return *this; } + MatriplexSym(const MatriplexSym& m) = default; + void copySlot(idx_t n, const MatriplexSym& m) { for (idx_t i = n; i < kTotSize; i += N) { fArray[i] = m.fArray[i]; @@ -263,6 +265,14 @@ namespace Matriplex { a[5 * N + n] = s * c22; } } + + Matriplex ReduceFixedIJ(idx_t i, idx_t j) const { + Matriplex t; + for (idx_t n = 0; n < N; ++n) { + t[n] = constAt(n, i, j); + } + return t; + } }; template diff --git a/RecoTracker/MkFitCore/src/MatriplexPackers.h b/RecoTracker/MkFitCore/src/MatriplexPackers.h index feef081682763..714de9eb23364 100644 --- a/RecoTracker/MkFitCore/src/MatriplexPackers.h +++ b/RecoTracker/MkFitCore/src/MatriplexPackers.h @@ -25,6 +25,8 @@ namespace mkfit { void reset() { m_pos = 0; } + int size() const { return m_pos; } + void addNullInput() { m_idx[m_pos++] = 0; } void addInput(const D& item) { diff --git a/RecoTracker/MkFitCore/src/MkBuilder.cc b/RecoTracker/MkFitCore/src/MkBuilder.cc index 2c984991519b8..0bedff5de99f1 100644 --- a/RecoTracker/MkFitCore/src/MkBuilder.cc +++ b/RecoTracker/MkFitCore/src/MkBuilder.cc @@ -44,6 +44,12 @@ namespace mkfit { m_fitters.populate(n_thr - m_fitters.size()); 
m_finders.populate(n_thr - m_finders.size()); } + + void clear() { + m_cloners.clear(); + m_fitters.clear(); + m_finders.clear(); + } }; CMS_SA_ALLOW ExecutionContext g_exe_ctx; @@ -166,6 +172,7 @@ namespace mkfit { std::unique_ptr MkBuilder::make_builder(bool silent) { return std::make_unique(silent); } void MkBuilder::populate() { g_exe_ctx.populate(Config::numThreadsFinder); } + void MkBuilder::clear() { g_exe_ctx.clear(); } std::pair MkBuilder::max_hits_layer(const EventOfHits &eoh) const { int maxN = 0; @@ -979,15 +986,16 @@ namespace mkfit { const int n_seeds = end_seed - start_seed; std::vector> seed_cand_idx; - std::vector seed_cand_update_idx; + std::vector seed_cand_update_idx, seed_cand_overlap_idx; seed_cand_idx.reserve(n_seeds * params.maxCandsPerSeed); seed_cand_update_idx.reserve(n_seeds * params.maxCandsPerSeed); + seed_cand_overlap_idx.reserve(n_seeds * params.maxCandsPerSeed); std::vector> extra_cands(n_seeds); for (int ii = 0; ii < n_seeds; ++ii) extra_cands[ii].reserve(params.maxCandsPerSeed); - cloner.begin_eta_bin(&eoccs, &seed_cand_update_idx, &extra_cands, start_seed, n_seeds); + cloner.begin_eta_bin(&eoccs, &seed_cand_update_idx, &seed_cand_overlap_idx, &extra_cands, start_seed, n_seeds); // Loop over layers, starting from after the seed. @@ -1087,7 +1095,10 @@ namespace mkfit { dprint("now get hit range"); - mkfndr->selectHitIndices(layer_of_hits, end - itrack); + if (iter_params.useHitSelectionV2) + mkfndr->selectHitIndicesV2(layer_of_hits, end - itrack); + else + mkfndr->selectHitIndices(layer_of_hits, end - itrack); find_tracks_handle_missed_layers( mkfndr, layer_info, extra_cands, seed_cand_idx, region, start_seed, itrack, end); @@ -1115,6 +1126,9 @@ namespace mkfit { // Update loop of best candidates. CandCloner prepares the list of those // that need update (excluding all those with negative last hit index). + // This is split into two sections - candidates without overlaps and with overlaps. 
+ // On CMS PU-50 the ratio of those is ~ 65 : 35 over all iterations. + // Note, overlap recheck is only enabled for some iterations, e.g. pixelLess. const int theEndUpdater = seed_cand_update_idx.size(); @@ -1129,6 +1143,43 @@ namespace mkfit { mkfndr->copyOutParErr(eoccs.refCandidates_nc(), end - itrack, false); } + const int theEndOverlapper = seed_cand_overlap_idx.size(); + + for (int itrack = 0; itrack < theEndOverlapper; itrack += NN) { + const int end = std::min(itrack + NN, theEndOverlapper); + + mkfndr->inputTracksAndHits(eoccs.refCandidates(), layer_of_hits, seed_cand_overlap_idx, itrack, end, true); + + mkfndr->updateWithLoadedHit(end - itrack, layer_of_hits, fnd_foos); + + mkfndr->copyOutParErr(eoccs.refCandidates_nc(), end - itrack, false); + + mkfndr->inputOverlapHits(layer_of_hits, seed_cand_overlap_idx, itrack, end); + + // XXXX Could also be calcChi2AndUpdate(), then copy-out would have to be done + // below, choosing appropriate slot (with or without the overlap hit). + // Probably in a dedicated MkFinder copyOutXyzz function. + mkfndr->chi2OfLoadedHit(end - itrack, fnd_foos); + + for (int ii = itrack; ii < end; ++ii) { + const int fi = ii - itrack; + TrackCand &tc = eoccs[seed_cand_overlap_idx[ii].seed_idx][seed_cand_overlap_idx[ii].cand_idx]; + + // XXXX For now we DO NOT use chi2 as this was how things were done before the post-update + // chi2 check. To use it we should retune scoring function (might be even simpler). + auto chi2Ovlp = mkfndr->m_Chi2[fi]; + if (mkfndr->m_FailFlag[fi] == 0 && chi2Ovlp >= 0.0f && chi2Ovlp <= 60.0f) { + auto scoreCand = + getScoreCand(st_par.m_track_scorer, tc, true /*penalizeTailMissHits*/, true /*inFindCandidates*/); + tc.addHitIdx(seed_cand_overlap_idx[ii].ovlp_idx, curr_layer, chi2Ovlp); + tc.incOverlapCount(); + auto scoreCandOvlp = getScoreCand(st_par.m_track_scorer, tc, true, true); + if (scoreCand > scoreCandOvlp) + tc.popOverlap(); + } + } + } + // Check if cands are sorted, as expected. 
#ifdef DEBUG for (int iseed = start_seed; iseed < end_seed; ++iseed) { diff --git a/RecoTracker/MkFitCore/src/MkBuilderWrapper.cc b/RecoTracker/MkFitCore/src/MkBuilderWrapper.cc index 08bd3793bf459..f05db168d1684 100644 --- a/RecoTracker/MkFitCore/src/MkBuilderWrapper.cc +++ b/RecoTracker/MkFitCore/src/MkBuilderWrapper.cc @@ -3,8 +3,9 @@ namespace mkfit { MkBuilderWrapper::MkBuilderWrapper(bool silent) : builder_(MkBuilder::make_builder(silent)) {} - - MkBuilderWrapper::~MkBuilderWrapper() {} + MkBuilderWrapper::~MkBuilderWrapper() = default; void MkBuilderWrapper::populate() { MkBuilder::populate(); } + void MkBuilderWrapper::clear() { MkBuilder::clear(); } + } // namespace mkfit diff --git a/RecoTracker/MkFitCore/src/MkFinder.cc b/RecoTracker/MkFitCore/src/MkFinder.cc index ee9726a8eb485..725e5095e9545 100644 --- a/RecoTracker/MkFitCore/src/MkFinder.cc +++ b/RecoTracker/MkFitCore/src/MkFinder.cc @@ -178,6 +178,19 @@ namespace mkfit { } } + void MkFinder::inputOverlapHits(const LayerOfHits &layer_of_hits, + const std::vector &idxs, + int beg, + int end) { + // Copy overlap hit values in. 
+ + for (int i = beg, imp = 0; i < end; ++i, ++imp) { + const Hit &hit = layer_of_hits.refHit(idxs[i].ovlp_idx); + m_msErr.copyIn(imp, hit.errArray()); + m_msPar.copyIn(imp, hit.posArray()); + } + } + void MkFinder::inputTracksAndHitIdx(const std::vector &tracks, const std::vector> &idxs, int beg, @@ -226,8 +239,8 @@ namespace mkfit { void MkFinder::packModuleNormDir( const LayerOfHits &layer_of_hits, int hit_cnt, MPlexHV &norm, MPlexHV &dir, int N_proc) const { - for (int itrack = 0; itrack < N_proc; ++itrack) { - if (hit_cnt < m_XHitSize[itrack]) { + for (int itrack = 0; itrack < NN; ++itrack) { + if (itrack < N_proc && hit_cnt < m_XHitSize[itrack]) { const auto &hit = layer_of_hits.refHit(m_XHitArr.constAt(itrack, hit_cnt, 0)); unsigned int mid = hit.detIDinLayer(); const ModuleInfo &mi = layer_of_hits.layer_info()->module_info(mid); @@ -255,7 +268,7 @@ namespace mkfit { if (!v.empty()) { // dq hit selection window - float this_dq = v[dq_sf] * (v[dq_0] * max_invpt + v[dq_1] * theta + v[dq_2]); + const float this_dq = v[dq_sf] * (v[dq_0] * max_invpt + v[dq_1] * theta + v[dq_2]); // In case value is below 0 (bad window derivation or other reasons), leave original limits if (this_dq > 0.f) { min_dq = this_dq; @@ -263,7 +276,7 @@ namespace mkfit { } // dphi hit selection window - float this_dphi = v[dp_sf] * (v[dp_0] * max_invpt + v[dp_1] * theta + v[dp_2]); + const float this_dphi = v[dp_sf] * (v[dp_0] * max_invpt + v[dp_1] * theta + v[dp_2]); // In case value is too low (bad window derivation or other reasons), leave original limits if (this_dphi > min_dphi) { min_dphi = this_dphi; @@ -363,6 +376,8 @@ namespace mkfit { #pragma omp simd #endif for (int itrack = 0; itrack < NN; ++itrack) { + if (itrack >= N_proc) + continue; m_XHitSize[itrack] = 0; float min_dq = ILC.min_dq(); @@ -474,7 +489,10 @@ namespace mkfit { // Vectorizing this makes it run slower! 
//#pragma omp simd - for (int itrack = 0; itrack < N_proc; ++itrack) { + for (int itrack = 0; itrack < NN; ++itrack) { + if (itrack >= N_proc) { + continue; + } // PROP-FAIL-ENABLE The following to be enabled when propagation failure // detection is properly implemented in propagate-to-R/Z. if (m_FailFlag[itrack]) { @@ -776,6 +794,8 @@ namespace mkfit { mp::StatePlex sp1, sp2; int n_proc; + MPlexQF dphi_track, dq_track; // 3 sigma track errors at initial state + // debug & ntuple dump -- to be local in functions MPlexQF phi_c, dphi; MPlexQF q_c, qmin, qmax; @@ -795,10 +815,10 @@ namespace mkfit { } } - void find_bin_ranges(const LayerInfo &li, const LayerOfHits &loh) { + void find_bin_ranges(const LayerInfo &li, const LayerOfHits &loh, const MPlexLS &err) { // Below made members for debugging // MPlexQF phi_c, dphi_min, dphi_max; - phi_c = mp::fast_atan2(isp.y, isp.x); + // phi_c = mp::fast_atan2(isp.y, isp.x); // calculated below as difference // Matriplex::min_max(sp1.dphi, sp2.dphi, dphi_min, dphi_max); // the above is wrong: dalpha is not dphi --> renamed variable in State @@ -809,24 +829,40 @@ namespace mkfit { // Matriplex::min_max(mp::fast_atan2(sp1.y, sp1.x), smp::fast_atan2(sp2.y, sp2.x), pmin, pmax); MPlexQF dp = pmax - pmin; phi_c = 0.5f * (pmax + pmin); - for (int ii = 0; ii < n_proc; ++ii) { - if (dp[ii] > Const::PI) { - std::swap(pmax[ii], pmin[ii]); - dp[ii] = Const::TwoPI - dp[ii]; - phi_c[ii] = Const::PI - phi_c[ii]; + for (int ii = 0; ii < NN; ++ii) { + if (ii < n_proc) { + if (dp[ii] > Const::PI) { + std::swap(pmax[ii], pmin[ii]); + dp[ii] = Const::TwoPI - dp[ii]; + phi_c[ii] = Const::PI - phi_c[ii]; + } + dphi[ii] = 0.5f * dp[ii]; + // printf("phic: %f p1: %f p2: %f pmin: %f pmax: %f dphi: %f\n", + // phi_c[ii], xp1[ii], xp2[ii], pmin[ii], pmax[ii], dphi[ii]); } - dphi[ii] = 0.5f * dp[ii]; - // printf("phic: %f p1: %f p2: %f pmin: %f pmax: %f dphi: %f\n", - // phi_c[ii], xp1[ii], xp2[ii], pmin[ii], pmax[ii], dphi[ii]); } + const auto calc_err_xy 
= [&](const MPlexQF &x, const MPlexQF &y) { + return x * x * err.ReduceFixedIJ(0, 0) + y * y * err.ReduceFixedIJ(1, 1) + + 2.0f * x * y * err.ReduceFixedIJ(0, 1); + }; + + // Calculate dphi_track, dq_track differs for barrel/endcap + MPlexQF r2_c = isp.x * isp.x + isp.y * isp.y; + MPlexQF r2inv_c = 1.0f / r2_c; + MPlexQF dphidx_c = -isp.y * r2inv_c; + MPlexQF dphidy_c = isp.x * r2inv_c; + dphi_track = 3.0f * calc_err_xy(dphidx_c, dphidy_c).abs().sqrt(); + // MPlexQF qmin, qmax; if (li.is_barrel()) { Matriplex::min_max(sp1.z, sp2.z, qmin, qmax); q_c = isp.z; + dq_track = 3.0f * err.ReduceFixedIJ(2, 2).abs().sqrt(); } else { Matriplex::min_max(Matriplex::hypot(sp1.x, sp1.y), Matriplex::hypot(sp2.x, sp2.y), qmin, qmax); - q_c = Matriplex::hypot(isp.x, isp.y); + q_c = Matriplex::sqrt(r2_c); + dq_track = 3.0f * (r2inv_c * calc_err_xy(isp.x, isp.y).abs()).sqrt(); } for (int i = 0; i < p1.kTotSize; ++i) { @@ -834,30 +870,32 @@ namespace mkfit { // const float dphi_clamp = 0.1; // if (dphi_min[i] > 0.0f || dphi_min[i] < -dphi_clamp) dphi_min[i] = -dphi_clamp; // if (dphi_max[i] < 0.0f || dphi_max[i] > dphi_clampf) dphi_max[i] = dphi_clamp; - p1[i] = loh.phiBinChecked(pmin[i] - PHI_BIN_EXTRA_FAC * 0.0123f); - p2[i] = loh.phiBinChecked(pmax[i] + PHI_BIN_EXTRA_FAC * 0.0123f); + p1[i] = loh.phiBinChecked(pmin[i] - dphi_track[i] - PHI_BIN_EXTRA_FAC * 0.0123f); + p2[i] = loh.phiBinChecked(pmax[i] + dphi_track[i] + PHI_BIN_EXTRA_FAC * 0.0123f); q0[i] = loh.qBinChecked(q_c[i]); - q1[i] = loh.qBinChecked(qmin[i] - Q_BIN_EXTRA_FAC * 0.5f * li.q_bin()); - q2[i] = loh.qBinChecked(qmax[i] + Q_BIN_EXTRA_FAC * 0.5f * li.q_bin()) + 1; + q1[i] = loh.qBinChecked(qmin[i] - dq_track[i] - Q_BIN_EXTRA_FAC * 0.5f * li.q_bin()); + q2[i] = loh.qBinChecked(qmax[i] + dq_track[i] + Q_BIN_EXTRA_FAC * 0.5f * li.q_bin()) + 1; } } }; Bins B(m_Par[iI], m_Chg, N_proc); B.prop_to_limits(LI); - B.find_bin_ranges(LI, L); - - for (int i = 0; i < N_proc; ++i) { - m_XHitSize[i] = 0; - // Notify failure. 
Ideally should be detected before selectHitIndices(). - if (m_FailFlag[i]) { - m_XWsrResult[i].m_wsr = WSR_Failed; - } else { - if (LI.is_barrel()) { - m_XWsrResult[i] = L.is_within_z_sensitive_region(B.q_c[i], 0.5f * (B.q2[i] - B.q1[i])); + B.find_bin_ranges(LI, L, m_Err[iI]); + + for (int i = 0; i < NN; ++i) { + if (i < N_proc) { + m_XHitSize[i] = 0; + // Notify failure. Ideally should be detected before selectHitIndices(). + if (m_FailFlag[i]) { + m_XWsrResult[i].m_wsr = WSR_Failed; } else { - m_XWsrResult[i] = L.is_within_r_sensitive_region(B.q_c[i], 0.5f * (B.q2[i] - B.q1[i])); + if (LI.is_barrel()) { + m_XWsrResult[i] = L.is_within_z_sensitive_region(B.q_c[i], 0.5f * (B.q2[i] - B.q1[i])); + } else { + m_XWsrResult[i] = L.is_within_r_sensitive_region(B.q_c[i], 0.5f * (B.q2[i] - B.q1[i])); + } } } } @@ -897,7 +935,11 @@ namespace mkfit { // Vectorizing this makes it run slower! //#pragma omp simd - for (int itrack = 0; itrack < N_proc; ++itrack) { + for (int itrack = 0; itrack < NN; ++itrack) { + if (itrack >= N_proc) { + continue; + } + if (m_FailFlag[itrack]) { m_XWsrResult[itrack].m_wsr = WSR_Failed; continue; @@ -969,8 +1011,8 @@ namespace mkfit { new_ddphi = cdist(std::abs(new_phi - L.hit_phi(hi))); new_ddq = std::abs(new_q - L.hit_q(hi)); - bool dqdphi_presel = - new_ddq < DDQ_PRESEL_FAC * L.hit_q_half_length(hi) && new_ddphi < DDPHI_PRESEL_FAC * 0.0123f; + bool dqdphi_presel = new_ddq < B.dq_track[itrack] + DDQ_PRESEL_FAC * L.hit_q_half_length(hi) && + new_ddphi < B.dphi_track[itrack] + DDPHI_PRESEL_FAC * 0.0123f; // clang-format off dprintf(" SHI %3u %4u %5u %6.3f %6.3f %6.4f %7.5f PROP-%s %s\n", @@ -1036,8 +1078,8 @@ namespace mkfit { mhp.reset(); //#pragma omp simd doesn't vectorize with current compilers - for (int itrack = 0; itrack < N_proc; ++itrack) { - if (hit_cnt < m_XHitSize[itrack]) { + for (int itrack = 0; itrack < NN; ++itrack) { + if (itrack < N_proc && hit_cnt < m_XHitSize[itrack]) { mhp.addInputAt(itrack, 
layer_of_hits.refHit(m_XHitArr.At(itrack, hit_cnt, 0))); } } @@ -1062,8 +1104,8 @@ namespace mkfit { //update best hit in case chi2 - - - + - + + + + + + + + + + + + + + + + + + diff --git a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.cu b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.cu index efb2a2e17715c..6e07126e9e428 100644 --- a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.cu +++ b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.cu @@ -1,8 +1,9 @@ -#include "RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsImpl.h" #include -// #define NTUPLE_DEBUG -// #define GPU_DEBUG +#include "RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsImpl.h" + +//#define GPU_DEBUG +//#define NTUPLE_DEBUG template void CAHitNtupletGeneratorKernelsGPU::launchKernels(const HitsConstView &hh, diff --git a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.h b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.h index 0865fa5cbc46a..250aef21c1d6a 100644 --- a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.h +++ b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernels.h @@ -1,18 +1,17 @@ #ifndef RecoTracker_PixelSeeding_plugins_CAHitNtupletGeneratorKernels_h #define RecoTracker_PixelSeeding_plugins_CAHitNtupletGeneratorKernels_h -// #define GPU_DEBUG +//#define GPU_DEBUG +//#define DUMP_GPU_TK_TUPLES -#include "GPUCACell.h" -#include "gpuPixelDoublets.h" - -#include "CUDADataFormats/Track/interface/PixelTrackUtilities.h" -#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h" #include "CUDADataFormats/Common/interface/HeterogeneousSoA.h" -#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitSoADevice.h" +#include "CUDADataFormats/Track/interface/PixelTrackUtilities.h" #include "CUDADataFormats/Track/interface/TrackSoAHeterogeneousHost.h" +#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitSoADevice.h" 
+#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h" -// #define DUMP_GPU_TK_TUPLES +#include "GPUCACell.h" +#include "gpuPixelDoublets.h" namespace caHitNtupletGenerator { diff --git a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsAlloc.cc b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsAlloc.cc index 6acff4abbd531..64148d5f5ba81 100644 --- a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsAlloc.cc +++ b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsAlloc.cc @@ -2,7 +2,8 @@ #include "CAHitNtupletGeneratorKernels.h" -// #define GPU_DEBUG +//#define GPU_DEBUG + template #ifdef __CUDACC__ void CAHitNtupletGeneratorKernelsGPU::allocateOnGPU(int32_t nHits, cudaStream_t stream) { diff --git a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsImpl.h b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsImpl.h index 540c0b92f9015..57e4ea6f9441f 100644 --- a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsImpl.h +++ b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorKernelsImpl.h @@ -2,8 +2,8 @@ // Original Author: Felice Pantaleo, CERN // -// #define NTUPLE_DEBUG -// #define GPU_DEBUG +//#define NTUPLE_DEBUG +//#define GPU_DEBUG #include #include @@ -11,15 +11,14 @@ #include +#include "CUDADataFormats/Track/interface/PixelTrackUtilities.h" +#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" #include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h" -#include "CUDADataFormats/Track/interface/PixelTrackUtilities.h" -#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h" - -#include "CAStructures.h" #include "CAHitNtupletGeneratorKernels.h" +#include "CAStructures.h" #include "GPUCACell.h" #include "gpuFishbone.h" #include "gpuPixelDoublets.h" diff --git 
a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorOnGPU.cc b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorOnGPU.cc index faf0bae6fb0a9..5100cf734142c 100644 --- a/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorOnGPU.cc +++ b/RecoTracker/PixelSeeding/plugins/CAHitNtupletGeneratorOnGPU.cc @@ -2,8 +2,8 @@ // Original Author: Felice Pantaleo, CERN // -// #define GPU_DEBUG -// #define DUMP_GPU_TK_TUPLES +//#define GPU_DEBUG +//#define DUMP_GPU_TK_TUPLES #include #include diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/BrokenLineFit.dev.cc b/RecoTracker/PixelSeeding/plugins/alpaka/BrokenLineFit.dev.cc new file mode 100644 index 0000000000000..a21fed668b54c --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/BrokenLineFit.dev.cc @@ -0,0 +1,412 @@ +// +// Author: Felice Pantaleo, CERN +// + +//#define BROKENLINE_DEBUG +//#define BL_DUMP_HITS +#include +#include + +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" +#include "RecoTracker/PixelTrackFitting/interface/alpaka/BrokenLine.h" + +#include "HelixFit.h" + +template +using Tuples = typename reco::TrackSoA::HitContainer; +template +using OutputSoAView = reco::TrackSoAView; +template +using TupleMultiplicity = caStructures::TupleMultiplicityT; + +// #define BL_DUMP_HITS + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + template + class Kernel_BLFastFit { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + Tuples const *__restrict__ foundNtuplets, + TupleMultiplicity const *__restrict__ tupleMultiplicity, + TrackingRecHitSoAConstView hh, + pixelCPEforDevice::ParamsOnDeviceT const *__restrict__ cpeParams, + typename TrackerTraits::tindex_type *__restrict__ ptkids, + double *__restrict__ phits, + float *__restrict__ phits_ge, + double *__restrict__ 
pfast_fit, + uint32_t nHitsL, + uint32_t nHitsH, + int32_t offset) const { + constexpr uint32_t hitsInFit = N; + constexpr auto invalidTkId = std::numeric_limits::max(); + + ALPAKA_ASSERT_OFFLOAD(hitsInFit <= nHitsL); + ALPAKA_ASSERT_OFFLOAD(nHitsL <= nHitsH); + ALPAKA_ASSERT_OFFLOAD(phits); + ALPAKA_ASSERT_OFFLOAD(pfast_fit); + ALPAKA_ASSERT_OFFLOAD(foundNtuplets); + ALPAKA_ASSERT_OFFLOAD(tupleMultiplicity); + + // look in bin for this hit multiplicity + int totTK = tupleMultiplicity->end(nHitsH) - tupleMultiplicity->begin(nHitsL); + ALPAKA_ASSERT_OFFLOAD(totTK <= int(tupleMultiplicity->size())); + ALPAKA_ASSERT_OFFLOAD(totTK >= 0); + +#ifdef BROKENLINE_DEBUG + const uint32_t threadIdx(alpaka::getIdx(acc)[0u]); + if (cms::alpakatools::once_per_grid(acc)) { + printf("%d total Ntuple\n", tupleMultiplicity->size()); + printf("%d Ntuple of size %d/%d for %d hits to fit\n", totTK, nHitsL, nHitsH, hitsInFit); + } +#endif + const auto nt = riemannFit::maxNumberOfConcurrentFits; + for (auto local_idx : cms::alpakatools::elements_with_stride(acc, nt)) { + auto tuple_idx = local_idx + offset; + if ((int)tuple_idx >= totTK) { + ptkids[local_idx] = invalidTkId; + break; + } + // get it from the ntuple container (one to one to helix) + auto tkid = *(tupleMultiplicity->begin(nHitsL) + tuple_idx); + ALPAKA_ASSERT_OFFLOAD(static_cast(tkid) < foundNtuplets->nOnes()); + + ptkids[local_idx] = tkid; + + auto nHits = foundNtuplets->size(tkid); + + ALPAKA_ASSERT_OFFLOAD(nHits >= nHitsL); + ALPAKA_ASSERT_OFFLOAD(nHits <= nHitsH); + + riemannFit::Map3xNd hits(phits + local_idx); + riemannFit::Map4d fast_fit(pfast_fit + local_idx); + riemannFit::Map6xNf hits_ge(phits_ge + local_idx); + +#ifdef BL_DUMP_HITS + auto &&done = alpaka::declareSharedVar(acc); + done = 0; + alpaka::syncBlockThreads(acc); + bool dump = + (foundNtuplets->size(tkid) == 5 && 0 == alpaka::atomicAdd(acc, &done, 1, alpaka::hierarchy::Blocks{})); +#endif + + // Prepare data structure + auto const *hitId = 
foundNtuplets->begin(tkid); + + // #define YERR_FROM_DC +#ifdef YERR_FROM_DC + // try to compute more precise error in y + auto dx = hh[hitId[hitsInFit - 1]].xGlobal() - hh[hitId[0]].xGlobal(); + auto dy = hh[hitId[hitsInFit - 1]].yGlobal() - hh[hitId[0]].yGlobal(); + auto dz = hh[hitId[hitsInFit - 1]].zGlobal() - hh[hitId[0]].zGlobal(); + float ux, uy, uz; +#endif + + float incr = std::max(1.f, float(nHits) / float(hitsInFit)); + float n = 0; + for (uint32_t i = 0; i < hitsInFit; ++i) { + int j = int(n + 0.5f); // round + if (hitsInFit - 1 == i) + j = nHits - 1; // force last hit to ensure max lever arm. + ALPAKA_ASSERT_OFFLOAD(j < int(nHits)); + n += incr; + auto hit = hitId[j]; + float ge[6]; + +#ifdef YERR_FROM_DC + auto const &dp = cpeParams->detParams(hh.detectorIndex(hit)); + auto status = hh[hit].chargeAndStatus().status; + int qbin = CPEFastParametrisation::kGenErrorQBins - 1 - status.qBin; + ALPAKA_ASSERT_OFFLOAD(qbin >= 0 && qbin < 5); + bool nok = (status.isBigY | status.isOneY); + // compute cotanbeta and use it to recompute error + dp.frame.rotation().multiply(dx, dy, dz, ux, uy, uz); + auto cb = std::abs(uy / uz); + int bin = + int(cb * (float(phase1PixelTopology::pixelThickess) / float(phase1PixelTopology::pixelPitchY)) * 8.f) - 4; + int low_value = 0; + int high_value = CPEFastParametrisation::kNumErrorBins - 1; + // return estimated bin value truncated to [0, 15] + bin = std::clamp(bin, low_value, high_value); + float yerr = dp.sigmay[bin] * 1.e-4f; // toCM + yerr *= dp.yfact[qbin]; // inflate + yerr *= yerr; + yerr += dp.apeYY; + yerr = nok ? 
hh[hit].yerrLocal() : yerr; + dp.frame.toGlobal(hh[hit].xerrLocal(), 0, yerr, ge); +#else + cpeParams->detParams(hh[hit].detectorIndex()).frame.toGlobal(hh[hit].xerrLocal(), 0, hh[hit].yerrLocal(), ge); +#endif + +#ifdef BL_DUMP_HITS + bool dump = foundNtuplets->size(tkid) == 5; + if (dump) { + printf("Track id %d %d Hit %d on %d\nGlobal: hits.col(%d) << %f,%f,%f\n", + local_idx, + tkid, + hit, + hh[hit].detectorIndex(), + i, + hh[hit].xGlobal(), + hh[hit].yGlobal(), + hh[hit].zGlobal()); + printf("Error: hits_ge.col(%d) << %e,%e,%e,%e,%e,%e\n", i, ge[0], ge[1], ge[2], ge[3], ge[4], ge[5]); + } +#endif + + hits.col(i) << hh[hit].xGlobal(), hh[hit].yGlobal(), hh[hit].zGlobal(); + hits_ge.col(i) << ge[0], ge[1], ge[2], ge[3], ge[4], ge[5]; + } + brokenline::fastFit(acc, hits, fast_fit); + + // no NaN here.... + ALPAKA_ASSERT_OFFLOAD(fast_fit(0) == fast_fit(0)); + ALPAKA_ASSERT_OFFLOAD(fast_fit(1) == fast_fit(1)); + ALPAKA_ASSERT_OFFLOAD(fast_fit(2) == fast_fit(2)); + ALPAKA_ASSERT_OFFLOAD(fast_fit(3) == fast_fit(3)); + } + } + }; + + template + struct Kernel_BLFit { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TupleMultiplicity const *__restrict__ tupleMultiplicity, + double bField, + OutputSoAView results_view, + typename TrackerTraits::tindex_type const *__restrict__ ptkids, + double *__restrict__ phits, + float *__restrict__ phits_ge, + double *__restrict__ pfast_fit) const { + ALPAKA_ASSERT_OFFLOAD(results_view.pt()); + ALPAKA_ASSERT_OFFLOAD(results_view.eta()); + ALPAKA_ASSERT_OFFLOAD(results_view.chi2()); + ALPAKA_ASSERT_OFFLOAD(pfast_fit); + constexpr auto invalidTkId = std::numeric_limits::max(); + + // same as above... 
+ // look in bin for this hit multiplicity + const auto nt = riemannFit::maxNumberOfConcurrentFits; + for (auto local_idx : cms::alpakatools::elements_with_stride(acc, nt)) { + if (invalidTkId == ptkids[local_idx]) + break; + auto tkid = ptkids[local_idx]; + + ALPAKA_ASSERT_OFFLOAD(tkid < TrackerTraits::maxNumberOfTuples); + + riemannFit::Map3xNd hits(phits + local_idx); + riemannFit::Map4d fast_fit(pfast_fit + local_idx); + riemannFit::Map6xNf hits_ge(phits_ge + local_idx); + + brokenline::PreparedBrokenLineData data; + + brokenline::karimaki_circle_fit circle; + riemannFit::LineFit line; + + brokenline::prepareBrokenLineData(acc, hits, fast_fit, bField, data); + brokenline::lineFit(acc, hits_ge, fast_fit, bField, data, line); + brokenline::circleFit(acc, hits, hits_ge, fast_fit, bField, data, circle); + + TracksUtilities::copyFromCircle( + results_view, circle.par, circle.cov, line.par, line.cov, 1.f / float(bField), tkid); + results_view[tkid].pt() = float(bField) / float(std::abs(circle.par(2))); + results_view[tkid].eta() = alpaka::math::asinh(acc, line.par(0)); + results_view[tkid].chi2() = (circle.chi2 + line.chi2) / (2 * N - 5); + +#ifdef BROKENLINE_DEBUG + if (!(circle.chi2 >= 0) || !(line.chi2 >= 0)) + printf("kernelBLFit failed! 
%f/%f\n", circle.chi2, line.chi2); + printf("kernelBLFit size %d for %d hits circle.par(0,1,2): %d %f,%f,%f\n", + N, + N, + tkid, + circle.par(0), + circle.par(1), + circle.par(2)); + printf("kernelBLHits line.par(0,1): %d %f,%f\n", tkid, line.par(0), line.par(1)); + printf("kernelBLHits chi2 cov %f/%f %e,%e,%e,%e,%e\n", + circle.chi2, + line.chi2, + circle.cov(0, 0), + circle.cov(1, 1), + circle.cov(2, 2), + line.cov(0, 0), + line.cov(1, 1)); +#endif + } + } + }; + + template + void HelixFit::launchBrokenLineKernels( + const TrackingRecHitSoAConstView &hv, + pixelCPEforDevice::ParamsOnDeviceT const *cpeParams, + uint32_t hitsInFit, + uint32_t maxNumberOfTuples, + Queue &queue) { + ALPAKA_ASSERT_OFFLOAD(tuples_); + + uint32_t blockSize = 64; + uint32_t numberOfBlocks = cms::alpakatools::divide_up_by(maxNumberOfConcurrentFits_, blockSize); + const WorkDiv1D workDivTriplets = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + const WorkDiv1D workDivQuadsPenta = cms::alpakatools::make_workdiv(numberOfBlocks / 4, blockSize); + + // Fit internals + auto tkidDevice = + cms::alpakatools::make_device_buffer(queue, maxNumberOfConcurrentFits_); + auto hitsDevice = cms::alpakatools::make_device_buffer( + queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<6>) / sizeof(double)); + auto hits_geDevice = cms::alpakatools::make_device_buffer( + queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6xNf<6>) / sizeof(float)); + auto fast_fit_resultsDevice = cms::alpakatools::make_device_buffer( + queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double)); + + for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { + // fit triplets + + alpaka::exec(queue, + workDivTriplets, + Kernel_BLFastFit<3, TrackerTraits>{}, + tuples_, + tupleMultiplicity_, + hv, + cpeParams, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + 3, + 3, + offset); + + 
alpaka::exec(queue, + workDivTriplets, + Kernel_BLFit<3, TrackerTraits>{}, + tupleMultiplicity_, + bField_, + outputSoa_, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data()); + + if (fitNas4_) { + // fit all as 4 + riemannFit::rolling_fits<4, TrackerTraits::maxHitsOnTrack, 1>([this, + &hv, + &cpeParams, + &tkidDevice, + &hitsDevice, + &hits_geDevice, + &fast_fit_resultsDevice, + &offset, + &queue, + &workDivQuadsPenta](auto i) { + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_BLFastFit<4, TrackerTraits>{}, + tuples_, + tupleMultiplicity_, + hv, + cpeParams, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + 4, + 4, + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_BLFit<4, TrackerTraits>{}, + tupleMultiplicity_, + bField_, + outputSoa_, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data()); + }); + + } else { + riemannFit::rolling_fits<4, TrackerTraits::maxHitsOnTrackForFullFit, 1>([this, + &hv, + &cpeParams, + &tkidDevice, + &hitsDevice, + &hits_geDevice, + &fast_fit_resultsDevice, + &offset, + &queue, + &workDivQuadsPenta](auto i) { + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_BLFastFit{}, + tuples_, + tupleMultiplicity_, + hv, + cpeParams, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + i, + i, + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_BLFit{}, + tupleMultiplicity_, + bField_, + outputSoa_, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data()); + }); + + static_assert(TrackerTraits::maxHitsOnTrackForFullFit < TrackerTraits::maxHitsOnTrack); + + //Fit all the rest using the maximum from previous call + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_BLFastFit{}, + tuples_, + tupleMultiplicity_, + hv, + cpeParams, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), 
+ fast_fit_resultsDevice.data(), + TrackerTraits::maxHitsOnTrackForFullFit, + TrackerTraits::maxHitsOnTrack - 1, + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_BLFit{}, + tupleMultiplicity_, + bField_, + outputSoa_, + tkidDevice.data(), + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data()); + } + + } // loop on concurrent fits + } + + template class HelixFit; + template class HelixFit; + template class HelixFit; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CACell.h b/RecoTracker/PixelSeeding/plugins/alpaka/CACell.h new file mode 100644 index 0000000000000..d0142f78415ae --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CACell.h @@ -0,0 +1,391 @@ +#ifndef RecoPixelVertexing_PixelTriplets_CACellT_h +#define RecoPixelVertexing_PixelTriplets_CACellT_h + +// +// Author: Felice Pantaleo, CERN +// + +// #define ONLY_TRIPLETS_IN_HOLE + +#include + +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/VecArray.h" +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "RecoTracker/PixelSeeding/interface/CircleEq.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/TrackSoA/interface/TracksSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "CAStructures.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + template + class CACellT { + public: + using PtrAsInt = unsigned long long; + + static constexpr auto maxCellsPerHit = TrackerTraits::maxCellsPerHit; + using OuterHitOfCellContainer = caStructures::OuterHitOfCellContainerT; + using OuterHitOfCell = caStructures::OuterHitOfCellT; + using CellNeighbors = caStructures::CellNeighborsT; + using CellTracks = caStructures::CellTracksT; + using CellNeighborsVector = caStructures::CellNeighborsVectorT; + using CellTracksVector = caStructures::CellTracksVectorT; + + 
using HitsConstView = TrackingRecHitSoAConstView; + using hindex_type = typename TrackerTraits::hindex_type; + using tindex_type = typename TrackerTraits::tindex_type; + static constexpr auto invalidHitId = std::numeric_limits::max(); + + using TmpTuple = cms::alpakatools::VecArray; + + using HitContainer = typename reco::TrackSoA::HitContainer; + using Quality = ::pixelTrack::Quality; + static constexpr auto bad = ::pixelTrack::Quality::bad; + + enum class StatusBit : uint16_t { kUsed = 1, kInTrack = 2, kKilled = 1 << 15 }; + + CACellT() = default; + + ALPAKA_FN_ACC ALPAKA_FN_INLINE void init(CellNeighborsVector& cellNeighbors, + CellTracksVector& cellTracks, + const HitsConstView& hh, + int layerPairId, + hindex_type innerHitId, + hindex_type outerHitId) { + theInnerHitId = innerHitId; + theOuterHitId = outerHitId; + theLayerPairId_ = layerPairId; + theStatus_ = 0; + theFishboneId = invalidHitId; + + // optimization that depends on access pattern + theInnerZ = hh[innerHitId].zGlobal(); + theInnerR = hh[innerHitId].rGlobal(); + + // link to default empty + theOuterNeighbors = &cellNeighbors[0]; + theTracks = &cellTracks[0]; + assert(outerNeighbors().empty()); + assert(tracks().empty()); + } + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE __attribute__((always_inline)) int addOuterNeighbor( + const TAcc& acc, typename TrackerTraits::cindex_type t, CellNeighborsVector& cellNeighbors) { + // use smart cache + if (outerNeighbors().empty()) { + auto i = cellNeighbors.extend(acc); // maybe wasted.... + if (i > 0) { + cellNeighbors[i].reset(); + alpaka::mem_fence(acc, alpaka::memory_scope::Grid{}); +#ifdef ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLED + theOuterNeighbors = &cellNeighbors[i]; +#else + auto zero = (PtrAsInt)(&cellNeighbors[0]); + alpaka::atomicCas(acc, + (PtrAsInt*)(&theOuterNeighbors), + zero, + (PtrAsInt)(&cellNeighbors[i]), + alpaka::hierarchy::Blocks{}); // if fails we cannot give "i" back... 
+#endif + } else + return -1; + } + alpaka::mem_fence(acc, alpaka::memory_scope::Grid{}); + return outerNeighbors().push_back(acc, t); + } + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE __attribute__((always_inline)) int addTrack(TAcc const& acc, + tindex_type t, + CellTracksVector& cellTracks) { + if (tracks().empty()) { + auto i = cellTracks.extend(acc); // maybe wasted.... + if (i > 0) { + cellTracks[i].reset(); + alpaka::mem_fence(acc, alpaka::memory_scope::Grid{}); +#ifdef ALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLED + theTracks = &cellTracks[i]; +#else + auto zero = (PtrAsInt)(&cellTracks[0]); + alpaka::atomicCas(acc, + (PtrAsInt*)(&theTracks), + zero, + (PtrAsInt)(&cellTracks[i]), + alpaka::hierarchy::Blocks{}); // if fails we cannot give "i" back... + +#endif + } else + return -1; + } + alpaka::mem_fence(acc, alpaka::memory_scope::Grid{}); + return tracks().push_back(acc, t); + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE CellTracks& tracks() { return *theTracks; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE CellTracks const& tracks() const { return *theTracks; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE CellNeighbors& outerNeighbors() { return *theOuterNeighbors; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE CellNeighbors const& outerNeighbors() const { return *theOuterNeighbors; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float inner_x(const HitsConstView& hh) const { return hh[theInnerHitId].xGlobal(); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float outer_x(const HitsConstView& hh) const { return hh[theOuterHitId].xGlobal(); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float inner_y(const HitsConstView& hh) const { return hh[theInnerHitId].yGlobal(); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float outer_y(const HitsConstView& hh) const { return hh[theOuterHitId].yGlobal(); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float inner_z(const HitsConstView& hh) const { return theInnerZ; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float outer_z(const HitsConstView& hh) const { return hh[theOuterHitId].zGlobal(); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float 
inner_r(const HitsConstView& hh) const { return theInnerR; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float outer_r(const HitsConstView& hh) const { return hh[theOuterHitId].rGlobal(); } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE auto inner_iphi(const HitsConstView& hh) const { return hh[theInnerHitId].iphi(); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE auto outer_iphi(const HitsConstView& hh) const { return hh[theOuterHitId].iphi(); } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE float inner_detIndex(const HitsConstView& hh) const { + return hh[theInnerHitId].detectorIndex(); + } + ALPAKA_FN_ACC ALPAKA_FN_INLINE float outer_detIndex(const HitsConstView& hh) const { + return hh[theOuterHitId].detectorIndex(); + } + + constexpr unsigned int inner_hit_id() const { return theInnerHitId; } + constexpr unsigned int outer_hit_id() const { return theOuterHitId; } + + ALPAKA_FN_ACC void print_cell() const { + printf("printing cell: on layerPair: %d, innerHitId: %d, outerHitId: %d \n", + theLayerPairId_, + theInnerHitId, + theOuterHitId); + } + + ALPAKA_FN_ACC bool check_alignment(const HitsConstView& hh, + CACellT const& otherCell, + const float ptmin, + const float hardCurvCut, + const float caThetaCutBarrel, + const float caThetaCutForward, + const float dcaCutInnerTriplet, + const float dcaCutOuterTriplet) const { + // detIndex of the layerStart for the Phase1 Pixel Detector: + // [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID] + // [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856] + auto ri = inner_r(hh); + auto zi = inner_z(hh); + + auto ro = outer_r(hh); + auto zo = outer_z(hh); + + auto r1 = otherCell.inner_r(hh); + auto z1 = otherCell.inner_z(hh); + auto isBarrel = otherCell.outer_detIndex(hh) < TrackerTraits::last_barrel_detIndex; + // TODO tune CA cuts below (theta and dca) + bool aligned = areAlignedRZ(r1, z1, ri, zi, ro, zo, ptmin, isBarrel ? 
caThetaCutBarrel : caThetaCutForward); + return (aligned && dcaCut(hh, + otherCell, + otherCell.inner_detIndex(hh) < TrackerTraits::last_bpix1_detIndex ? dcaCutInnerTriplet + : dcaCutOuterTriplet, + hardCurvCut)); + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE __attribute__((always_inline)) static bool areAlignedRZ( + float r1, float z1, float ri, float zi, float ro, float zo, const float ptmin, const float thetaCut) { + float radius_diff = std::abs(r1 - ro); + float distance_13_squared = radius_diff * radius_diff + (z1 - zo) * (z1 - zo); + + float pMin = ptmin * std::sqrt(distance_13_squared); // this needs to be divided by + // radius_diff later + + float tan_12_13_half_mul_distance_13_squared = fabs(z1 * (ri - ro) + zi * (ro - r1) + zo * (r1 - ri)); + return tan_12_13_half_mul_distance_13_squared * pMin <= thetaCut * distance_13_squared * radius_diff; + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool dcaCut(const HitsConstView& hh, + CACellT const& otherCell, + const float region_origin_radius_plus_tolerance, + const float maxCurv) const { + auto x1 = otherCell.inner_x(hh); + auto y1 = otherCell.inner_y(hh); + + auto x2 = inner_x(hh); + auto y2 = inner_y(hh); + + auto x3 = outer_x(hh); + auto y3 = outer_y(hh); + + CircleEq eq(x1, y1, x2, y2, x3, y3); + + if (eq.curvature() > maxCurv) + return false; + + return std::abs(eq.dca0()) < region_origin_radius_plus_tolerance * std::abs(eq.curvature()); + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE __attribute__((always_inline)) static bool dcaCutH( + float x1, + float y1, + float x2, + float y2, + float x3, + float y3, + const float region_origin_radius_plus_tolerance, + const float maxCurv) { + CircleEq eq(x1, y1, x2, y2, x3, y3); + + if (eq.curvature() > maxCurv) + return false; + + return std::abs(eq.dca0()) < region_origin_radius_plus_tolerance * std::abs(eq.curvature()); + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool hole0(const HitsConstView& hh, CACellT const& innerCell) const { + using namespace phase1PixelTopology; + + int p = 
innerCell.inner_iphi(hh); + if (p < 0) + p += std::numeric_limits::max(); + p = (max_ladder_bpx0 * p) / std::numeric_limits::max(); + p %= max_ladder_bpx0; + auto il = first_ladder_bpx0 + p; + auto r0 = hh.averageGeometry().ladderR[il]; + auto ri = innerCell.inner_r(hh); + auto zi = innerCell.inner_z(hh); + auto ro = outer_r(hh); + auto zo = outer_z(hh); + auto z0 = zi + (r0 - ri) * (zo - zi) / (ro - ri); + auto z_in_ladder = std::abs(z0 - hh.averageGeometry().ladderZ[il]); + auto z_in_module = z_in_ladder - module_length_bpx0 * int(z_in_ladder / module_length_bpx0); + auto gap = z_in_module < module_tolerance_bpx0 || z_in_module > (module_length_bpx0 - module_tolerance_bpx0); + return gap; + } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool hole4(const HitsConstView& hh, CACellT const& innerCell) const { + using namespace phase1PixelTopology; + + int p = outer_iphi(hh); + if (p < 0) + p += std::numeric_limits::max(); + p = (max_ladder_bpx4 * p) / std::numeric_limits::max(); + p %= max_ladder_bpx4; + auto il = first_ladder_bpx4 + p; + auto r4 = hh.averageGeometry().ladderR[il]; + auto ri = innerCell.inner_r(hh); + auto zi = innerCell.inner_z(hh); + auto ro = outer_r(hh); + auto zo = outer_z(hh); + auto z4 = zo + (r4 - ro) * (zo - zi) / (ro - ri); + auto z_in_ladder = std::abs(z4 - hh.averageGeometry().ladderZ[il]); + auto z_in_module = z_in_ladder - module_length_bpx4 * int(z_in_ladder / module_length_bpx4); + auto gap = z_in_module < module_tolerance_bpx4 || z_in_module > (module_length_bpx4 - module_tolerance_bpx4); + auto holeP = z4 > hh.averageGeometry().ladderMaxZ[il] && z4 < hh.averageGeometry().endCapZ[0]; + auto holeN = z4 < hh.averageGeometry().ladderMinZ[il] && z4 > hh.averageGeometry().endCapZ[1]; + return gap || holeP || holeN; + } + + // trying to free the track building process from hardcoded layers, leaving + // the visit of the graph based on the neighborhood connections between cells. 
+ template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void find_ntuplets(TAcc const& acc, + const HitsConstView& hh, + CACellT* __restrict__ cells, + CellTracksVector& cellTracks, + HitContainer& foundNtuplets, + cms::alpakatools::AtomicPairCounter& apc, + Quality* __restrict__ quality, + TmpTuple& tmpNtuplet, + const unsigned int minHitsPerNtuplet, + bool startAt0) const { + // the building process for a track ends if: + // it has no right neighbor + // it has no compatible neighbor + // the ntuplets is then saved if the number of hits it contains is greater + // than a threshold + + if constexpr (DEPTH <= 0) { + printf("ERROR: CACellT::find_ntuplets reached full depth!\n"); + ALPAKA_ASSERT_OFFLOAD(false); + } else { + auto doubletId = this - cells; + tmpNtuplet.push_back_unsafe(doubletId); + ALPAKA_ASSERT_OFFLOAD(tmpNtuplet.size() <= int(TrackerTraits::maxHitsOnTrack - 3)); + + bool last = true; + for (unsigned int otherCell : outerNeighbors()) { + if (cells[otherCell].isKilled()) + continue; // killed by earlyFishbone + last = false; + cells[otherCell].template find_ntuplets( + acc, hh, cells, cellTracks, foundNtuplets, apc, quality, tmpNtuplet, minHitsPerNtuplet, startAt0); + } + if (last) { // if long enough save... 
+ if ((unsigned int)(tmpNtuplet.size()) >= minHitsPerNtuplet - 1) { +#ifdef ONLY_TRIPLETS_IN_HOLE + // triplets accepted only pointing to the hole + if (tmpNtuplet.size() >= 3 || (startAt0 && hole4(hh, cells[tmpNtuplet[0]])) || + ((!startAt0) && hole0(hh, cells[tmpNtuplet[0]]))) +#endif + { + hindex_type hits[TrackerTraits::maxDepth + 2]; + auto nh = 0U; + constexpr int maxFB = 2; // for the time being let's limit this + int nfb = 0; + for (auto c : tmpNtuplet) { + hits[nh++] = cells[c].theInnerHitId; + if (nfb < maxFB && cells[c].hasFishbone()) { + ++nfb; + hits[nh++] = cells[c].theFishboneId; // Fishbone hit is always outer than inner hit + } + } + assert(nh < TrackerTraits::maxHitsOnTrack); + hits[nh] = theOuterHitId; + auto it = foundNtuplets.bulkFill(acc, apc, hits, nh + 1); + if (it >= 0) { // if negative is overflow.... + for (auto c : tmpNtuplet) + cells[c].addTrack(acc, it, cellTracks); + quality[it] = bad; // initialize to bad + } + } + } + } + tmpNtuplet.pop_back(); + assert(tmpNtuplet.size() < int(TrackerTraits::maxHitsOnTrack - 1)); + } + } + + // Cell status management + ALPAKA_FN_ACC ALPAKA_FN_INLINE void kill() { theStatus_ |= uint16_t(StatusBit::kKilled); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool isKilled() const { return theStatus_ & uint16_t(StatusBit::kKilled); } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE int16_t layerPairId() const { return theLayerPairId_; } + + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool unused() const { return 0 == (uint16_t(StatusBit::kUsed) & theStatus_); } + ALPAKA_FN_ACC ALPAKA_FN_INLINE void setStatusBits(StatusBit mask) { theStatus_ |= uint16_t(mask); } + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void setFishbone(TAcc const& acc, hindex_type id, float z, const HitsConstView& hh) { + // make it deterministic: use the farther apart (in z) + auto old = theFishboneId; + while (old != + alpaka::atomicCas( + acc, + &theFishboneId, + old, + (invalidHitId == old || std::abs(z - theInnerZ) > std::abs(hh[old].zGlobal() - theInnerZ)) ? 
id : old, + alpaka::hierarchy::Blocks{})) + old = theFishboneId; + } + ALPAKA_FN_ACC ALPAKA_FN_INLINE auto fishboneId() const { return theFishboneId; } + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool hasFishbone() const { return theFishboneId != invalidHitId; } + + private: + CellNeighbors* theOuterNeighbors; + CellTracks* theTracks; + + int16_t theLayerPairId_; + uint16_t theStatus_; // tbd + + float theInnerZ; + float theInnerR; + hindex_type theInnerHitId; + hindex_type theOuterHitId; + hindex_type theFishboneId; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTriplets_plugins_CACellT_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAFishbone.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAFishbone.h new file mode 100644 index 0000000000000..343e0cf9ad005 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAFishbone.h @@ -0,0 +1,148 @@ +#ifndef RecoPixelVertexing_PixelTriplets_alpaka_CAFishbone_h +#define RecoPixelVertexing_PixelTriplets_alpaka_CAFishbone_h + +#include +#include +#include +#include +#include + +#include +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "HeterogeneousCore/AlpakaInterface/interface/VecArray.h" +#include "DataFormats/Math/interface/approx_atan2.h" + +#include "CACell.h" +#include "CAStructures.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace caPixelDoublets { + + template + using CellNeighbors = caStructures::CellNeighborsT; + template + using CellTracks = caStructures::CellTracksT; + template + using CellNeighborsVector = caStructures::CellNeighborsVectorT; + template + using CellTracksVector = caStructures::CellTracksVectorT; + template + using OuterHitOfCell = caStructures::OuterHitOfCellT; + template + using HitsConstView = typename CACellT::HitsConstView; + + template + class CAFishbone { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + HitsConstView hh, + 
CACellT* cells, + uint32_t const* __restrict__ nCells, + OuterHitOfCell const* isOuterHitOfCellWrap, + int32_t nHits, + bool checkTrack) const { + if (nHits <= isOuterHitOfCellWrap->offset) + return; + constexpr auto maxCellsPerHit = CACellT::maxCellsPerHit; + + auto const isOuterHitOfCell = isOuterHitOfCellWrap->container; + + // x runs faster... + + float x[maxCellsPerHit], y[maxCellsPerHit], z[maxCellsPerHit], n[maxCellsPerHit]; + uint16_t d[maxCellsPerHit]; + uint32_t cc[maxCellsPerHit]; + uint8_t l[maxCellsPerHit]; + const uint32_t dimIndexY = 0u; + const uint32_t dimIndexX = 1u; + const uint32_t blockDimensionX(alpaka::getWorkDiv(acc)[dimIndexX]); + const auto& [firstElementIdxNoStrideX, endElementIdxNoStrideX] = + cms::alpakatools::element_index_range_in_block(acc, 0u, dimIndexX); + + // Outermost loop on Y + const uint32_t gridDimensionY(alpaka::getWorkDiv(acc)[dimIndexY]); + const auto& [firstElementIdxNoStrideY, endElementIdxNoStrideY] = + cms::alpakatools::element_index_range_in_grid(acc, 0u, dimIndexY); + uint32_t firstElementIdxY = firstElementIdxNoStrideY; + uint32_t endElementIdxY = endElementIdxNoStrideY; + + for (uint32_t idy = firstElementIdxY, nt = nHits; idy < nt; ++idy) { + if (not cms::alpakatools::next_valid_element_index_strided( + idy, firstElementIdxY, endElementIdxY, gridDimensionY, nt)) + break; + + auto const& vc = isOuterHitOfCell[idy]; + auto s = vc.size(); + if (s < 2) + continue; + + auto const& c0 = cells[vc[0]]; + auto xo = c0.outer_x(hh); + auto yo = c0.outer_y(hh); + auto zo = c0.outer_z(hh); + auto sg = 0; + for (int32_t ic = 0; ic < s; ++ic) { + auto& ci = cells[vc[ic]]; + if (ci.unused()) + continue; // for triplets equivalent to next + if (checkTrack && ci.tracks().empty()) + continue; + cc[sg] = vc[ic]; + d[sg] = ci.inner_detIndex(hh); + l[sg] = ci.layerPairId(); + x[sg] = ci.inner_x(hh) - xo; + y[sg] = ci.inner_y(hh) - yo; + z[sg] = ci.inner_z(hh) - zo; + n[sg] = x[sg] * x[sg] + y[sg] * y[sg] + z[sg] * z[sg]; + ++sg; + } + 
if (sg < 2) + continue; + // here we parallelize in X + uint32_t firstElementIdxX = firstElementIdxNoStrideX; + uint32_t endElementIdxX = endElementIdxNoStrideX; + for (uint32_t ic = firstElementIdxX; (int)ic < sg - 1; ++ic) { + if (not cms::alpakatools::next_valid_element_index_strided( + ic, firstElementIdxX, endElementIdxX, blockDimensionX, sg - 1)) + break; + + auto& ci = cells[cc[ic]]; + for (auto jc = ic + 1; (int)jc < sg; ++jc) { + auto& cj = cells[cc[jc]]; + // must be different detectors (in the same layer) + // if (d[ic]==d[jc]) continue; + // || l[ic]!=l[jc]) continue; + auto cos12 = x[ic] * x[jc] + y[ic] * y[jc] + z[ic] * z[jc]; + + if (d[ic] != d[jc] && cos12 * cos12 >= 0.99999f * (n[ic] * n[jc])) { + // alligned: kill farthest (prefer consecutive layers) + // if same layer prefer farthest (longer level arm) and make space for intermediate hit + bool sameLayer = l[ic] == l[jc]; + if (n[ic] > n[jc]) { + if (sameLayer) { + cj.kill(); // closest + ci.setFishbone(acc, cj.inner_hit_id(), cj.inner_z(hh), hh); + } else { + ci.kill(); // farthest + // break; // removed to improve reproducibility. keep it for reference and tests + } + } else { + if (!sameLayer) { + cj.kill(); // farthest + } else { + ci.kill(); // closest + cj.setFishbone(acc, ci.inner_hit_id(), ci.inner_z(hh), hh); + // break; // removed to improve reproducibility. 
keep it for reference and tests + } + } + } + } //cj + } // ci + } // hits + } + }; + } // namespace caPixelDoublets +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTriplets_alpaka_CAFishbone_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtuplet.cc b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtuplet.cc new file mode 100644 index 0000000000000..c16aed2e0b1e8 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtuplet.cc @@ -0,0 +1,95 @@ +#include + +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/Utilities/interface/RunningAverage.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDGetToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EventSetup.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/stream/EDProducer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "RecoTracker/TkMSParametrization/interface/PixelRecoUtilities.h" +#include "RecoLocalTracker/Records/interface/PixelCPEFastParamsRecord.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/alpaka/PixelCPEFastParamsCollection.h" + 
+#include "CAHitNtupletGenerator.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + template + class CAHitNtupletAlpaka : public stream::EDProducer<> { + using HitsConstView = TrackingRecHitSoAConstView; + using HitsOnDevice = TrackingRecHitsSoACollection; + using HitsOnHost = TrackingRecHitHost; + + using TkSoAHost = TracksHost; + using TkSoADevice = TracksSoACollection; + + using Algo = CAHitNtupletGenerator; + + public: + explicit CAHitNtupletAlpaka(const edm::ParameterSet& iConfig); + ~CAHitNtupletAlpaka() override = default; + void produce(device::Event& iEvent, const device::EventSetup& es) override; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + const edm::ESGetToken tokenField_; + const device::ESGetToken, PixelCPEFastParamsRecord> cpeToken_; + const device::EDGetToken tokenHit_; + const device::EDPutToken tokenTrack_; + + Algo deviceAlgo_; + }; + + template + CAHitNtupletAlpaka::CAHitNtupletAlpaka(const edm::ParameterSet& iConfig) + : tokenField_(esConsumes()), + cpeToken_(esConsumes(edm::ESInputTag("", iConfig.getParameter("CPE")))), + tokenHit_(consumes(iConfig.getParameter("pixelRecHitSrc"))), + tokenTrack_(produces()), + deviceAlgo_(iConfig) {} + + template + void CAHitNtupletAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + + desc.add("pixelRecHitSrc", edm::InputTag("siPixelRecHitsPreSplittingAlpaka")); + + std::string cpe = "PixelCPEFastParams"; + cpe += TrackerTraits::nameModifier; + desc.add("CPE", cpe); + + Algo::fillPSetDescription(desc); + descriptions.addWithDefaultLabel(desc); + } + + template + void CAHitNtupletAlpaka::produce(device::Event& iEvent, const device::EventSetup& es) { + auto bf = 1. 
/ es.getData(tokenField_).inverseBzAtOriginInGeV(); + + auto& fcpe = es.getData(cpeToken_); + + auto const& hits = iEvent.get(tokenHit_); + + iEvent.emplace(tokenTrack_, deviceAlgo_.makeTuplesAsync(hits, fcpe.const_buffer().data(), bf, iEvent.queue())); + } + + using CAHitNtupletAlpakaPhase1 = CAHitNtupletAlpaka; + using CAHitNtupletAlpakaPhase2 = CAHitNtupletAlpaka; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" + +DEFINE_FWK_ALPAKA_MODULE(CAHitNtupletAlpakaPhase1); +DEFINE_FWK_ALPAKA_MODULE(CAHitNtupletAlpakaPhase2); diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGenerator.cc b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGenerator.cc new file mode 100644 index 0000000000000..8f898872a66f4 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGenerator.cc @@ -0,0 +1,329 @@ +// +// Original Author: Felice Pantaleo, CERN +// + +//#define GPU_DEBUG +//#define DUMP_GPU_TK_TUPLES + +#include +#include +#include +#include + +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/Exception.h" + +#include "CAHitNtupletGenerator.h" +#include "CAHitNtupletGeneratorKernels.h" +#include "CAPixelDoublets.h" +#include "CAPixelDoubletsAlgos.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace { + + using namespace caHitNtupletGenerator; + using namespace caPixelDoublets; + using namespace pixelTopology; + using namespace pixelTrack; + + template + T sqr(T x) { + return x * x; + } + + //Common Params + void fillDescriptionsCommon(edm::ParameterSetDescription& desc) { + // 87 cm/GeV = 1/(3.8T * 0.3) + // take less than radius given by the hardPtCut and reject everything 
below + // auto hardCurvCut = 1.f/(0.35 * 87.f); + desc.add("ptmin", 0.9f)->setComment("Cut on minimum pt"); + desc.add("CAThetaCutBarrel", 0.002f)->setComment("Cut on RZ alignement for Barrel"); + desc.add("CAThetaCutForward", 0.003f)->setComment("Cut on RZ alignment for Forward"); + desc.add("hardCurvCut", 1.f / (0.35 * 87.f)) + ->setComment("Cut on minimum curvature, used in DCA ntuplet selection"); + desc.add("dcaCutInnerTriplet", 0.15f)->setComment("Cut on origin radius when the inner hit is on BPix1"); + desc.add("dcaCutOuterTriplet", 0.25f)->setComment("Cut on origin radius when the outer hit is on BPix1"); + desc.add("earlyFishbone", true); + desc.add("lateFishbone", false); + desc.add("fillStatistics", false); + desc.add("minHitsPerNtuplet", 4); + desc.add("minHitsForSharingCut", 10) + ->setComment("Maximum number of hits in a tuple to clean also if the shared hit is on bpx1"); + + desc.add("fitNas4", false)->setComment("fit only 4 hits out of N"); + desc.add("doClusterCut", true); + desc.add("doZ0Cut", true); + desc.add("doPtCut", true); + desc.add("useRiemannFit", false)->setComment("true for Riemann, false for BrokenLine"); + desc.add("doSharedHitCut", true)->setComment("Sharing hit nTuples cleaning"); + desc.add("dupPassThrough", false)->setComment("Do not reject duplicate"); + desc.add("useSimpleTripletCleaner", true)->setComment("use alternate implementation"); + } + + AlgoParams makeCommonParams(edm::ParameterSet const& cfg) { + return AlgoParams({cfg.getParameter("minHitsForSharingCut"), + cfg.getParameter("useRiemannFit"), + cfg.getParameter("fitNas4"), + cfg.getParameter("includeJumpingForwardDoublets"), + cfg.getParameter("earlyFishbone"), + cfg.getParameter("lateFishbone"), + cfg.getParameter("fillStatistics"), + cfg.getParameter("doSharedHitCut"), + cfg.getParameter("dupPassThrough"), + cfg.getParameter("useSimpleTripletCleaner")}); + } + + //This is needed to have the partial specialization for isPhase1Topology/isPhase2Topology + template + 
struct TopologyCuts {}; + + template + struct TopologyCuts> { + static constexpr CAParamsT makeCACuts(edm::ParameterSet const& cfg) { + return CAParamsT{{cfg.getParameter("maxNumberOfDoublets"), + cfg.getParameter("minHitsPerNtuplet"), + (float)cfg.getParameter("ptmin"), + (float)cfg.getParameter("CAThetaCutBarrel"), + (float)cfg.getParameter("CAThetaCutForward"), + (float)cfg.getParameter("hardCurvCut"), + (float)cfg.getParameter("dcaCutInnerTriplet"), + (float)cfg.getParameter("dcaCutOuterTriplet")}}; + }; + + static constexpr ::pixelTrack::QualityCutsT makeQualityCuts(edm::ParameterSet const& pset) { + auto coeff = pset.getParameter>("chi2Coeff"); + auto ptMax = pset.getParameter("chi2MaxPt"); + + coeff[1] = (coeff[1] - coeff[0]) / log2(ptMax); + return ::pixelTrack::QualityCutsT{// polynomial coefficients for the pT-dependent chi2 cut + {(float)coeff[0], (float)coeff[1], 0.f, 0.f}, + // max pT used to determine the chi2 cut + (float)ptMax, + // chi2 scale factor: 8 for broken line fit, ?? 
for Riemann fit + (float)pset.getParameter("chi2Scale"), + // regional cuts for triplets + {(float)pset.getParameter("tripletMaxTip"), + (float)pset.getParameter("tripletMinPt"), + (float)pset.getParameter("tripletMaxZip")}, + // regional cuts for quadruplets + {(float)pset.getParameter("quadrupletMaxTip"), + (float)pset.getParameter("quadrupletMinPt"), + (float)pset.getParameter("quadrupletMaxZip")}}; + } + }; + + template + struct TopologyCuts> { + static constexpr CAParamsT makeCACuts(edm::ParameterSet const& cfg) { + return CAParamsT{{cfg.getParameter("maxNumberOfDoublets"), + cfg.getParameter("minHitsPerNtuplet"), + (float)cfg.getParameter("ptmin"), + (float)cfg.getParameter("CAThetaCutBarrel"), + (float)cfg.getParameter("CAThetaCutForward"), + (float)cfg.getParameter("hardCurvCut"), + (float)cfg.getParameter("dcaCutInnerTriplet"), + (float)cfg.getParameter("dcaCutOuterTriplet")}, + {(bool)cfg.getParameter("includeFarForwards")}}; + } + + static constexpr ::pixelTrack::QualityCutsT makeQualityCuts(edm::ParameterSet const& pset) { + return ::pixelTrack::QualityCutsT{ + static_cast(pset.getParameter("maxChi2")), + static_cast(pset.getParameter("minPt")), + static_cast(pset.getParameter("maxTip")), + static_cast(pset.getParameter("maxZip")), + }; + } + }; + + //Cell Cuts, as they are the cuts have the same logic for Phase2 and Phase1 + //keeping them separate would allow further differentiation in the future + //moving them to TopologyCuts and using the same syntax + template + CellCutsT makeCellCuts(edm::ParameterSet const& cfg) { + return CellCutsT{cfg.getParameter("doClusterCut"), + cfg.getParameter("doZ0Cut"), + cfg.getParameter("doPtCut"), + cfg.getParameter("idealConditions"), + (float)cfg.getParameter("cellZ0Cut"), + (float)cfg.getParameter("cellPtCut"), + cfg.getParameter>("phiCuts")}; + } + + } // namespace + + using namespace std; + + template + CAHitNtupletGenerator::CAHitNtupletGenerator(const edm::ParameterSet& cfg) + : 
m_params(makeCommonParams(cfg), + makeCellCuts(cfg), + TopologyCuts::makeQualityCuts(cfg.getParameterSet("trackQualityCuts")), + TopologyCuts::makeCACuts(cfg)) { +#ifdef DUMP_GPU_TK_TUPLES + printf("TK: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", + "tid", + "qual", + "nh", + "nl", + "charge", + "pt", + "eta", + "phi", + "tip", + "zip", + "chi2", + "h1", + "h2", + "h3", + "h4", + "h5", + "hn"); +#endif + } + + template + void CAHitNtupletGenerator::fillPSetDescription(edm::ParameterSetDescription& desc) { + static_assert(sizeof(TrackerTraits) == 0, + "Note: this fillPSetDescription is a dummy one. Please specialise it for the correct version of " + "CAHitNtupletGenerator."); + } + + template <> + void CAHitNtupletGenerator::fillPSetDescription(edm::ParameterSetDescription& desc) { + fillDescriptionsCommon(desc); + + desc.add("maxNumberOfDoublets", pixelTopology::Phase1::maxNumberOfDoublets); + desc.add("idealConditions", true); + desc.add("includeJumpingForwardDoublets", false); + desc.add("cellZ0Cut", 12.0); + desc.add("cellPtCut", 0.5); + + edm::ParameterSetDescription trackQualityCuts; + trackQualityCuts.add("chi2MaxPt", 10.)->setComment("max pT used to determine the pT-dependent chi2 cut"); + trackQualityCuts.add>("chi2Coeff", {0.9, 1.8})->setComment("chi2 at 1GeV and at ptMax above"); + trackQualityCuts.add("chi2Scale", 8.) + ->setComment( + "Factor to multiply the pT-dependent chi2 cut (currently: 8 for the broken line fit, ?? 
for the Riemann " + "fit)"); + trackQualityCuts.add("tripletMinPt", 0.5)->setComment("Min pT for triplets, in GeV"); + trackQualityCuts.add("tripletMaxTip", 0.3)->setComment("Max |Tip| for triplets, in cm"); + trackQualityCuts.add("tripletMaxZip", 12.)->setComment("Max |Zip| for triplets, in cm"); + trackQualityCuts.add("quadrupletMinPt", 0.3)->setComment("Min pT for quadruplets, in GeV"); + trackQualityCuts.add("quadrupletMaxTip", 0.5)->setComment("Max |Tip| for quadruplets, in cm"); + trackQualityCuts.add("quadrupletMaxZip", 12.)->setComment("Max |Zip| for quadruplets, in cm"); + desc.add("trackQualityCuts", trackQualityCuts) + ->setComment( + "Quality cuts based on the results of the track fit:\n - apply a pT-dependent chi2 cut;\n - apply " + "\"region " + "cuts\" based on the fit results (pT, Tip, Zip)."); + + desc.add>( + "phiCuts", + std::vector(std::begin(phase1PixelTopology::phicuts), std::end(phase1PixelTopology::phicuts))) + ->setComment("Cuts in phi for cells"); + } + + template <> + void CAHitNtupletGenerator::fillPSetDescription(edm::ParameterSetDescription& desc) { + fillDescriptionsCommon(desc); + + desc.add("maxNumberOfDoublets", pixelTopology::HIonPhase1::maxNumberOfDoublets); + desc.add("idealConditions", false); + desc.add("includeJumpingForwardDoublets", false); + desc.add("cellZ0Cut", 10.0); + desc.add("cellPtCut", 0.0); + + edm::ParameterSetDescription trackQualityCuts; + trackQualityCuts.add("chi2MaxPt", 10.)->setComment("max pT used to determine the pT-dependent chi2 cut"); + trackQualityCuts.add>("chi2Coeff", {0.9, 1.8})->setComment("chi2 at 1GeV and at ptMax above"); + trackQualityCuts.add("chi2Scale", 8.) + ->setComment( + "Factor to multiply the pT-dependent chi2 cut (currently: 8 for the broken line fit, ?? 
for the Riemann " + "fit)"); + trackQualityCuts.add("tripletMinPt", 0.0)->setComment("Min pT for triplets, in GeV"); + trackQualityCuts.add("tripletMaxTip", 0.1)->setComment("Max |Tip| for triplets, in cm"); + trackQualityCuts.add("tripletMaxZip", 6.)->setComment("Max |Zip| for triplets, in cm"); + trackQualityCuts.add("quadrupletMinPt", 0.0)->setComment("Min pT for quadruplets, in GeV"); + trackQualityCuts.add("quadrupletMaxTip", 0.5)->setComment("Max |Tip| for quadruplets, in cm"); + trackQualityCuts.add("quadrupletMaxZip", 6.)->setComment("Max |Zip| for quadruplets, in cm"); + + desc.add("trackQualityCuts", trackQualityCuts) + ->setComment( + "Quality cuts based on the results of the track fit:\n - apply a pT-dependent chi2 cut;\n - apply " + "\"region " + "cuts\" based on the fit results (pT, Tip, Zip)."); + + desc.add>( + "phiCuts", + std::vector(std::begin(phase1PixelTopology::phicuts), std::end(phase1PixelTopology::phicuts))) + ->setComment("Cuts in phi for cells"); + } + + template <> + void CAHitNtupletGenerator::fillPSetDescription(edm::ParameterSetDescription& desc) { + fillDescriptionsCommon(desc); + + desc.add("maxNumberOfDoublets", pixelTopology::Phase2::maxNumberOfDoublets); + desc.add("idealConditions", false); + desc.add("includeFarForwards", true); + desc.add("includeJumpingForwardDoublets", true); + desc.add("cellZ0Cut", 7.5); + desc.add("cellPtCut", 0.85); + + edm::ParameterSetDescription trackQualityCuts; + trackQualityCuts.add("maxChi2", 5.)->setComment("Max normalized chi2"); + trackQualityCuts.add("minPt", 0.5)->setComment("Min pT in GeV"); + trackQualityCuts.add("maxTip", 0.3)->setComment("Max |Tip| in cm"); + trackQualityCuts.add("maxZip", 12.)->setComment("Max |Zip|, in cm"); + desc.add("trackQualityCuts", trackQualityCuts) + ->setComment( + "Quality cuts based on the results of the track fit:\n - apply cuts based on the fit results (pT, Tip, " + "Zip)."); + + desc.add>( + "phiCuts", + std::vector(std::begin(phase2PixelTopology::phicuts), 
std::end(phase2PixelTopology::phicuts))) + ->setComment("Cuts in phi for cells"); + } + + template + TracksSoACollection CAHitNtupletGenerator::makeTuplesAsync( + HitsOnDevice const& hits_d, ParamsOnDevice const* cpeParams, float bfield, Queue& queue) const { + using HelixFit = HelixFit; + using TrackSoA = TracksSoACollection; + using GPUKernels = CAHitNtupletGeneratorKernels; + + TrackSoA tracks(queue); + + GPUKernels kernels(m_params, hits_d.view().metadata().size(), queue); + + kernels.buildDoublets(hits_d.view(), queue); + kernels.launchKernels(hits_d.view(), tracks.view(), queue); + + HelixFit fitter(bfield, m_params.fitNas4_); + fitter.allocate(kernels.tupleMultiplicity(), tracks.view()); + if (m_params.useRiemannFit_) { + fitter.launchRiemannKernels( + hits_d.view(), cpeParams, hits_d.view().metadata().size(), TrackerTraits::maxNumberOfQuadruplets, queue); + } else { + fitter.launchBrokenLineKernels( + hits_d.view(), cpeParams, hits_d.view().metadata().size(), TrackerTraits::maxNumberOfQuadruplets, queue); + } + kernels.classifyTuples(hits_d.view(), tracks.view(), queue); +#ifdef GPU_DEBUG + alpaka::wait(queue); + std::cout << "finished building pixel tracks on GPU" << std::endl; +#endif + + return tracks; + } + + template class CAHitNtupletGenerator; + template class CAHitNtupletGenerator; + template class CAHitNtupletGenerator; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGenerator.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGenerator.h new file mode 100644 index 0000000000000..826b92d4a195a --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGenerator.h @@ -0,0 +1,86 @@ +#ifndef RecoPixelVertexing_PixelTriplets_Alpaka_CAHitNtupletGenerator_h +#define RecoPixelVertexing_PixelTriplets_Alpaka_CAHitNtupletGenerator_h + +#include + +#include "DataFormats/SiPixelDetId/interface/PixelSubdetector.h" +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" 
+#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "DataFormats/TrackingRecHitSoA/interface/alpaka/TrackingRecHitsSoACollection.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +#include "CAHitNtupletGeneratorKernels.h" +#include "CACell.h" +#include "HelixFit.h" + +namespace edm { + class ParameterSetDescription; +} // namespace edm + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + template + class CAHitNtupletGenerator { + public: + using HitsView = TrackingRecHitSoAView; + using HitsConstView = TrackingRecHitSoAConstView; + using HitsOnDevice = TrackingRecHitsSoACollection; + using HitsOnHost = TrackingRecHitHost; + using hindex_type = typename TrackingRecHitSoA::hindex_type; + + using HitToTuple = caStructures::HitToTupleT; + using TupleMultiplicity = caStructures::TupleMultiplicityT; + using OuterHitOfCell = caStructures::OuterHitOfCellT; + + using CACell = CACellT; + using TkSoAHost = TracksHost; + using TkSoADevice = TracksSoACollection; + using HitContainer = typename reco::TrackSoA::HitContainer; + using Tuple = HitContainer; + + using CellNeighborsVector = caStructures::CellNeighborsVectorT; + using CellTracksVector = caStructures::CellTracksVectorT; + + using Quality = ::pixelTrack::Quality; + + using QualityCuts = ::pixelTrack::QualityCutsT; + using Params = caHitNtupletGenerator::ParamsT; + using Counters = caHitNtupletGenerator::Counters; + + using ParamsOnDevice = pixelCPEforDevice::ParamsOnDeviceT; + + public: + CAHitNtupletGenerator(const edm::ParameterSet& cfg); + + static void fillPSetDescription(edm::ParameterSetDescription& desc); + + // NOTE: beginJob and endJob were meant to be used + // to fill the statistics. 
This is still not implemented in Alpaka + // since we are missing the begin/endJob functionality for the Alpaka + // producers. + // + // void beginJob(); + // void endJob(); + + TkSoADevice makeTuplesAsync(HitsOnDevice const& hits_d, + ParamsOnDevice const* cpeParams, + float bfield, + Queue& queue) const; + + private: + void buildDoublets(const HitsConstView& hh, Queue& queue) const; + + void hitNtuplets(const HitsConstView& hh, const edm::EventSetup& es, bool useRiemannFit, Queue& queue); + + void launchKernels(const HitsConstView& hh, bool useRiemannFit, Queue& queue) const; + + Params m_params; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // RecoPixelVertexing_PixelTriplets_plugins_CAHitNtupletGenerator_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernels.dev.cc b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernels.dev.cc new file mode 100644 index 0000000000000..44e3295bdb606 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernels.dev.cc @@ -0,0 +1,538 @@ +#include +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "CAHitNtupletGeneratorKernels.h" +#include "CAHitNtupletGeneratorKernelsImpl.h" +#ifdef DUMP_GPU_TK_TUPLES +#include +#endif + +//#define GPU_DEBUG +//#define NTUPLE_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + template + CAHitNtupletGeneratorKernels::CAHitNtupletGeneratorKernels(Params const ¶ms, + uint32_t nhits, + Queue &queue) + : m_params(params), + ////////////////////////////////////////////////////////// + // ALLOCATIONS FOR THE INTERMEDIATE RESULTS (STAYS ON WORKER) + 
////////////////////////////////////////////////////////// + counters_{cms::alpakatools::make_device_buffer(queue)}, + + // workspace + device_hitToTuple_{cms::alpakatools::make_device_buffer(queue)}, + device_tupleMultiplicity_{cms::alpakatools::make_device_buffer(queue)}, + + // NB: In legacy, device_theCells_ and device_isOuterHitOfCell_ were allocated inside buildDoublets + device_theCells_{ + cms::alpakatools::make_device_buffer(queue, m_params.caParams_.maxNumberOfDoublets_)}, + // in principle we can use "nhits" to heuristically dimension the workspace... + device_isOuterHitOfCell_{ + cms::alpakatools::make_device_buffer(queue, std::max(1u, nhits))}, + isOuterHitOfCell_{cms::alpakatools::make_device_buffer(queue)}, + + device_theCellNeighbors_{cms::alpakatools::make_device_buffer(queue)}, + device_theCellTracks_{cms::alpakatools::make_device_buffer(queue)}, + // NB: In legacy, cellStorage_ was allocated inside buildDoublets + cellStorage_{cms::alpakatools::make_device_buffer( + queue, + TrackerTraits::maxNumOfActiveDoublets * sizeof(CellNeighbors) + + TrackerTraits::maxNumOfActiveDoublets * sizeof(CellTracks))}, + device_cellCuts_{cms::alpakatools::make_device_buffer(queue)}, + device_theCellNeighborsContainer_{reinterpret_cast(cellStorage_.data())}, + device_theCellTracksContainer_{reinterpret_cast( + cellStorage_.data() + TrackerTraits::maxNumOfActiveDoublets * sizeof(CellNeighbors))}, + + // NB: In legacy, device_storage_ was allocated inside allocateOnGPU + device_storage_{ + cms::alpakatools::make_device_buffer(queue, 3u)}, + device_hitTuple_apc_{reinterpret_cast(device_storage_.data())}, + device_hitToTuple_apc_{reinterpret_cast(device_storage_.data() + 1)}, + device_nCells_{cms::alpakatools::make_device_view(alpaka::getDev(queue), + *reinterpret_cast(device_storage_.data() + 2))} { + alpaka::memset(queue, counters_, 0); + alpaka::memset(queue, device_nCells_, 0); + alpaka::memset(queue, cellStorage_, 0); + + auto cellCuts_h = 
cms::alpakatools::make_host_view(m_params.cellCuts_); + alpaka::memcpy(queue, device_cellCuts_, cellCuts_h); + + [[maybe_unused]] TupleMultiplicity *tupleMultiplicityDeviceData = device_tupleMultiplicity_.data(); + [[maybe_unused]] HitToTuple *hitToTupleDeviceData = device_hitToTuple_.data(); + using TM = cms::alpakatools::OneToManyAssocRandomAccess; + TM *tm = device_tupleMultiplicity_.data(); + TM::template launchZero(tm, queue); + TupleMultiplicity::template launchZero(tupleMultiplicityDeviceData, queue); + HitToTuple::template launchZero(hitToTupleDeviceData, queue); + } + + template + void CAHitNtupletGeneratorKernels::launchKernels(const HitsConstView &hh, + TkSoAView &tracks_view, + Queue &queue) { + using namespace caPixelDoublets; + using namespace caHitNtupletGeneratorKernels; + + // zero tuples + HitContainer::template launchZero(&(tracks_view.hitIndices()), queue); + + int32_t nhits = hh.metadata().size(); + +#ifdef NTUPLE_DEBUG + std::cout << "start tuple building. N hits " << nhits << std::endl; + if (nhits < 2) + std::cout << "too few hits " << nhits << std::endl; +#endif + + // + // applying conbinatoric cleaning such as fishbone at this stage is too expensive + // + + const auto nthTot = 64; + const auto stride = 4; + auto blockSize = nthTot / stride; + auto numberOfBlocks = cms::alpakatools::divide_up_by(3 * m_params.caParams_.maxNumberOfDoublets_ / 4, blockSize); + const auto rescale = numberOfBlocks / 65536; + blockSize *= (rescale + 1); + numberOfBlocks = cms::alpakatools::divide_up_by(3 * m_params.caParams_.maxNumberOfDoublets_ / 4, blockSize); + assert(numberOfBlocks < 65536); + assert(blockSize > 0 && 0 == blockSize % 16); + const Vec2D blks{numberOfBlocks, 1u}; + const Vec2D thrs{blockSize, stride}; + const auto kernelConnectWorkDiv = cms::alpakatools::make_workdiv(blks, thrs); + + alpaka::exec(queue, + kernelConnectWorkDiv, + Kernel_connect{}, + this->device_hitTuple_apc_, + this->device_hitToTuple_apc_, // needed only to be reset, ready 
for next kernel + hh, + this->device_theCells_.data(), + this->device_nCells_.data(), + this->device_theCellNeighbors_.data(), + this->isOuterHitOfCell_.data(), + this->m_params.caParams_); + + // do not run the fishbone if there are hits only in BPIX1 + if (this->m_params.earlyFishbone_) { + const auto nthTot = 128; + const auto stride = 16; + const auto blockSize = nthTot / stride; + const auto numberOfBlocks = cms::alpakatools::divide_up_by(nhits, blockSize); + const Vec2D blks{numberOfBlocks, 1u}; + const Vec2D thrs{blockSize, stride}; + const auto fishboneWorkDiv = cms::alpakatools::make_workdiv(blks, thrs); + alpaka::exec(queue, + fishboneWorkDiv, + CAFishbone{}, + hh, + this->device_theCells_.data(), + this->device_nCells_.data(), + this->isOuterHitOfCell_.data(), + nhits, + false); + } + blockSize = 64; + numberOfBlocks = cms::alpakatools::divide_up_by(3 * m_params.caParams_.maxNumberOfDoublets_ / 4, blockSize); + auto workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_find_ntuplets{}, + hh, + tracks_view, + this->device_theCells_.data(), + this->device_nCells_.data(), + this->device_theCellTracks_.data(), + this->device_hitTuple_apc_, + this->m_params.caParams_); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + if (this->m_params.doStats_) + alpaka::exec(queue, + workDiv1D, + Kernel_mark_used{}, + this->device_theCells_.data(), + this->device_nCells_.data()); + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + blockSize = 128; + numberOfBlocks = cms::alpakatools::divide_up_by(HitContainer{}.totOnes(), blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + + alpaka::exec( + queue, workDiv1D, typename HitContainer::finalizeBulk{}, this->device_hitTuple_apc_, &tracks_view.hitIndices()); + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + alpaka::exec(queue, workDiv1D, Kernel_fillHitDetIndices{}, tracks_view, hh); + +#ifdef GPU_DEBUG + alpaka::wait(queue); 
+#endif + alpaka::exec(queue, workDiv1D, Kernel_fillNLayers{}, tracks_view, this->device_hitTuple_apc_); + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + // remove duplicates (tracks that share a doublet) + numberOfBlocks = cms::alpakatools::divide_up_by(3 * m_params.caParams_.maxNumberOfDoublets_ / 4, blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + + alpaka::exec(queue, + workDiv1D, + Kernel_earlyDuplicateRemover{}, + this->device_theCells_.data(), + this->device_nCells_.data(), + tracks_view, + this->m_params.dupPassThrough_); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + blockSize = 128; + numberOfBlocks = cms::alpakatools::divide_up_by(3 * TrackerTraits::maxNumberOfTuples / 4, blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + + alpaka::exec(queue, + workDiv1D, + Kernel_countMultiplicity{}, + tracks_view, + this->device_tupleMultiplicity_.data()); + TupleMultiplicity::template launchFinalize(this->device_tupleMultiplicity_.data(), queue); + + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec( + queue, workDiv1D, Kernel_fillMultiplicity{}, tracks_view, this->device_tupleMultiplicity_.data()); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + // do not run the fishbone if there are hits only in BPIX1 + if (this->m_params.lateFishbone_) { + const auto nthTot = 128; + const auto stride = 16; + const auto blockSize = nthTot / stride; + const auto numberOfBlocks = cms::alpakatools::divide_up_by(nhits, blockSize); + const Vec2D blks{numberOfBlocks, 1u}; + const Vec2D thrs{blockSize, stride}; + const auto workDiv2D = cms::alpakatools::make_workdiv(blks, thrs); + + alpaka::exec(queue, + workDiv2D, + CAFishbone{}, + hh, + this->device_theCells_.data(), + this->device_nCells_.data(), + this->isOuterHitOfCell_.data(), + nhits, + true); + } + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + } + + template + void 
CAHitNtupletGeneratorKernels::buildDoublets(const HitsConstView &hh, Queue &queue) { + auto nhits = hh.metadata().size(); + + using namespace caPixelDoublets; + + using CACell = CACellT; + using OuterHitOfCell = typename CACell::OuterHitOfCell; + using CellNeighbors = typename CACell::CellNeighbors; + using CellTracks = typename CACell::CellTracks; + using OuterHitOfCellContainer = typename CACell::OuterHitOfCellContainer; + +#ifdef NTUPLE_DEBUG + std::cout << "building Doublets out of " << nhits << " Hits" << std::endl; +#endif + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + // in principle we can use "nhits" to heuristically dimension the workspace... + ALPAKA_ASSERT_OFFLOAD(this->device_isOuterHitOfCell_.data()); + + alpaka::exec( + queue, + cms::alpakatools::make_workdiv(1, 1), + [] ALPAKA_FN_ACC(Acc1D const &acc, + OuterHitOfCell *isOuterHitOfCell, + OuterHitOfCellContainer *container, + int32_t const *offset) { + // this code runs on the device + isOuterHitOfCell->container = container; + isOuterHitOfCell->offset = *offset; + }, + this->isOuterHitOfCell_.data(), + this->device_isOuterHitOfCell_.data(), + &hh.offsetBPIX2()); + + { + int threadsPerBlock = 128; + // at least one block! 
+ int blocks = std::max(1u, cms::alpakatools::divide_up_by(nhits, threadsPerBlock)); + const auto workDiv1D = cms::alpakatools::make_workdiv(blocks, threadsPerBlock); + + alpaka::exec(queue, + workDiv1D, + InitDoublets{}, + this->isOuterHitOfCell_.data(), + nhits, + this->device_theCellNeighbors_.data(), + this->device_theCellNeighborsContainer_, + this->device_theCellTracks_.data(), + this->device_theCellTracksContainer_); + } + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + if (0 == nhits) + return; // protect against empty events + + // take all layer pairs into account + auto nActualPairs = this->m_params.nPairs(); + + const int stride = 4; + const int threadsPerBlock = TrackerTraits::getDoubletsFromHistoMaxBlockSize / stride; + int blocks = (4 * nhits + threadsPerBlock - 1) / threadsPerBlock; + const Vec2D blks{blocks, 1u}; + const Vec2D thrs{threadsPerBlock, stride}; + const auto workDiv2D = cms::alpakatools::make_workdiv(blks, thrs); + + alpaka::exec(queue, + workDiv2D, + GetDoubletsFromHisto{}, + this->device_theCells_.data(), + this->device_nCells_.data(), + this->device_theCellNeighbors_.data(), + this->device_theCellTracks_.data(), + hh, + this->isOuterHitOfCell_.data(), + nActualPairs, + this->m_params.caParams_.maxNumberOfDoublets_, + this->m_params.cellCuts_); + +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + } + + template + void CAHitNtupletGeneratorKernels::classifyTuples(const HitsConstView &hh, + TkSoAView &tracks_view, + Queue &queue) { + using namespace caHitNtupletGeneratorKernels; + + uint32_t nhits = hh.metadata().size(); + + auto blockSize = 64; + + // classify tracks based on kinematics + auto numberOfBlocks = cms::alpakatools::divide_up_by(3 * TrackerTraits::maxNumberOfQuadruplets / 4, blockSize); + auto workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec( + queue, workDiv1D, Kernel_classifyTracks{}, tracks_view, this->m_params.qualityCuts_); + + if (this->m_params.lateFishbone_) { + // apply 
fishbone cleaning to good tracks + numberOfBlocks = cms::alpakatools::divide_up_by(3 * m_params.caParams_.maxNumberOfDoublets_ / 4, blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_fishboneCleaner{}, + this->device_theCells_.data(), + this->device_nCells_.data(), + tracks_view); + } + + // mark duplicates (tracks that share a doublet) + numberOfBlocks = cms::alpakatools::divide_up_by(3 * m_params.caParams_.maxNumberOfDoublets_ / 4, blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_fastDuplicateRemover{}, + this->device_theCells_.data(), + this->device_nCells_.data(), + tracks_view, + this->m_params.dupPassThrough_); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + + if (this->m_params.doSharedHitCut_ || this->m_params.doStats_) { + // fill hit->track "map" + numberOfBlocks = cms::alpakatools::divide_up_by(3 * TrackerTraits::maxNumberOfQuadruplets / 4, blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_countHitInTracks{}, + tracks_view, + this->device_hitToTuple_.data()); //CHECK + + HitToTuple::template launchFinalize(this->device_hitToTuple_.data(), queue); + alpaka::exec( + queue, workDiv1D, Kernel_fillHitInTracks{}, tracks_view, this->device_hitToTuple_.data()); +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + } + + if (this->m_params.doSharedHitCut_) { + // mark duplicates (tracks that share at least one hit) + numberOfBlocks = cms::alpakatools::divide_up_by(3 * TrackerTraits::maxNumberOfQuadruplets / 4, + blockSize); // TODO: Check if correct + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_rejectDuplicate{}, + tracks_view, + this->m_params.minHitsForSharingCut_, + this->m_params.dupPassThrough_, + this->device_hitToTuple_.data()); + + alpaka::exec(queue, + 
workDiv1D, + Kernel_sharedHitCleaner{}, + hh, + tracks_view, + this->m_params.minHitsForSharingCut_, + this->m_params.dupPassThrough_, + this->device_hitToTuple_.data()); + + if (this->m_params.useSimpleTripletCleaner_) { + // (typename HitToTuple{}::capacity(), + numberOfBlocks = cms::alpakatools::divide_up_by(HitToTuple{}.capacity(), blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_simpleTripletCleaner{}, + tracks_view, + this->m_params.minHitsForSharingCut_, + this->m_params.dupPassThrough_, + this->device_hitToTuple_.data()); + } else { + numberOfBlocks = cms::alpakatools::divide_up_by(HitToTuple{}.capacity(), blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_tripletCleaner{}, + tracks_view, + this->m_params.minHitsForSharingCut_, + this->m_params.dupPassThrough_, + this->device_hitToTuple_.data()); + } +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + } + + if (this->m_params.doStats_) { + numberOfBlocks = + cms::alpakatools::divide_up_by(std::max(nhits, m_params.caParams_.maxNumberOfDoublets_), blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + + alpaka::exec(queue, + workDiv1D, + Kernel_checkOverflows{}, + tracks_view, + this->device_tupleMultiplicity_.data(), + this->device_hitToTuple_.data(), + this->device_hitTuple_apc_, + this->device_theCells_.data(), + this->device_nCells_.data(), + this->device_theCellNeighbors_.data(), + this->device_theCellTracks_.data(), + this->isOuterHitOfCell_.data(), + nhits, + this->m_params.caParams_.maxNumberOfDoublets_, + this->counters_.data()); + } + + if (this->m_params.doStats_) { + // counters (add flag???) 
+ + numberOfBlocks = cms::alpakatools::divide_up_by(HitToTuple{}.capacity(), blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec(queue, + workDiv1D, + Kernel_doStatsForHitInTracks{}, + this->device_hitToTuple_.data(), + this->counters_.data()); + + numberOfBlocks = cms::alpakatools::divide_up_by(3 * TrackerTraits::maxNumberOfQuadruplets / 4, blockSize); + workDiv1D = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec( + queue, workDiv1D, Kernel_doStatsForTracks{}, tracks_view, this->counters_.data()); + } +#ifdef GPU_DEBUG + alpaka::wait(queue); +#endif + +#ifdef DUMP_GPU_TK_TUPLES + static std::atomic iev(0); + static std::mutex lock; + workDiv1D = cms::alpakatools::make_workdiv(1u, 32u); + { + std::lock_guard guard(lock); + ++iev; + for (int k = 0; k < 20000; k += 500) { + alpaka::exec(queue, + workDiv1D, + Kernel_print_found_ntuplets{}, + hh, + tracks_view, + this->device_hitToTuple_.data(), + k, + k + 500, + iev); + alpaka::wait(queue); + } + alpaka::exec(queue, + workDiv1D, + Kernel_print_found_ntuplets{}, + hh, + tracks_view, + this->device_hitToTuple_.data(), + 20000, + 1000000, + iev); + + alpaka::wait(queue); + } +#endif + } + // This will make sense when we will be able to run this once per job in Alpaka + /* +template +void CAHitNtupletGeneratorKernels::printCounters() { + auto workDiv1D = cms::alpakatools::make_workdiv(1,1); + alpaka::exec(queue_,workDiv1D,Kernel_printCounters{},this->counters_.data()); +} +*/ + template class CAHitNtupletGeneratorKernels; + template class CAHitNtupletGeneratorKernels; + template class CAHitNtupletGeneratorKernels; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernels.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernels.h new file mode 100644 index 0000000000000..d55be09e6e497 --- /dev/null +++ 
b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernels.h @@ -0,0 +1,273 @@ +#ifndef RecoPixelVertexing_PixelTriplets_CAHitNtupletGeneratorKernels_h +#define RecoPixelVertexing_PixelTriplets_CAHitNtupletGeneratorKernels_h + +//#define GPU_DEBUG +//#define DUMP_GPU_TK_TUPLES + +#include + +#include + +#include "DataFormats/TrackSoA/interface/TrackDefinitions.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/AtomicPairCounter.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" + +#include "CACell.h" +#include "CAPixelDoublets.h" +#include "CAStructures.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace caHitNtupletGenerator { + + //Configuration params common to all topologies, for the algorithms + struct AlgoParams { + const uint32_t minHitsForSharingCut_; + const bool useRiemannFit_; + const bool fitNas4_; + const bool includeJumpingForwardDoublets_; + const bool earlyFishbone_; + const bool lateFishbone_; + const bool doStats_; + const bool doSharedHitCut_; + const bool dupPassThrough_; + const bool useSimpleTripletCleaner_; + }; + + //CAParams + struct CACommon { + const uint32_t maxNumberOfDoublets_; + const uint32_t minHitsPerNtuplet_; + const float ptmin_; + const float CAThetaCutBarrel_; + const float CAThetaCutForward_; + const float hardCurvCut_; + const float dcaCutInnerTriplet_; + const float dcaCutOuterTriplet_; + }; + + template + struct CAParamsT : public CACommon { + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool startingLayerPair(int16_t pid) const { return false; }; + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool startAt0(int16_t pid) const { return false; }; + }; + + template + struct CAParamsT> 
: public CACommon { + /// Is is a starting layer pair? + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool startingLayerPair(int16_t pid) const { + return minHitsPerNtuplet_ > 3 ? pid < 3 : pid < 8 || pid > 12; + } + + /// Is this a pair with inner == 0? + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool startAt0(int16_t pid) const { + assert((pixelTopology::Phase1::layerPairs[pid * 2] == 0) == + (pid < 3 || pid == 13 || pid == 15 || pid == 16)); // to be 100% sure it's working, may be removed + return pixelTopology::Phase1::layerPairs[pid * 2] == 0; + } + }; + + template + struct CAParamsT> : public CACommon { + const bool includeFarForwards_; + /// Is is a starting layer pair? + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool startingLayerPair(int16_t pid) const { + return pid < 33; // in principle one could remove 5,6,7 23, 28 and 29 + } + + /// Is this a pair with inner == 0 + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool startAt0(int16_t pid) const { + assert((pixelTopology::Phase2::layerPairs[pid * 2] == 0) == ((pid < 3) | (pid >= 23 && pid < 28))); + return pixelTopology::Phase2::layerPairs[pid * 2] == 0; + } + }; + + //Full list of params = algo params + ca params + cell params + quality cuts + //Generic template + template + struct ParamsT : public AlgoParams { + // one should define the params for its own pixelTopology + // not defining anything here + inline uint32_t nPairs() const { return 0; } + }; + + template + struct ParamsT> : public AlgoParams { + using TT = TrackerTraits; + using QualityCuts = ::pixelTrack::QualityCutsT; //track quality cuts + using CellCuts = caPixelDoublets::CellCutsT; //cell building cuts + using CAParams = CAParamsT; //params to be used on device + + ParamsT(AlgoParams const& commonCuts, + CellCuts const& cellCuts, + QualityCuts const& cutsCuts, + CAParams const& caParams) + : AlgoParams(commonCuts), cellCuts_(cellCuts), qualityCuts_(cutsCuts), caParams_(caParams) {} + + const CellCuts cellCuts_; + const QualityCuts qualityCuts_{// polynomial coefficients for the 
pT-dependent chi2 cut + {0.68177776, 0.74609577, -0.08035491, 0.00315399}, + // max pT used to determine the chi2 cut + 10., + // chi2 scale factor: 30 for broken line fit, 45 for Riemann fit + 30., + // regional cuts for triplets + { + 0.3, // |Tip| < 0.3 cm + 0.5, // pT > 0.5 GeV + 12.0 // |Zip| < 12.0 cm + }, + // regional cuts for quadruplets + { + 0.5, // |Tip| < 0.5 cm + 0.3, // pT > 0.3 GeV + 12.0 // |Zip| < 12.0 cm + }}; + const CAParams caParams_; + /// Compute the number of pairs + inline uint32_t nPairs() const { + // take all layer pairs into account + uint32_t nActualPairs = TT::nPairs; + if (not includeJumpingForwardDoublets_) { + // exclude forward "jumping" layer pairs + nActualPairs = TT::nPairsForTriplets; + } + if (caParams_.minHitsPerNtuplet_ > 3) { + // for quadruplets, exclude all "jumping" layer pairs + nActualPairs = TT::nPairsForQuadruplets; + } + + return nActualPairs; + } + + }; // Params Phase1 + + template + struct ParamsT> : public AlgoParams { + using TT = TrackerTraits; + using QualityCuts = ::pixelTrack::QualityCutsT; + using CellCuts = caPixelDoublets::CellCutsT; + using CAParams = CAParamsT; + + ParamsT(AlgoParams const& commonCuts, + CellCuts const& cellCuts, + QualityCuts const& qualityCuts, + CAParams const& caParams) + : AlgoParams(commonCuts), cellCuts_(cellCuts), qualityCuts_(qualityCuts), caParams_(caParams) {} + + // quality cuts + const CellCuts cellCuts_; + const QualityCuts qualityCuts_{5.0f, /*chi2*/ 0.9f, /* pT in Gev*/ 0.4f, /*zip in cm*/ 12.0f /*tip in cm*/}; + const CAParams caParams_; + + inline uint32_t nPairs() const { + // take all layer pairs into account + uint32_t nActualPairs = TT::nPairsMinimal; + if (caParams_.includeFarForwards_) { + // considera far forwards (> 11 & > 23) + nActualPairs = TT::nPairsFarForwards; + } + if (includeJumpingForwardDoublets_) { + // include jumping forwards + nActualPairs = TT::nPairs; + } + + return nActualPairs; + } + + }; // Params Phase1 + + // counters + struct Counters { 
+ unsigned long long nEvents; + unsigned long long nHits; + unsigned long long nCells; + unsigned long long nTuples; + unsigned long long nFitTracks; + unsigned long long nLooseTracks; + unsigned long long nGoodTracks; + unsigned long long nUsedHits; + unsigned long long nDupHits; + unsigned long long nFishCells; + unsigned long long nKilledCells; + unsigned long long nEmptyCells; + unsigned long long nZeroTrackCells; + }; + + using Quality = ::pixelTrack::Quality; + + } // namespace caHitNtupletGenerator + + template + class CAHitNtupletGeneratorKernels { + public: + using TrackerTraits = TTTraits; + using QualityCuts = ::pixelTrack::QualityCutsT; + using CellCuts = caPixelDoublets::CellCutsT; + using Params = caHitNtupletGenerator::ParamsT; + using CAParams = caHitNtupletGenerator::CAParamsT; + using Counters = caHitNtupletGenerator::Counters; + + using HitsView = TrackingRecHitSoAView; + using HitsConstView = TrackingRecHitSoAConstView; + using TkSoAView = reco::TrackSoAView; + + using HitToTuple = caStructures::template HitToTupleT; + using TupleMultiplicity = caStructures::template TupleMultiplicityT; + struct Testttt { + TupleMultiplicity tm; + }; + using CellNeighborsVector = caStructures::CellNeighborsVectorT; + using CellNeighbors = caStructures::CellNeighborsT; + using CellTracksVector = caStructures::CellTracksVectorT; + using CellTracks = caStructures::CellTracksT; + using OuterHitOfCellContainer = caStructures::OuterHitOfCellContainerT; + using OuterHitOfCell = caStructures::OuterHitOfCellT; + + using CACell = CACellT; + + using Quality = ::pixelTrack::Quality; + using HitContainer = typename reco::TrackSoA::HitContainer; + + CAHitNtupletGeneratorKernels(Params const& params, uint32_t nhits, Queue& queue); + ~CAHitNtupletGeneratorKernels() = default; + + TupleMultiplicity const* tupleMultiplicity() const { return device_tupleMultiplicity_.data(); } + + void launchKernels(const HitsConstView& hh, TkSoAView& track_view, Queue& queue); + + void 
classifyTuples(const HitsConstView& hh, TkSoAView& track_view, Queue& queue); + + void buildDoublets(const HitsConstView& hh, Queue& queue); + + static void printCounters(); + + private: + // params + Params const& m_params; + cms::alpakatools::device_buffer counters_; + + // workspace + cms::alpakatools::device_buffer device_hitToTuple_; + cms::alpakatools::device_buffer device_tupleMultiplicity_; + cms::alpakatools::device_buffer device_theCells_; + cms::alpakatools::device_buffer device_isOuterHitOfCell_; + cms::alpakatools::device_buffer isOuterHitOfCell_; + cms::alpakatools::device_buffer device_theCellNeighbors_; + cms::alpakatools::device_buffer device_theCellTracks_; + cms::alpakatools::device_buffer cellStorage_; + cms::alpakatools::device_buffer device_cellCuts_; + CellNeighbors* device_theCellNeighborsContainer_; + CellTracks* device_theCellTracksContainer_; + cms::alpakatools::device_buffer device_storage_; + cms::alpakatools::AtomicPairCounter* device_hitTuple_apc_; + cms::alpakatools::AtomicPairCounter* device_hitToTuple_apc_; + cms::alpakatools::device_view device_nCells_; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // RecoPixelVertexing_PixelTriplets_plugins_CAHitNtupletGeneratorKernels_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernelsImpl.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernelsImpl.h new file mode 100644 index 0000000000000..b809caa2e5736 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAHitNtupletGeneratorKernelsImpl.h @@ -0,0 +1,1044 @@ +// +// Original Author: Felice Pantaleo, CERN +// + +//#define GPU_DEBUG +//#define NTUPLE_DEBUG + +#include +#include +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" 
+#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" + +#include "CAStructures.h" +#include "CAHitNtupletGeneratorKernels.h" +#include "CACell.h" +#include "CAFishbone.h" +#include "CAPixelDoublets.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace caHitNtupletGeneratorKernels { + + constexpr uint32_t tkNotFound = std::numeric_limits::max(); + constexpr float maxScore = std::numeric_limits::max(); + constexpr float nSigma2 = 25.f; + + //all of these below are mostly to avoid brining around the relative namespace + + template + using HitToTuple = caStructures::HitToTupleT; + + template + using TupleMultiplicity = caStructures::TupleMultiplicityT; + + template + using CellNeighborsVector = caStructures::CellNeighborsVectorT; + + template + using CellTracksVector = caStructures::CellTracksVectorT; + + template + using OuterHitOfCell = caStructures::OuterHitOfCellT; + + using Quality = ::pixelTrack::Quality; + + template + using TkSoAView = reco::TrackSoAView; + + template + using HitContainer = typename reco::TrackSoA::HitContainer; + + template + using HitsConstView = typename CACellT::HitsConstView; + + template + using QualityCuts = ::pixelTrack::QualityCutsT; + + template + using CAParams = caHitNtupletGenerator::CAParamsT; + + using Counters = caHitNtupletGenerator::Counters; + + template + class Kernel_checkOverflows { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + TupleMultiplicity const *tupleMultiplicity, + HitToTuple const *hitToTuple, + cms::alpakatools::AtomicPairCounter *apc, + CACellT const *__restrict__ cells, + uint32_t const *__restrict__ nCells, + CellNeighborsVector const *cellNeighbors, + CellTracksVector const *cellTracks, + OuterHitOfCell const *isOuterHitOfCell, + int32_t nHits, + uint32_t maxNumberOfDoublets, + Counters *counters) const { + auto &c = *counters; + // counters once per event + if 
(cms::alpakatools::once_per_grid(acc)) { + alpaka::atomicAdd(acc, &c.nEvents, 1ull, alpaka::hierarchy::Blocks{}); + alpaka::atomicAdd(acc, &c.nHits, static_cast(nHits), alpaka::hierarchy::Blocks{}); + alpaka::atomicAdd(acc, &c.nCells, static_cast(*nCells), alpaka::hierarchy::Blocks{}); + alpaka::atomicAdd( + acc, &c.nTuples, static_cast(apc->get().first), alpaka::hierarchy::Blocks{}); + alpaka::atomicAdd(acc, + &c.nFitTracks, + static_cast(tupleMultiplicity->size()), + alpaka::hierarchy::Blocks{}); + } + +#ifdef NTUPLE_DEBUGS + if (cms::alpakatools::once_per_grid(acc)) { + printf("number of found cells %d \n found tuples %d with total hits %d out of %d\n", + *nCells, + apc->get().first, + apc->get().second, + nHits); + if (apc->get().first < TrackerTraits::maxNumberOfQuadruplets) { + ALPAKA_ASSERT_OFFLOAD(tracks_view.hitIndices().size(apc->get().first) == 0); + ALPAKA_ASSERT_OFFLOAD(tracks_view.hitIndices().size() == apc->get().second); + } + } + const auto ntNbins = foundNtuplets->nbins(); + + for (auto idx : cms::alpakatools::elements_with_stride(acc, ntBins)) { + if (tracks_view.hitIndices().size(idx) > TrackerTraits::maxHitsOnTrack) // current real limit + printf("ERROR %d, %d\n", idx, tracks_view.hitIndices().size(idx)); + ALPAKA_ASSERT_OFFLOAD(ftracks_view.hitIndices().size(idx) <= TrackerTraits::maxHitsOnTrack); + for (auto ih = tracks_view.hitIndices().begin(idx); ih != tracks_view.hitIndices().end(idx); ++ih) + ALPAKA_ASSERT_OFFLOAD(int(*ih) < nHits); + } +#endif + + if (cms::alpakatools::once_per_grid(acc)) { + if (apc->get().first >= TrackerTraits::maxNumberOfQuadruplets) + printf("Tuples overflow\n"); + if (*nCells >= maxNumberOfDoublets) + printf("Cells overflow\n"); + if (cellNeighbors && cellNeighbors->full()) + printf("cellNeighbors overflow %d %d \n", cellNeighbors->capacity(), cellNeighbors->size()); + if (cellTracks && cellTracks->full()) + printf("cellTracks overflow\n"); + if (int(hitToTuple->nOnes()) < nHits) + printf("ERROR hitToTuple 
overflow %d %d\n", hitToTuple->nOnes(), nHits); +#ifdef GPU_DEBUG + printf("size of cellNeighbors %d \n cellTracks %d \n hitToTuple %d \n", + cellNeighbors->size(), + cellTracks->size(), + hitToTuple->size()); +#endif + } + + const auto ntNCells = (*nCells); + for (auto idx : cms::alpakatools::elements_with_stride(acc, ntNCells)) { + auto const &thisCell = cells[idx]; + if (thisCell.hasFishbone() && !thisCell.isKilled()) + alpaka::atomicAdd(acc, &c.nFishCells, 1ull, alpaka::hierarchy::Blocks{}); + if (thisCell.outerNeighbors().full()) //++tooManyNeighbors[thisCell.theLayerPairId]; + printf("OuterNeighbors overflow %d in %d\n", idx, thisCell.layerPairId()); + if (thisCell.tracks().full()) //++tooManyTracks[thisCell.theLayerPairId]; + printf("Tracks overflow %d in %d\n", idx, thisCell.layerPairId()); + if (thisCell.isKilled()) + alpaka::atomicAdd(acc, &c.nKilledCells, 1ull, alpaka::hierarchy::Blocks{}); + if (!thisCell.unused()) + alpaka::atomicAdd(acc, &c.nEmptyCells, 1ull, alpaka::hierarchy::Blocks{}); + if ((0 == hitToTuple->size(thisCell.inner_hit_id())) && (0 == hitToTuple->size(thisCell.outer_hit_id()))) + alpaka::atomicAdd(acc, &c.nZeroTrackCells, 1ull, alpaka::hierarchy::Blocks{}); + } + + for (auto idx : cms::alpakatools::elements_with_stride(acc, nHits)) + if ((*isOuterHitOfCell).container[idx].full()) // ++tooManyOuterHitOfCell; + printf("OuterHitOfCell overflow %d\n", idx); + } + }; + + template + class Kernel_fishboneCleaner { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + CACellT const *cells, + uint32_t const *__restrict__ nCells, + TkSoAView tracks_view) const { + constexpr auto reject = Quality::dup; + const auto ntNCells = (*nCells); + + for (auto idx : cms::alpakatools::elements_with_stride(acc, ntNCells)) { + auto const &thisCell = cells[idx]; + if (!thisCell.isKilled()) + continue; + + for (auto it : thisCell.tracks()) + tracks_view[it].quality() = reject; + } + } + }; + // remove shorter tracks if sharing a cell + // 
It does not seem to affect efficiency in any way! + template + class Kernel_earlyDuplicateRemover { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + CACellT const *cells, + uint32_t const *__restrict__ nCells, + TkSoAView tracks_view, + bool dupPassThrough) const { + // quality to mark rejected + constexpr auto reject = Quality::edup; /// cannot be loose + ALPAKA_ASSERT_OFFLOAD(nCells); + const auto ntNCells = (*nCells); + + for (auto idx : cms::alpakatools::elements_with_stride(acc, ntNCells)) { + auto const &thisCell = cells[idx]; + + if (thisCell.tracks().size() < 2) + continue; + + int8_t maxNl = 0; + + // find maxNl + for (auto it : thisCell.tracks()) { + auto nl = tracks_view[it].nLayers(); + maxNl = std::max(nl, maxNl); + } + + // if (maxNl<4) continue; + // quad pass through (leave it her for tests) + // maxNl = std::min(4, maxNl); + + for (auto it : thisCell.tracks()) { + if (tracks_view[it].nLayers() < maxNl) + tracks_view[it].quality() = reject; //no race: simple assignment of the same constant + } + } + } + }; + + // assume the above (so, short tracks already removed) + template + class Kernel_fastDuplicateRemover { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + CACellT const *__restrict__ cells, + uint32_t const *__restrict__ nCells, + TkSoAView tracks_view, + bool dupPassThrough) const { + // quality to mark rejected + auto const reject = dupPassThrough ? 
Quality::loose : Quality::dup; + constexpr auto loose = Quality::loose; + + ALPAKA_ASSERT_OFFLOAD(nCells); + const auto ntNCells = (*nCells); + + for (auto idx : cms::alpakatools::elements_with_stride(acc, ntNCells)) { + auto const &thisCell = cells[idx]; + if (thisCell.tracks().size() < 2) + continue; + + float mc = maxScore; + uint16_t im = tkNotFound; + + auto score = [&](auto it) { return std::abs(reco::tip(tracks_view, it)); }; + + // full crazy combinatorics + int ntr = thisCell.tracks().size(); + for (int i = 0; i < ntr - 1; ++i) { + auto it = thisCell.tracks()[i]; + auto qi = tracks_view[it].quality(); + if (qi <= reject) + continue; + auto opi = tracks_view[it].state()(2); + auto e2opi = tracks_view[it].covariance()(9); + auto cti = tracks_view[it].state()(3); + auto e2cti = tracks_view[it].covariance()(12); + for (auto j = i + 1; j < ntr; ++j) { + auto jt = thisCell.tracks()[j]; + auto qj = tracks_view[jt].quality(); + if (qj <= reject) + continue; + auto opj = tracks_view[jt].state()(2); + auto ctj = tracks_view[jt].state()(3); + auto dct = nSigma2 * (tracks_view[jt].covariance()(12) + e2cti); + if ((cti - ctj) * (cti - ctj) > dct) + continue; + auto dop = nSigma2 * (tracks_view[jt].covariance()(9) + e2opi); + if ((opi - opj) * (opi - opj) > dop) + continue; + if ((qj < qi) || (qj == qi && score(it) < score(jt))) + tracks_view[jt].quality() = reject; + else { + tracks_view[it].quality() = reject; + break; + } + } + } + + // find maxQual + auto maxQual = reject; // no duplicate! 
+ for (auto it : thisCell.tracks()) { + if (tracks_view[it].quality() > maxQual) + maxQual = tracks_view[it].quality(); + } + + if (maxQual <= loose) + continue; + + // find min score + for (auto it : thisCell.tracks()) { + if (tracks_view[it].quality() == maxQual && score(it) < mc) { + mc = score(it); + im = it; + } + } + + if (tkNotFound == im) + continue; + + // mark all other duplicates (not yet, keep it loose) + for (auto it : thisCell.tracks()) { + if (tracks_view[it].quality() > loose && it != im) + tracks_view[it].quality() = loose; //no race: simple assignment of the same constant + } + } + } + }; + + template + class Kernel_connect { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + cms::alpakatools::AtomicPairCounter *apc1, + cms::alpakatools::AtomicPairCounter *apc2, // just to zero them + HitsConstView hh, + CACellT *cells, + uint32_t *nCells, + CellNeighborsVector *cellNeighbors, + OuterHitOfCell const *isOuterHitOfCell, + CAParams params) const { + using Cell = CACellT; + + const uint32_t dimIndexY = 0u; + const uint32_t dimIndexX = 1u; + const uint32_t threadIdxY(alpaka::getIdx(acc)[dimIndexY]); + const uint32_t threadIdxLocalX(alpaka::getIdx(acc)[dimIndexX]); + + if (0 == (threadIdxY + threadIdxLocalX)) { + (*apc1) = 0; + (*apc2) = 0; + } // ready for next kernel + + constexpr uint32_t last_bpix1_detIndex = TrackerTraits::last_bpix1_detIndex; + constexpr uint32_t last_barrel_detIndex = TrackerTraits::last_barrel_detIndex; + + cms::alpakatools::for_each_element_in_grid_strided( + acc, + (*nCells), + 0u, + [&](uint32_t idx) { + auto cellIndex = idx; + auto &thisCell = cells[idx]; + auto innerHitId = thisCell.inner_hit_id(); + if (int(innerHitId) >= isOuterHitOfCell->offset) { + uint32_t numberOfPossibleNeighbors = (*isOuterHitOfCell)[innerHitId].size(); + auto vi = (*isOuterHitOfCell)[innerHitId].data(); + + auto ri = thisCell.inner_r(hh); + auto zi = thisCell.inner_z(hh); + + auto ro = thisCell.outer_r(hh); + auto zo = 
thisCell.outer_z(hh); + auto isBarrel = thisCell.inner_detIndex(hh) < last_barrel_detIndex; + + cms::alpakatools::for_each_element_in_block_strided( + acc, + numberOfPossibleNeighbors, + 0u, + [&](uint32_t j) { + auto otherCell = (vi[j]); + auto &oc = cells[otherCell]; + auto r1 = oc.inner_r(hh); + auto z1 = oc.inner_z(hh); + bool aligned = Cell::areAlignedRZ( + r1, + z1, + ri, + zi, + ro, + zo, + params.ptmin_, + isBarrel ? params.CAThetaCutBarrel_ + : params.CAThetaCutForward_); // 2.f*thetaCut); // FIXME tune cuts + if (aligned && + thisCell.dcaCut(hh, + oc, + oc.inner_detIndex(hh) < last_bpix1_detIndex ? params.dcaCutInnerTriplet_ + : params.dcaCutOuterTriplet_, + params.hardCurvCut_)) { // FIXME tune cuts + oc.addOuterNeighbor(acc, cellIndex, *cellNeighbors); + thisCell.setStatusBits(Cell::StatusBit::kUsed); + oc.setStatusBits(Cell::StatusBit::kUsed); + } + }, + dimIndexX); // loop on inner cells + } + }, + dimIndexY); // loop on outer cells + } + }; + template + class Kernel_find_ntuplets { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + HitsConstView hh, + TkSoAView tracks_view, + CACellT *__restrict__ cells, + uint32_t const *nCells, + CellTracksVector *cellTracks, + cms::alpakatools::AtomicPairCounter *apc, + CAParams params) const { + // recursive: not obvious to widen + + using Cell = CACellT; + +#ifdef GPU_DEBUG + if (cms::alpakatools::once_per_grid(acc)) + printf("starting producing ntuplets from %d cells \n", *nCells); +#endif + + for (auto idx : cms::alpakatools::elements_with_stride(acc, (*nCells))) { + auto const &thisCell = cells[idx]; + + if (thisCell.isKilled()) + continue; // cut by earlyFishbone + + // we require at least three hits... 
+ + if (thisCell.outerNeighbors().empty()) + continue; + + auto pid = thisCell.layerPairId(); + bool doit = params.startingLayerPair(pid); + + constexpr uint32_t maxDepth = TrackerTraits::maxDepth; + + if (doit) { + typename Cell::TmpTuple stack; + stack.reset(); + bool bpix1Start = params.startAt0(pid); + thisCell.template find_ntuplets(acc, + hh, + cells, + *cellTracks, + tracks_view.hitIndices(), + *apc, + tracks_view.quality(), + stack, + params.minHitsPerNtuplet_, + bpix1Start); + ALPAKA_ASSERT_OFFLOAD(stack.empty()); + } + } + } + }; + + template + class Kernel_mark_used { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + CACellT *__restrict__ cells, + uint32_t const *nCells) const { + using Cell = CACellT; + for (auto idx : cms::alpakatools::elements_with_stride(acc, (*nCells))) { + auto &thisCell = cells[idx]; + if (!thisCell.tracks().empty()) + thisCell.setStatusBits(Cell::StatusBit::kInTrack); + } + } + }; + + template + class Kernel_countMultiplicity { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + TupleMultiplicity *tupleMultiplicity) const { + for (auto it : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + auto nhits = tracks_view.hitIndices().size(it); + if (nhits < 3) + continue; + if (tracks_view[it].quality() == Quality::edup) + continue; + ALPAKA_ASSERT_OFFLOAD(tracks_view[it].quality() == Quality::bad); + if (nhits > TrackerTraits::maxHitsOnTrack) // current limit + printf("wrong mult %d %d\n", it, nhits); + ALPAKA_ASSERT_OFFLOAD(nhits <= TrackerTraits::maxHitsOnTrack); + tupleMultiplicity->count(acc, nhits); + } + } + }; + + template + class Kernel_fillMultiplicity { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + TupleMultiplicity *tupleMultiplicity) const { + for (auto it : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + auto nhits = 
tracks_view.hitIndices().size(it); + if (nhits < 3) + continue; + if (tracks_view[it].quality() == Quality::edup) + continue; + ALPAKA_ASSERT_OFFLOAD(tracks_view[it].quality() == Quality::bad); + if (nhits > TrackerTraits::maxHitsOnTrack) + printf("wrong mult %d %d\n", it, nhits); + ALPAKA_ASSERT_OFFLOAD(nhits <= TrackerTraits::maxHitsOnTrack); + tupleMultiplicity->fill(acc, nhits, it); + } + } + }; + + template + class Kernel_classifyTracks { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + QualityCuts cuts) const { + for (auto it : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + auto nhits = tracks_view.hitIndices().size(it); + if (nhits == 0) + break; // guard + + // if duplicate: not even fit + if (tracks_view[it].quality() == Quality::edup) + continue; + + ALPAKA_ASSERT_OFFLOAD(tracks_view[it].quality() == Quality::bad); + + // mark doublets as bad + if (nhits < 3) + continue; + + // if the fit has any invalid parameters, mark it as bad + bool isNaN = false; + for (int i = 0; i < 5; ++i) { + isNaN |= std::isnan(tracks_view[it].state()(i)); + } + if (isNaN) { +#ifdef NTUPLE_DEBUG + printf("NaN in fit %d size %d chi2 %f\n", it, tracks_view.hitIndices().size(it), tracks_view[it].chi2()); +#endif + continue; + } + + tracks_view[it].quality() = Quality::strict; + + if (cuts.strictCut(tracks_view, it)) + continue; + + tracks_view[it].quality() = Quality::tight; + + if (cuts.isHP(tracks_view, nhits, it)) + tracks_view[it].quality() = Quality::highPurity; + } + } + }; + + template + class Kernel_doStatsForTracks { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, TkSoAView tracks_view, Counters *counters) const { + for (auto idx : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + if (tracks_view.hitIndices().size(idx) == 0) + break; //guard + if (tracks_view[idx].quality() < Quality::loose) + continue; + alpaka::atomicAdd(acc, 
&(counters->nLooseTracks), 1ull, alpaka::hierarchy::Blocks{}); + if (tracks_view[idx].quality() < Quality::strict) + continue; + alpaka::atomicAdd(acc, &(counters->nGoodTracks), 1ull, alpaka::hierarchy::Blocks{}); + } + } + }; + + template + class Kernel_countHitInTracks { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + HitToTuple *hitToTuple) const { + for (auto idx : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + if (tracks_view.hitIndices().size(idx) == 0) + break; // guard + for (auto h = tracks_view.hitIndices().begin(idx); h != tracks_view.hitIndices().end(idx); ++h) + hitToTuple->count(acc, *h); + } + } + }; + + template + class Kernel_fillHitInTracks { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + HitToTuple *hitToTuple) const { + for (auto idx : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + if (tracks_view.hitIndices().size(idx) == 0) + break; // guard + for (auto h = tracks_view.hitIndices().begin(idx); h != tracks_view.hitIndices().end(idx); ++h) + hitToTuple->fill(acc, *h, idx); + } + } + }; + + template + class Kernel_fillHitDetIndices { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + HitsConstView hh) const { + // copy offsets + for (auto idx : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nOnes())) { + tracks_view.detIndices().off[idx] = tracks_view.hitIndices().off[idx]; + } + // fill hit indices + for (auto idx : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().size())) { + ALPAKA_ASSERT_OFFLOAD(tracks_view.hitIndices().content[idx] < (uint32_t)hh.metadata().size()); + tracks_view.detIndices().content[idx] = hh[tracks_view.hitIndices().content[idx]].detectorIndex(); + } + } + }; + + template + class Kernel_fillNLayers { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc 
const &acc, + TkSoAView tracks_view, + cms::alpakatools::AtomicPairCounter *apc) const { + // clamp the number of tracks to the capacity of the SoA + auto ntracks = std::min(apc->get().first, tracks_view.metadata().size() - 1); + + if (cms::alpakatools::once_per_grid(acc)) + tracks_view.nTracks() = ntracks; + for (auto idx : cms::alpakatools::elements_with_stride(acc, ntracks)) { + ALPAKA_ASSERT_OFFLOAD(TracksUtilities::nHits(tracks_view, idx) >= 3); + tracks_view[idx].nLayers() = TracksUtilities::computeNumberOfLayers(tracks_view, idx); + } + } + }; + + template + class Kernel_doStatsForHitInTracks { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + HitToTuple const *__restrict__ hitToTuple, + Counters *counters) const { + auto &c = *counters; + for (auto idx : cms::alpakatools::elements_with_stride(acc, hitToTuple->nOnes())) { + if (hitToTuple->size(idx) == 0) + continue; // SHALL NOT BE break + alpaka::atomicAdd(acc, &c.nUsedHits, 1ull, alpaka::hierarchy::Blocks{}); + if (hitToTuple->size(idx) > 1) + alpaka::atomicAdd(acc, &c.nDupHits, 1ull, alpaka::hierarchy::Blocks{}); + } + } + }; + + template + class Kernel_countSharedHit { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + int *__restrict__ nshared, + HitContainer const *__restrict__ ptuples, + Quality const *__restrict__ quality, + HitToTuple const *__restrict__ phitToTuple) const { + constexpr auto loose = Quality::loose; + + auto &hitToTuple = *phitToTuple; + auto const &foundNtuplets = *ptuples; + for (auto idx : cms::alpakatools::elements_with_stride(acc, hitToTuple->nbins())) { + if (hitToTuple.size(idx) < 2) + continue; + + int nt = 0; + + // count "good" tracks + for (auto it = hitToTuple.begin(idx); it != hitToTuple.end(idx); ++it) { + if (quality[*it] < loose) + continue; + ++nt; + } + + if (nt < 2) + continue; + + // now mark each track triplet as sharing a hit + for (auto it = hitToTuple.begin(idx); it != hitToTuple.end(idx); ++it) { + if 
(foundNtuplets.size(*it) > 3) + continue; + alpaka::atomicAdd(acc, &nshared[*it], 1ull, alpaka::hierarchy::Blocks{}); + } + + } // hit loop + } + }; + + template + class Kernel_markSharedHit { + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + int const *__restrict__ nshared, + HitContainer const *__restrict__ tuples, + Quality *__restrict__ quality, + bool dupPassThrough) const { + // constexpr auto bad = Quality::bad; + constexpr auto dup = Quality::dup; + constexpr auto loose = Quality::loose; + // constexpr auto strict = Quality::strict; + + // quality to mark rejected + auto const reject = dupPassThrough ? loose : dup; + for (auto idx : cms::alpakatools::elements_with_stride(acc, tuples->nbins())) { + if (tuples->size(idx) == 0) + break; //guard + if (quality[idx] <= reject) + continue; + if (nshared[idx] > 2) + quality[idx] = reject; + } + } + }; + + // mostly for very forward triplets..... + template + class Kernel_rejectDuplicate { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + uint16_t nmin, + bool dupPassThrough, + HitToTuple const *__restrict__ phitToTuple) const { + // quality to mark rejected + auto const reject = dupPassThrough ? 
Quality::loose : Quality::dup; + + auto &hitToTuple = *phitToTuple; + + for (auto idx : cms::alpakatools::elements_with_stride(acc, hitToTuple.nOnes())) { + if (hitToTuple.size(idx) < 2) + continue; + + auto score = [&](auto it, auto nl) { return std::abs(reco::tip(tracks_view, it)); }; + + // full combinatorics + for (auto ip = hitToTuple.begin(idx); ip < hitToTuple.end(idx) - 1; ++ip) { + auto const it = *ip; + auto qi = tracks_view[it].quality(); + if (qi <= reject) + continue; + auto opi = tracks_view[it].state()(2); + auto e2opi = tracks_view[it].covariance()(9); + auto cti = tracks_view[it].state()(3); + auto e2cti = tracks_view[it].covariance()(12); + auto nli = tracks_view[it].nLayers(); + for (auto jp = ip + 1; jp < hitToTuple.end(idx); ++jp) { + auto const jt = *jp; + auto qj = tracks_view[jt].quality(); + if (qj <= reject) + continue; + auto opj = tracks_view[jt].state()(2); + auto ctj = tracks_view[jt].state()(3); + auto dct = nSigma2 * (tracks_view[jt].covariance()(12) + e2cti); + if ((cti - ctj) * (cti - ctj) > dct) + continue; + auto dop = nSigma2 * (tracks_view[jt].covariance()(9) + e2opi); + if ((opi - opj) * (opi - opj) > dop) + continue; + auto nlj = tracks_view[jt].nLayers(); + if (nlj < nli || (nlj == nli && (qj < qi || (qj == qi && score(it, nli) < score(jt, nlj))))) + tracks_view[jt].quality() = reject; + else { + tracks_view[it].quality() = reject; + break; + } + } + } + } + } + }; + + template + class Kernel_sharedHitCleaner { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + HitsConstView hh, + TkSoAView tracks_view, + int nmin, + bool dupPassThrough, + HitToTuple const *__restrict__ phitToTuple) const { + // quality to mark rejected + auto const reject = dupPassThrough ? 
Quality::loose : Quality::dup; + // quality of longest track + auto const longTqual = Quality::highPurity; + + auto &hitToTuple = *phitToTuple; + + uint32_t l1end = hh.hitsLayerStart()[1]; + + for (auto idx : cms::alpakatools::elements_with_stride(acc, hitToTuple.nOnes())) { + if (hitToTuple.size(idx) < 2) + continue; + + int8_t maxNl = 0; + + // find maxNl + for (auto it = hitToTuple.begin(idx); it != hitToTuple.end(idx); ++it) { + if (tracks_view[*it].quality() < longTqual) + continue; + // if (tracks_view[*it].nHits()==3) continue; + auto nl = tracks_view[*it].nLayers(); + maxNl = std::max(nl, maxNl); + } + + if (maxNl < 4) + continue; + + // quad pass through (leave for tests) + // maxNl = std::min(4, maxNl); + + // kill all tracks shorter than maxHl (only triplets??? + for (auto it = hitToTuple.begin(idx); it != hitToTuple.end(idx); ++it) { + auto nl = tracks_view[*it].nLayers(); + + //checking if shared hit is on bpix1 and if the tuple is short enough + if (idx < l1end and nl > nmin) + continue; + + if (nl < maxNl && tracks_view[*it].quality() > reject) + tracks_view[*it].quality() = reject; + } + } + } + }; + template + class Kernel_tripletCleaner { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + uint16_t nmin, + bool dupPassThrough, + HitToTuple const *__restrict__ phitToTuple) const { + // quality to mark rejected + auto const reject = Quality::loose; + /// min quality of good + auto const good = Quality::strict; + + auto &hitToTuple = *phitToTuple; + + for (auto idx : cms::alpakatools::elements_with_stride(acc, hitToTuple.nOnes())) { + if (hitToTuple.size(idx) < 2) + continue; + + float mc = maxScore; + uint16_t im = tkNotFound; + bool onlyTriplets = true; + + // check if only triplets + for (auto it = hitToTuple.begin(idx); it != hitToTuple.end(idx); ++it) { + if (tracks_view[*it].quality() <= good) + continue; + onlyTriplets &= reco::isTriplet(tracks_view, *it); + if (!onlyTriplets) + break; + } + + // 
only triplets + if (!onlyTriplets) + continue; + + // for triplets choose best tip! (should we first find best quality???) + for (auto ip = hitToTuple.begin(idx); ip != hitToTuple.end(idx); ++ip) { + auto const it = *ip; + if (tracks_view[it].quality() >= good && std::abs(reco::tip(tracks_view, it)) < mc) { + mc = std::abs(reco::tip(tracks_view, it)); + im = it; + } + } + + if (tkNotFound == im) + continue; + + // mark worse ambiguities + for (auto ip = hitToTuple.begin(idx); ip != hitToTuple.end(idx); ++ip) { + auto const it = *ip; + if (tracks_view[it].quality() > reject && it != im) + tracks_view[it].quality() = reject; //no race: simple assignment of the same constant + } + + } // loop over hits + } + }; + + template + class Kernel_simpleTripletCleaner { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TkSoAView tracks_view, + uint16_t nmin, + bool dupPassThrough, + HitToTuple const *__restrict__ phitToTuple) const { + // quality to mark rejected + auto const reject = Quality::loose; + /// min quality of good + auto const good = Quality::loose; + + auto &hitToTuple = *phitToTuple; + + for (auto idx : cms::alpakatools::elements_with_stride(acc, hitToTuple.nOnes())) { + if (hitToTuple.size(idx) < 2) + continue; + + float mc = maxScore; + uint16_t im = tkNotFound; + + // choose best tip! (should we first find best quality???) 
+ for (auto ip = hitToTuple.begin(idx); ip != hitToTuple.end(idx); ++ip) { + auto const it = *ip; + if (tracks_view[it].quality() >= good && std::abs(reco::tip(tracks_view, it)) < mc) { + mc = std::abs(reco::tip(tracks_view, it)); + im = it; + } + } + + if (tkNotFound == im) + continue; + + // mark worse ambiguities + for (auto ip = hitToTuple.begin(idx); ip != hitToTuple.end(idx); ++ip) { + auto const it = *ip; + if (tracks_view[it].quality() > reject && reco::isTriplet(tracks_view, it) && it != im) + tracks_view[it].quality() = reject; //no race: simple assignment of the same constant + } + + } // loop over hits + } + }; + + template + class Kernel_print_found_ntuplets { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + HitsConstView hh, + TkSoAView tracks_view, + HitToTuple const *__restrict__ phitToTuple, + int32_t firstPrint, + int32_t lastPrint, + int iev) const { + constexpr auto loose = Quality::loose; + + for (auto i : cms::alpakatools::elements_with_stride(acc, tracks_view.hitIndices().nbins())) { + auto nh = tracks_view.hitIndices().size(i); + if (nh < 3) + continue; + if (tracks_view[i].quality() < loose) + continue; + printf("TK: %d %d %d %d %f %f %f %f %f %f %f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n", + 10000 * iev + i, + int(tracks_view[i].quality()), + nh, + tracks_view[i].nLayers(), + reco::charge(tracks_view, i), + tracks_view[i].pt(), + tracks_view[i].eta(), + reco::phi(tracks_view, i), + reco::tip(tracks_view, i), + reco::zip(tracks_view, i), + tracks_view[i].chi2(), + hh[*tracks_view.hitIndices().begin(i)].zGlobal(), + hh[*(tracks_view.hitIndices().begin(i) + 1)].zGlobal(), + hh[*(tracks_view.hitIndices().begin(i) + 2)].zGlobal(), + nh > 3 ? hh[int(*(tracks_view.hitIndices().begin(i) + 3))].zGlobal() : 0, + nh > 4 ? hh[int(*(tracks_view.hitIndices().begin(i) + 4))].zGlobal() : 0, + nh > 5 ? hh[int(*(tracks_view.hitIndices().begin(i) + 5))].zGlobal() : 0, + nh > 6 ? 
hh[int(*(tracks_view.hitIndices().begin(i) + nh - 1))].zGlobal() : 0); + } + } + }; + + class Kernel_printCounters { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, Counters const *counters) const { + auto const &c = *counters; + printf( + "||Counters | nEvents | nHits | nCells | nTuples | nFitTacks | nLooseTracks | nGoodTracks | " + "nUsedHits " + "| " + "nDupHits | " + "nFishCells | " + "nKilledCells | " + "nUsedCells | nZeroTrackCells ||\n"); + printf("Counters Raw %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld\n", + c.nEvents, + c.nHits, + c.nCells, + c.nTuples, + c.nFitTracks, + c.nLooseTracks, + c.nGoodTracks, + c.nUsedHits, + c.nDupHits, + c.nFishCells, + c.nKilledCells, + c.nEmptyCells, + c.nZeroTrackCells); + printf( + "Counters Norm %lld || %.1f| %.1f| %.1f| %.1f| %.1f| %.1f| %.1f| %.1f| %.3f| %.3f| " + "%.3f| " + "%.3f||\n", + c.nEvents, + c.nHits / double(c.nEvents), + c.nCells / double(c.nEvents), + c.nTuples / double(c.nEvents), + c.nFitTracks / double(c.nEvents), + c.nLooseTracks / double(c.nEvents), + c.nGoodTracks / double(c.nEvents), + c.nUsedHits / double(c.nEvents), + c.nDupHits / double(c.nEvents), + c.nFishCells / double(c.nCells), + c.nKilledCells / double(c.nCells), + c.nEmptyCells / double(c.nCells), + c.nZeroTrackCells / double(c.nCells)); + } + }; + } // namespace caHitNtupletGeneratorKernels +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAPixelDoublets.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAPixelDoublets.h new file mode 100644 index 0000000000000..0b5ab0a985163 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAPixelDoublets.h @@ -0,0 +1,71 @@ +#ifndef RecoPixelVertexing_PixelTriplets_alpaka_CAPixelDoublets_h +#define RecoPixelVertexing_PixelTriplets_alpaka_CAPixelDoublets_h + +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" 
+#include "CAPixelDoubletsAlgos.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using namespace alpaka; + using namespace cms::alpakatools; + namespace caPixelDoublets { + + template + class InitDoublets { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const& acc, + OuterHitOfCell* isOuterHitOfCell, + int nHits, + CellNeighborsVector* cellNeighbors, + CellNeighbors* cellNeighborsContainer, + CellTracksVector* cellTracks, + CellTracks* cellTracksContainer) const { + ALPAKA_ASSERT_OFFLOAD((*isOuterHitOfCell).container); + + for (auto i : cms::alpakatools::elements_with_stride(acc, nHits)) + (*isOuterHitOfCell).container[i].reset(); + + if (cms::alpakatools::once_per_grid(acc)) { + cellNeighbors->construct(TrackerTraits::maxNumOfActiveDoublets, cellNeighborsContainer); + cellTracks->construct(TrackerTraits::maxNumOfActiveDoublets, cellTracksContainer); + [[maybe_unused]] auto i = cellNeighbors->extend(acc); + ALPAKA_ASSERT_OFFLOAD(0 == i); + (*cellNeighbors)[0].reset(); + i = cellTracks->extend(acc); + ALPAKA_ASSERT_OFFLOAD(0 == i); + (*cellTracks)[0].reset(); + } + } + }; + + // Not used for the moment, see below. 
+ //constexpr auto getDoubletsFromHistoMaxBlockSize = 64; // for both x and y + //constexpr auto getDoubletsFromHistoMinBlocksPerMP = 16; + + template + class GetDoubletsFromHisto { + public: + template >> + // #ifdef __CUDACC__ + // __launch_bounds__(getDoubletsFromHistoMaxBlockSize, getDoubletsFromHistoMinBlocksPerMP) // TODO: Alapakify + // #endif + ALPAKA_FN_ACC void operator()(TAcc const& acc, + CACellT* cells, + uint32_t* nCells, + CellNeighborsVector* cellNeighbors, + CellTracksVector* cellTracks, + HitsConstView hh, + OuterHitOfCell* isOuterHitOfCell, + uint32_t nActualPairs, + const uint32_t maxNumOfDoublets, + CellCutsT cuts) const { + doubletsFromHisto( + acc, nActualPairs, maxNumOfDoublets, cells, nCells, cellNeighbors, cellTracks, hh, *isOuterHitOfCell, cuts); + } + }; + } // namespace caPixelDoublets +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTriplets_plugins_CAPixelDoublets_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAPixelDoubletsAlgos.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAPixelDoubletsAlgos.h new file mode 100644 index 0000000000000..234b9b7527a3c --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAPixelDoubletsAlgos.h @@ -0,0 +1,344 @@ +#ifndef RecoPixelVertexing_PixelTriplets_alpaka_CAPixelDoubletsAlgos_h +#define RecoPixelVertexing_PixelTriplets_alpaka_CAPixelDoubletsAlgos_h + +#include +#include +#include +#include +#include + +#include + +#include "DataFormats/Math/interface/approx_atan2.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/VecArray.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" + +#include "CACell.h" +#include "CAStructures.h" + +//#define GPU_DEBUG +//#define NTUPLE_DEBUG + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + 
namespace caPixelDoublets { + using namespace cms::alpakatools; + + template + using CellNeighbors = caStructures::CellNeighborsT; + template + using CellTracks = caStructures::CellTracksT; + template + using CellNeighborsVector = caStructures::CellNeighborsVectorT; + template + using CellTracksVector = caStructures::CellTracksVectorT; + template + using OuterHitOfCell = caStructures::OuterHitOfCellT; + template + using HitsConstView = typename CACellT::HitsConstView; + + template + struct CellCutsT { + using H = HitsConstView; + using T = TrackerTraits; + + CellCutsT() = default; + + CellCutsT(const bool doClusterCut, + const bool doZ0Cut, + const bool doPtCut, + const bool idealConditions, + const float z0Cut, + const float ptCut, + const std::vector& phiCutsV) + : doClusterCut_(doClusterCut), + doZ0Cut_(doZ0Cut), + doPtCut_(doPtCut), + idealConditions_(idealConditions), + z0Cut_(z0Cut), + ptCut_(ptCut) { + assert(phiCutsV.size() == TrackerTraits::nPairs); + std::copy(phiCutsV.begin(), phiCutsV.end(), &phiCuts[0]); + } + + bool doClusterCut_; + bool doZ0Cut_; + bool doPtCut_; + bool idealConditions_; //this is actually not used by phase2 + + float z0Cut_; //FIXME: check if could be const now + float ptCut_; + + int phiCuts[T::nPairs]; + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool __attribute__((always_inline)) + zSizeCut(const TAcc& acc, H hh, int i, int o) const { + const uint32_t mi = hh[i].detectorIndex(); + + bool innerB1 = mi < T::last_bpix1_detIndex; + bool isOuterLadder = idealConditions_ ? true : 0 == (mi / 8) % 2; + auto mes = (!innerB1) || isOuterLadder ? 
hh[i].clusterSizeY() : -1; + + if (mes < 0) + return false; + + const uint32_t mo = hh[o].detectorIndex(); + auto so = hh[o].clusterSizeY(); + + auto dz = hh[i].zGlobal() - hh[o].zGlobal(); + auto dr = hh[i].rGlobal() - hh[o].rGlobal(); + + auto innerBarrel = mi < T::last_barrel_detIndex; + auto onlyBarrel = mo < T::last_barrel_detIndex; + + if (not innerBarrel and not onlyBarrel) + return false; + auto dy = innerB1 ? T::maxDYsize12 : T::maxDYsize; + + return onlyBarrel ? so > 0 && std::abs(so - mes) > dy + : innerBarrel && std::abs(mes - int(std::abs(dz / dr) * T::dzdrFact + 0.5f)) > T::maxDYPred; + } + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE bool __attribute__((always_inline)) + clusterCut(const TAcc& acc, H hh, uint32_t i) const { + const uint32_t mi = hh[i].detectorIndex(); + bool innerB1orB2 = mi < T::last_bpix2_detIndex; + + if (!innerB1orB2) + return false; + + bool innerB1 = mi < T::last_bpix1_detIndex; + bool isOuterLadder = idealConditions_ ? true : 0 == (mi / 8) % 2; + auto mes = (!innerB1) || isOuterLadder ? 
hh[i].clusterSizeY() : -1; + + if (innerB1) // B1 + if (mes > 0 && mes < T::minYsizeB1) + return true; // only long cluster (5*8) + bool innerB2 = (mi >= T::last_bpix1_detIndex) && (mi < T::last_bpix2_detIndex); //FIXME number + if (innerB2) // B2 and F1 + if (mes > 0 && mes < T::minYsizeB2) + return true; + + return false; + } + }; + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void __attribute__((always_inline)) + doubletsFromHisto(const TAcc& acc, + uint32_t nPairs, + const uint32_t maxNumOfDoublets, + CACellT* cells, + uint32_t* nCells, + CellNeighborsVector* cellNeighbors, + CellTracksVector* cellTracks, + HitsConstView hh, + OuterHitOfCell isOuterHitOfCell, + CellCutsT const& cuts) { // ysize cuts (z in the barrel) times 8 + // these are used if doClusterCut is true + + const bool doClusterCut = cuts.doClusterCut_; + const bool doZ0Cut = cuts.doZ0Cut_; + const bool doPtCut = cuts.doPtCut_; + + const float z0cut = cuts.z0Cut_; // cm + const float hardPtCut = cuts.ptCut_; // GeV + // cm (1 GeV track has 1 GeV/c / (e * 3.8T) ~ 87 cm radius in a 3.8T field) + const float minRadius = hardPtCut * 87.78f; + const float minRadius2T4 = 4.f * minRadius * minRadius; + + using PhiBinner = typename TrackingRecHitSoA::PhiBinner; + + auto const& __restrict__ phiBinner = hh.phiBinner(); + uint32_t const* __restrict__ offsets = hh.hitsLayerStart().data(); + ALPAKA_ASSERT_OFFLOAD(offsets); + + auto layerSize = [=](uint8_t li) { return offsets[li + 1] - offsets[li]; }; + + // nPairsMax to be optimized later (originally was 64). + // If it should be much bigger, consider using a block-wide parallel prefix scan, + // e.g. 
see https://nvlabs.github.io/cub/classcub_1_1_warp_scan.html + auto& innerLayerCumulativeSize = alpaka::declareSharedVar(acc); + auto& ntot = alpaka::declareSharedVar(acc); + + constexpr uint32_t dimIndexY = 0u; + constexpr uint32_t dimIndexX = 1u; + const uint32_t threadIdxLocalY(alpaka::getIdx(acc)[dimIndexY]); + const uint32_t threadIdxLocalX(alpaka::getIdx(acc)[dimIndexX]); + + if (threadIdxLocalY == 0 && threadIdxLocalX == 0) { + innerLayerCumulativeSize[0] = layerSize(TrackerTraits::layerPairs[0]); + for (uint32_t i = 1; i < nPairs; ++i) { + innerLayerCumulativeSize[i] = innerLayerCumulativeSize[i - 1] + layerSize(TrackerTraits::layerPairs[2 * i]); + } + ntot = innerLayerCumulativeSize[nPairs - 1]; + } + alpaka::syncBlockThreads(acc); + + // x runs faster + const uint32_t blockDimensionX(alpaka::getWorkDiv(acc)[dimIndexX]); + const auto& [firstElementIdxNoStrideX, endElementIdxNoStrideX] = + cms::alpakatools::element_index_range_in_block(acc, 0u, dimIndexX); + + uint32_t pairLayerId = 0; // cannot go backward + + // Outermost loop on Y + const uint32_t gridDimensionY(alpaka::getWorkDiv(acc)[dimIndexY]); + const auto& [firstElementIdxNoStrideY, endElementIdxNoStrideY] = + cms::alpakatools::element_index_range_in_grid(acc, 0u, dimIndexY); + uint32_t firstElementIdxY = firstElementIdxNoStrideY; + uint32_t endElementIdxY = endElementIdxNoStrideY; + + //const uint32_t incY = cms::alpakatools::requires_single_thread_per_block_v ? 1 : gridDimensionY; + for (uint32_t j = firstElementIdxY; j < ntot; j++) { + if (not cms::alpakatools::next_valid_element_index_strided( + j, firstElementIdxY, endElementIdxY, gridDimensionY, ntot)) + break; + + while (j >= innerLayerCumulativeSize[pairLayerId++]) + ; + --pairLayerId; // move to lower_bound ?? 
+ + ALPAKA_ASSERT_OFFLOAD(pairLayerId < nPairs); + ALPAKA_ASSERT_OFFLOAD(j < innerLayerCumulativeSize[pairLayerId]); + ALPAKA_ASSERT_OFFLOAD(0 == pairLayerId || j >= innerLayerCumulativeSize[pairLayerId - 1]); + + uint8_t inner = TrackerTraits::layerPairs[2 * pairLayerId]; + uint8_t outer = TrackerTraits::layerPairs[2 * pairLayerId + 1]; + ALPAKA_ASSERT_OFFLOAD(outer > inner); + + auto hoff = PhiBinner::histOff(outer); + auto i = (0 == pairLayerId) ? j : j - innerLayerCumulativeSize[pairLayerId - 1]; + i += offsets[inner]; + + ALPAKA_ASSERT_OFFLOAD(i >= offsets[inner]); + ALPAKA_ASSERT_OFFLOAD(i < offsets[inner + 1]); + + // found hit corresponding to our cuda thread, now do the job + if (hh[i].detectorIndex() > pixelClustering::maxNumModules) + continue; // invalid + + /* maybe clever, not effective when z0Cut is on + auto bpos = (mi%8)/4; // if barrel is 1 for z>0 + auto fpos = (outer>3) & (outer<7); + if ( ((inner<3) & (outer>3)) && bpos!=fpos) continue; + */ + + auto mez = hh[i].zGlobal(); + + if (mez < TrackerTraits::minz[pairLayerId] || mez > TrackerTraits::maxz[pairLayerId]) + continue; + + if (doClusterCut && outer > pixelTopology::last_barrel_layer && cuts.clusterCut(acc, hh, i)) + continue; + + auto mep = hh[i].iphi(); + auto mer = hh[i].rGlobal(); + + // all cuts: true if fails + auto ptcut = [&](int j, int16_t idphi) { + auto r2t4 = minRadius2T4; + auto ri = mer; + auto ro = hh[j].rGlobal(); + auto dphi = short2phi(idphi); + return dphi * dphi * (r2t4 - ri * ro) > (ro - ri) * (ro - ri); + }; + auto z0cutoff = [&](int j) { + auto zo = hh[j].zGlobal(); + auto ro = hh[j].rGlobal(); + auto dr = ro - mer; + return dr > TrackerTraits::maxr[pairLayerId] || dr < 0 || std::abs((mez * ro - mer * zo)) > z0cut * dr; + }; + + auto iphicut = cuts.phiCuts[pairLayerId]; + + auto kl = PhiBinner::bin(int16_t(mep - iphicut)); + auto kh = PhiBinner::bin(int16_t(mep + iphicut)); + auto incr = [](auto& k) { return k = (k + 1) % PhiBinner::nbins(); }; + +#ifdef GPU_DEBUG + 
int tot = 0; + int nmin = 0; + int tooMany = 0; +#endif + + auto khh = kh; + incr(khh); + for (auto kk = kl; kk != khh; incr(kk)) { +#ifdef GPU_DEBUG + if (kk != kl && kk != kh) + nmin += phiBinner.size(kk + hoff); +#endif + auto const* __restrict__ p = phiBinner.begin(kk + hoff); + auto const* __restrict__ e = phiBinner.end(kk + hoff); + auto const maxpIndex = e - p; + + // Here we parallelize in X + uint32_t firstElementIdxX = firstElementIdxNoStrideX; + uint32_t endElementIdxX = endElementIdxNoStrideX; + + for (uint32_t pIndex = firstElementIdxX; pIndex < maxpIndex; ++pIndex) { + if (not cms::alpakatools::next_valid_element_index_strided( + pIndex, firstElementIdxX, endElementIdxX, blockDimensionX, maxpIndex)) + break; + auto oi = p[pIndex]; // auto oi = __ldg(p); is not allowed since __ldg is device-only + ALPAKA_ASSERT_OFFLOAD(oi >= offsets[outer]); + ALPAKA_ASSERT_OFFLOAD(oi < offsets[outer + 1]); + auto mo = hh[oi].detectorIndex(); + + if (mo > pixelClustering::maxNumModules) + continue; // invalid + + if (doZ0Cut && z0cutoff(oi)) + continue; + + auto mop = hh[oi].iphi(); + uint16_t idphi = std::min(std::abs(int16_t(mop - mep)), std::abs(int16_t(mep - mop))); + + if (idphi > iphicut) + continue; + + if (doClusterCut && cuts.zSizeCut(acc, hh, i, oi)) + continue; + + if (doPtCut && ptcut(oi, idphi)) + continue; + + auto ind = alpaka::atomicAdd(acc, nCells, (uint32_t)1, alpaka::hierarchy::Blocks{}); + if (ind >= maxNumOfDoublets) { + alpaka::atomicSub(acc, nCells, (uint32_t)1, alpaka::hierarchy::Blocks{}); + break; + } // move to SimpleVector?? 
+ cells[ind].init(*cellNeighbors, *cellTracks, hh, pairLayerId, i, oi); + isOuterHitOfCell[oi].push_back(acc, ind); +#ifdef GPU_DEBUG + if (isOuterHitOfCell[oi].full()) + ++tooMany; + ++tot; +#endif + } + } +// #endif +#ifdef GPU_DEBUG + if (tooMany > 0 or tot > 0) + printf("OuterHitOfCell for %d in layer %d/%d, %d,%d %d, %d %.3f %.3f %s\n", + i, + inner, + outer, + nmin, + tot, + tooMany, + iphicut, + TrackerTraits::minz[pairLayerId], + TrackerTraits::maxz[pairLayerId], + tooMany > 0 ? "FULL!!" : "not full."); +#endif + } // loop in block... + } // namespace caPixelDoublets + } // namespace caPixelDoublets +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTriplets_CAPixelDoubletsAlgos_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/CAStructures.h b/RecoTracker/PixelSeeding/plugins/alpaka/CAStructures.h new file mode 100644 index 0000000000000..6ac7a90c724fc --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/CAStructures.h @@ -0,0 +1,52 @@ +#ifndef RecoPixelVertexing_PixelTriplets_CAStructures_h +#define RecoPixelVertexing_PixelTriplets_CAStructures_h + +#include "HeterogeneousCore/AlpakaInterface/interface/SimpleVector.h" +#include "HeterogeneousCore/AlpakaInterface/interface/VecArray.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" + +namespace caStructures { + + template + using CellNeighborsT = + cms::alpakatools::VecArray; + + template + using CellTracksT = cms::alpakatools::VecArray; + + template + using CellNeighborsVectorT = cms::alpakatools::SimpleVector>; + + template + using CellTracksVectorT = cms::alpakatools::SimpleVector>; + + template + using OuterHitOfCellContainerT = cms::alpakatools::VecArray; + + template + using TupleMultiplicityT = cms::alpakatools::OneToManyAssocRandomAccess; + + template + using HitToTupleT = + cms::alpakatools::OneToManyAssocRandomAccess; // 3.5 should be enough + + template + using TuplesContainerT = cms::alpakatools::OneToManyAssocRandomAccess; + + 
template + struct OuterHitOfCellT { + OuterHitOfCellContainerT* container; + int32_t offset; + constexpr auto& operator[](int i) { return container[i - offset]; } + constexpr auto const& operator[](int i) const { return container[i - offset]; } + }; + +} // namespace caStructures + +#endif diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/HelixFit.cc b/RecoTracker/PixelSeeding/plugins/alpaka/HelixFit.cc new file mode 100644 index 0000000000000..078cbe8de45a4 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/HelixFit.cc @@ -0,0 +1,21 @@ +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HelixFit.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + template + void HelixFit::allocate(TupleMultiplicity const *tupleMultiplicity, OutputSoAView &helix_fit_results) { + tuples_ = &helix_fit_results.hitIndices(); + tupleMultiplicity_ = tupleMultiplicity; + outputSoa_ = helix_fit_results; + + ALPAKA_ASSERT_OFFLOAD(tuples_); + ALPAKA_ASSERT_OFFLOAD(tupleMultiplicity_); + } + + template + void HelixFit::deallocate() {} + + template class HelixFit; + template class HelixFit; + template class HelixFit; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/HelixFit.h b/RecoTracker/PixelSeeding/plugins/alpaka/HelixFit.h new file mode 100644 index 0000000000000..908124bb83081 --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/HelixFit.h @@ -0,0 +1,93 @@ +#ifndef RecoPixelVertexing_PixelTriplets_HelixFit_h +#define RecoPixelVertexing_PixelTriplets_HelixFit_h + +#include +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "RecoTracker/PixelTrackFitting/interface/alpaka/FitResult.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" + +#include 
"CAStructures.h" +namespace riemannFit { + // TODO: Can this be taken from TrackerTraits or somewhere else? + // in case of memory issue can be made smaller + constexpr uint32_t maxNumberOfConcurrentFits = 32 * 1024; + constexpr uint32_t stride = maxNumberOfConcurrentFits; + using Matrix3x4d = Eigen::Matrix; + using Map3x4d = Eigen::Map >; + using Matrix6x4f = Eigen::Matrix; + using Map6x4f = Eigen::Map >; + + // hits + template + using Matrix3xNd = Eigen::Matrix; + template + using Map3xNd = Eigen::Map, 0, Eigen::Stride<3 * stride, stride> >; + // errors + template + using Matrix6xNf = Eigen::Matrix; + template + using Map6xNf = Eigen::Map, 0, Eigen::Stride<6 * stride, stride> >; + // fast fit + using Map4d = Eigen::Map >; + + template //a compile-time bounded for loop + constexpr void rolling_fits(F &&f) { + if constexpr (Start < End) { + f(std::integral_constant()); + rolling_fits(f); + } + } + +} // namespace riemannFit + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + template + class HelixFit { + public: + using TrackingRecHitSoAs = TrackingRecHitSoA; + + using HitView = TrackingRecHitSoAView; + using HitConstView = TrackingRecHitSoAConstView; + + using Tuples = typename reco::TrackSoA::HitContainer; + using OutputSoAView = reco::TrackSoAView; + + using TupleMultiplicity = caStructures::TupleMultiplicityT; + + using ParamsOnDevice = pixelCPEforDevice::ParamsOnDeviceT; + + explicit HelixFit(float bf, bool fitNas4) : bField_(bf), fitNas4_(fitNas4) {} + ~HelixFit() { deallocate(); } + + void setBField(double bField) { bField_ = bField; } + void launchRiemannKernels(const HitConstView &hv, + ParamsOnDevice const *cpeParams, + uint32_t nhits, + uint32_t maxNumberOfTuples, + Queue &queue); + void launchBrokenLineKernels(const HitConstView &hv, + ParamsOnDevice const *cpeParams, + uint32_t nhits, + uint32_t maxNumberOfTuples, + Queue &queue); + + void allocate(TupleMultiplicity const *tupleMultiplicity, OutputSoAView &helix_fit_results); + void deallocate(); + + 
private: + static constexpr uint32_t maxNumberOfConcurrentFits_ = riemannFit::maxNumberOfConcurrentFits; + + // forwarded + Tuples const *tuples_ = nullptr; + TupleMultiplicity const *tupleMultiplicity_ = nullptr; + OutputSoAView outputSoa_; + float bField_; + + const bool fitNas4_; + }; +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTriplets_plugins_HelixFit_h diff --git a/RecoTracker/PixelSeeding/plugins/alpaka/RiemannFit.dev.cc b/RecoTracker/PixelSeeding/plugins/alpaka/RiemannFit.dev.cc new file mode 100644 index 0000000000000..5aa202700580c --- /dev/null +++ b/RecoTracker/PixelSeeding/plugins/alpaka/RiemannFit.dev.cc @@ -0,0 +1,401 @@ +// +// Author: Felice Pantaleo, CERN +// + +#include +#include + +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "DataFormats/TrackingRecHitSoA/interface/TrackingRecHitsSoA.h" +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforDevice.h" +#include "RecoTracker/PixelTrackFitting/interface/alpaka/RiemannFit.h" +#include "HelixFit.h" +#include "CAStructures.h" + +template +using Tuples = typename reco::TrackSoA::HitContainer; +template +using OutputSoAView = reco::TrackSoAView; +template +using TupleMultiplicity = caStructures::TupleMultiplicityT; + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using namespace alpaka; + using namespace cms::alpakatools; + + template + class Kernel_FastFit { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + Tuples const *__restrict__ foundNtuplets, + TupleMultiplicity const *__restrict__ tupleMultiplicity, + uint32_t nHits, + TrackingRecHitSoAConstView hh, + pixelCPEforDevice::ParamsOnDeviceT const *__restrict__ cpeParams, + double *__restrict__ phits, + float *__restrict__ phits_ge, + double *__restrict__ pfast_fit, + uint32_t offset) const { + constexpr uint32_t hitsInFit = N; 
+ + ALPAKA_ASSERT_OFFLOAD(hitsInFit <= nHits); + + ALPAKA_ASSERT_OFFLOAD(pfast_fit); + ALPAKA_ASSERT_OFFLOAD(foundNtuplets); + ALPAKA_ASSERT_OFFLOAD(tupleMultiplicity); + + // look in bin for this hit multiplicity + +#ifdef RIEMANN_DEBUG + const uint32_t threadIdx(alpaka::getIdx(acc)[0u]); + if (cms::alpakatools::once_per_grid(acc)) + printf("%d Ntuple of size %d for %d hits to fit\n", tupleMultiplicity->size(nHits), nHits, hitsInFit); +#endif + + const auto nt = riemannFit::maxNumberOfConcurrentFits; + for (auto local_idx : cms::alpakatools::elements_with_stride(acc, nt)) { + auto tuple_idx = local_idx + offset; + if (tuple_idx >= tupleMultiplicity->size(nHits)) + break; + + // get it from the ntuple container (one to one to helix) + auto tkid = *(tupleMultiplicity->begin(nHits) + tuple_idx); + ALPAKA_ASSERT_OFFLOAD(static_cast(tkid) < foundNtuplets->nOnes()); + + ALPAKA_ASSERT_OFFLOAD(foundNtuplets->size(tkid) == nHits); + + riemannFit::Map3xNd hits(phits + local_idx); + riemannFit::Map4d fast_fit(pfast_fit + local_idx); + riemannFit::Map6xNf hits_ge(phits_ge + local_idx); + + // Prepare data structure + auto const *hitId = foundNtuplets->begin(tkid); + for (unsigned int i = 0; i < hitsInFit; ++i) { + auto hit = hitId[i]; + float ge[6]; + cpeParams->detParams(hh[hit].detectorIndex()).frame.toGlobal(hh[hit].xerrLocal(), 0, hh[hit].yerrLocal(), ge); + + hits.col(i) << hh[hit].xGlobal(), hh[hit].yGlobal(), hh[hit].zGlobal(); + hits_ge.col(i) << ge[0], ge[1], ge[2], ge[3], ge[4], ge[5]; + } + riemannFit::fastFit(acc, hits, fast_fit); + + // no NaN here.... 
+ ALPAKA_ASSERT_OFFLOAD(fast_fit(0) == fast_fit(0)); + ALPAKA_ASSERT_OFFLOAD(fast_fit(1) == fast_fit(1)); + ALPAKA_ASSERT_OFFLOAD(fast_fit(2) == fast_fit(2)); + ALPAKA_ASSERT_OFFLOAD(fast_fit(3) == fast_fit(3)); + } + } + }; + + template + class Kernel_CircleFit { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TupleMultiplicity const *__restrict__ tupleMultiplicity, + uint32_t nHits, + double bField, + double *__restrict__ phits, + float *__restrict__ phits_ge, + double *__restrict__ pfast_fit_input, + riemannFit::CircleFit *circle_fit, + uint32_t offset) const { + ALPAKA_ASSERT_OFFLOAD(circle_fit); + ALPAKA_ASSERT_OFFLOAD(N <= nHits); + + // same as above... + + // look in bin for this hit multiplicity + const auto nt = riemannFit::maxNumberOfConcurrentFits; + for (auto local_idx : cms::alpakatools::elements_with_stride(acc, nt)) { + auto tuple_idx = local_idx + offset; + if (tuple_idx >= tupleMultiplicity->size(nHits)) + break; + + riemannFit::Map3xNd hits(phits + local_idx); + riemannFit::Map4d fast_fit(pfast_fit_input + local_idx); + riemannFit::Map6xNf hits_ge(phits_ge + local_idx); + + riemannFit::VectorNd rad = (hits.block(0, 0, 2, N).colwise().norm()); + + riemannFit::Matrix2Nd hits_cov = riemannFit::Matrix2Nd::Zero(); + riemannFit::loadCovariance2D(acc, hits_ge, hits_cov); + + circle_fit[local_idx] = + riemannFit::circleFit(acc, hits.block(0, 0, 2, N), hits_cov, fast_fit, rad, bField, true); + +#ifdef RIEMANN_DEBUG +// auto tkid = *(tupleMultiplicity->begin(nHits) + tuple_idx); +// printf("kernelCircleFit circle.par(0,1,2): %d %f,%f,%f\n", tkid, +// circle_fit[local_idx].par(0), circle_fit[local_idx].par(1), circle_fit[local_idx].par(2)); +#endif + } + } + }; + + template + class Kernel_LineFit { + public: + template >> + ALPAKA_FN_ACC void operator()(TAcc const &acc, + TupleMultiplicity const *__restrict__ tupleMultiplicity, + uint32_t nHits, + double bField, + OutputSoAView results_view, + double *__restrict__ phits, + float 
*__restrict__ phits_ge, + double *__restrict__ pfast_fit_input, + riemannFit::CircleFit *__restrict__ circle_fit, + uint32_t offset) const { + ALPAKA_ASSERT_OFFLOAD(circle_fit); + ALPAKA_ASSERT_OFFLOAD(N <= nHits); + + // same as above... + + // look in bin for this hit multiplicity + const auto nt = riemannFit::maxNumberOfConcurrentFits; + for (auto local_idx : cms::alpakatools::elements_with_stride(acc, nt)) { + auto tuple_idx = local_idx + offset; + if (tuple_idx >= tupleMultiplicity->size(nHits)) + break; + + // get it for the ntuple container (one to one to helix) + int32_t tkid = *(tupleMultiplicity->begin(nHits) + tuple_idx); + + riemannFit::Map3xNd hits(phits + local_idx); + riemannFit::Map4d fast_fit(pfast_fit_input + local_idx); + riemannFit::Map6xNf hits_ge(phits_ge + local_idx); + + auto const &line_fit = riemannFit::lineFit(acc, hits, hits_ge, circle_fit[local_idx], fast_fit, bField, true); + + riemannFit::fromCircleToPerigee(acc, circle_fit[local_idx]); + + TracksUtilities::copyFromCircle(results_view, + circle_fit[local_idx].par, + circle_fit[local_idx].cov, + line_fit.par, + line_fit.cov, + 1.f / float(bField), + tkid); + results_view[tkid].pt() = bField / std::abs(circle_fit[local_idx].par(2)); + results_view[tkid].eta() = asinhf(line_fit.par(0)); + results_view[tkid].chi2() = (circle_fit[local_idx].chi2 + line_fit.chi2) / (2 * N - 5); + +#ifdef RIEMANN_DEBUG + printf("kernelLineFit size %d for %d hits circle.par(0,1,2): %d %f,%f,%f\n", + N, + nHits, + tkid, + circle_fit[local_idx].par(0), + circle_fit[local_idx].par(1), + circle_fit[local_idx].par(2)); + printf("kernelLineFit line.par(0,1): %d %f,%f\n", tkid, line_fit.par(0), line_fit.par(1)); + printf("kernelLineFit chi2 cov %f/%f %e,%e,%e,%e,%e\n", + circle_fit[local_idx].chi2, + line_fit.chi2, + circle_fit[local_idx].cov(0, 0), + circle_fit[local_idx].cov(1, 1), + circle_fit[local_idx].cov(2, 2), + line_fit.cov(0, 0), + line_fit.cov(1, 1)); +#endif + } + } + }; + + template + void 
HelixFit::launchRiemannKernels(const TrackingRecHitSoAConstView &hv, + pixelCPEforDevice::ParamsOnDeviceT const *cpeParams, + uint32_t nhits, + uint32_t maxNumberOfTuples, + Queue &queue) { + assert(tuples_); + + auto blockSize = 64; + auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize; + const auto workDivTriplets = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + const auto workDivQuadsPenta = cms::alpakatools::make_workdiv(numberOfBlocks / 4, blockSize); + + // Fit internals + auto hitsDevice = cms::alpakatools::make_device_buffer( + queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix3xNd<4>) / sizeof(double)); + auto hits_geDevice = cms::alpakatools::make_device_buffer( + queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::Matrix6x4f) / sizeof(float)); + auto fast_fit_resultsDevice = cms::alpakatools::make_device_buffer( + queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::Vector4d) / sizeof(double)); + auto circle_fit_resultsDevice_holder = + cms::alpakatools::make_device_buffer(queue, maxNumberOfConcurrentFits_ * sizeof(riemannFit::CircleFit)); + riemannFit::CircleFit *circle_fit_resultsDevice_ = + (riemannFit::CircleFit *)(circle_fit_resultsDevice_holder.data()); + + for (uint32_t offset = 0; offset < maxNumberOfTuples; offset += maxNumberOfConcurrentFits_) { + // triplets + alpaka::exec(queue, + workDivTriplets, + Kernel_FastFit<3, TrackerTraits>{}, + tuples_, + tupleMultiplicity_, + 3, + hv, + cpeParams, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + offset); + + alpaka::exec(queue, + workDivTriplets, + Kernel_CircleFit<3, TrackerTraits>{}, + tupleMultiplicity_, + 3, + bField_, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + + alpaka::exec(queue, + workDivTriplets, + Kernel_LineFit<3, TrackerTraits>{}, + tupleMultiplicity_, + 3, + bField_, + outputSoa_, + hitsDevice.data(), + hits_geDevice.data(), 
+ fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + + // quads + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_FastFit<4, TrackerTraits>{}, + tuples_, + tupleMultiplicity_, + 4, + hv, + cpeParams, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_CircleFit<4, TrackerTraits>{}, + tupleMultiplicity_, + 4, + bField_, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_LineFit<4, TrackerTraits>{}, + tupleMultiplicity_, + 4, + bField_, + outputSoa_, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + + if (fitNas4_) { + // penta + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_FastFit<4, TrackerTraits>{}, + tuples_, + tupleMultiplicity_, + 5, + hv, + cpeParams, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_CircleFit<4, TrackerTraits>{}, + tupleMultiplicity_, + 5, + bField_, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_LineFit<4, TrackerTraits>{}, + tupleMultiplicity_, + 5, + bField_, + outputSoa_, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + } else { + // penta all 5 + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_FastFit<5, TrackerTraits>{}, + tuples_, + tupleMultiplicity_, + 5, + hv, + cpeParams, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_CircleFit<5, TrackerTraits>{}, + tupleMultiplicity_, + 5, + bField_, + hitsDevice.data(), + hits_geDevice.data(), + 
fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + + alpaka::exec(queue, + workDivQuadsPenta, + Kernel_LineFit<5, TrackerTraits>{}, + tupleMultiplicity_, + 5, + bField_, + outputSoa_, + hitsDevice.data(), + hits_geDevice.data(), + fast_fit_resultsDevice.data(), + circle_fit_resultsDevice_, + offset); + } + } + } + + template class HelixFit; + template class HelixFit; + template class HelixFit; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelSeeding/plugins/gpuPixelDoubletsAlgos.h b/RecoTracker/PixelSeeding/plugins/gpuPixelDoubletsAlgos.h index b86ba09949416..583021081d534 100644 --- a/RecoTracker/PixelSeeding/plugins/gpuPixelDoubletsAlgos.h +++ b/RecoTracker/PixelSeeding/plugins/gpuPixelDoubletsAlgos.h @@ -9,15 +9,15 @@ #include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h" #include "DataFormats/Math/interface/approx_atan2.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" #include "HeterogeneousCore/CUDAUtilities/interface/VecArray.h" #include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h" -#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" #include "CAStructures.h" #include "GPUCACell.h" -// #define GPU_DEBUG -// #define NTUPLE_DEBUG +//#define GPU_DEBUG +//#define NTUPLE_DEBUG namespace gpuPixelDoublets { @@ -287,8 +287,8 @@ namespace gpuPixelDoublets { } // #endif #ifdef GPU_DEBUG - if (tooMany > 0) - printf("OuterHitOfCell full for %d in layer %d/%d, %d,%d %d, %d %.3f %.3f\n", + if (tooMany > 0 || tot > 0) + printf("OuterHitOfCell for %d in layer %d/%d, %d,%d %d, %d %.3f %.3f %s\n", i, inner, outer, @@ -297,7 +297,8 @@ namespace gpuPixelDoublets { tooMany, iphicut, TrackerTraits::minz[pairLayerId], - TrackerTraits::maxz[pairLayerId]); + TrackerTraits::maxz[pairLayerId], + tooMany > 0 ? "FULL!!" : "not full."); #endif } // loop in block... 
} diff --git a/RecoTracker/PixelSeeding/test/BuildFile.xml b/RecoTracker/PixelSeeding/test/BuildFile.xml index 37e12c0ec6aed..74e7849e410e4 100644 --- a/RecoTracker/PixelSeeding/test/BuildFile.xml +++ b/RecoTracker/PixelSeeding/test/BuildFile.xml @@ -28,3 +28,10 @@ + + + + + + + diff --git a/RecoTracker/PixelSeeding/test/alpaka/CAsizes_t.cpp b/RecoTracker/PixelSeeding/test/alpaka/CAsizes_t.cpp new file mode 100644 index 0000000000000..770957d9a79c0 --- /dev/null +++ b/RecoTracker/PixelSeeding/test/alpaka/CAsizes_t.cpp @@ -0,0 +1,40 @@ +#include "RecoTracker/PixelSeeding/plugins/alpaka/CACell.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include +#include + +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +template +void print() { + std::cout << "size of " << typeid(T).name() << ' ' << sizeof(T) << std::endl; +} + +int main() { + using namespace pixelTopology; + using namespace caStructures; + //for Phase-I + print>(); + print>(); + print>(); + print>(); + print>(); + print>(); + print>(); + + print>(); + + //for Phase-II + + print>(); + print>(); + print>(); + print>(); + print>(); + print>(); + print>(); + + print>(); + + return 0; +} diff --git a/RecoTracker/PixelTrackFitting/BuildFile.xml b/RecoTracker/PixelTrackFitting/BuildFile.xml index b57493ad60503..c21f4634d0308 100644 --- a/RecoTracker/PixelTrackFitting/BuildFile.xml +++ b/RecoTracker/PixelTrackFitting/BuildFile.xml @@ -1,3 +1,4 @@ + @@ -13,6 +14,7 @@ + diff --git a/RecoTracker/PixelTrackFitting/interface/alpaka/BrokenLine.h b/RecoTracker/PixelTrackFitting/interface/alpaka/BrokenLine.h new file mode 100644 index 0000000000000..9e656e2de18dc --- /dev/null +++ b/RecoTracker/PixelTrackFitting/interface/alpaka/BrokenLine.h @@ -0,0 +1,634 @@ +#ifndef RecoPixelVertexing_PixelTrackFitting_interface_BrokenLine_h +#define RecoPixelVertexing_PixelTrackFitting_interface_BrokenLine_h +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include 
"RecoTracker/PixelTrackFitting/interface/alpaka/FitUtils.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace brokenline { + using namespace cms::alpakatools; + using namespace ::riemannFit; + + //!< Karimäki's parameters: (phi, d, k=1/R) + /*!< covariance matrix: \n + |cov(phi,phi)|cov( d ,phi)|cov( k ,phi)| \n + |cov(phi, d )|cov( d , d )|cov( k , d )| \n + |cov(phi, k )|cov( d , k )|cov( k , k )| \n + as defined in Karimäki V., 1990, Effective circle fitting for particle trajectories, + Nucl. Instr. and Meth. A305 (1991) 187. + */ + using karimaki_circle_fit = riemannFit::CircleFit; + + /*! + \brief data needed for the Broken Line fit procedure. + */ + template + struct PreparedBrokenLineData { + int qCharge; //!< particle charge + riemannFit::Matrix2xNd radii; //!< xy data in the system in which the pre-fitted center is the origin + riemannFit::VectorNd sTransverse; //!< total distance traveled in the transverse plane + // starting from the pre-fitted closest approach + riemannFit::VectorNd sTotal; //!< total distance traveled (three-dimensional) + riemannFit::VectorNd zInSZplane; //!< orthogonal coordinate to the pre-fitted line in the sz plane + riemannFit::VectorNd varBeta; //!< kink angles in the SZ plane + }; + + /*! + \brief Computes the Coulomb multiple scattering variance of the planar angle. + + \param length length of the track in the material. + \param bField magnetic field in Gev/cm/c. + \param radius radius of curvature (needed to evaluate p). + \param layer denotes which of the four layers of the detector is the endpoint of the + * multiple scattered track. For example, if Layer=3, then the particle has + * just gone through the material between the second and the third layer. + + \todo add another Layer variable to identify also the start point of the track, + * so if there are missing hits or multiple hits, the part of the detector that + * the particle has traversed can be exactly identified. 
+ + \warning the formula used here assumes beta=1, and so neglects the dependence + * of theta_0 on the mass of the particle at fixed momentum. + + \return the variance of the planar angle ((theta_0)^2 /3). + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE double multScatt( + const TAcc& acc, const double& length, const double bField, const double radius, int layer, double slope) { + // limit R to 20GeV... + auto pt2 = alpaka::math::min(acc, 20., bField * radius); + pt2 *= pt2; + constexpr double inv_X0 = 0.06 / 16.; //!< inverse of radiation length of the material in cm + //if(Layer==1) XXI_0=0.06/16.; + // else XXI_0=0.06/16.; + //XX_0*=1; + + //! number between 1/3 (uniform material) and 1 (thin scatterer) to be manually tuned + constexpr double geometry_factor = 0.7; + constexpr double fact = geometry_factor * riemannFit::sqr(13.6 / 1000.); + return fact / (pt2 * (1. + riemannFit::sqr(slope))) * (alpaka::math::abs(acc, length) * inv_X0) * + riemannFit::sqr(1. + 0.038 * log(alpaka::math::abs(acc, length) * inv_X0)); + } + + /*! + \brief Computes the 2D rotation matrix that transforms the line y=slope*x into the line y=0. + + \param slope tangent of the angle of rotation. + + \return 2D rotation matrix. + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE riemannFit::Matrix2d rotationMatrix(const TAcc& acc, double slope) { + riemannFit::Matrix2d rot; + rot(0, 0) = 1. / alpaka::math::sqrt(acc, 1. + riemannFit::sqr(slope)); + rot(0, 1) = slope * rot(0, 0); + rot(1, 0) = -rot(0, 1); + rot(1, 1) = rot(0, 0); + return rot; + } + + /*! + \brief Changes the Karimäki parameters (and consequently their covariance matrix) under a + * translation of the coordinate system, such that the old origin has coordinates (x0,y0) + * in the new coordinate system. The formulas are taken from Karimäki V., 1990, Effective + * circle fitting for particle trajectories, Nucl. Instr. and Meth. A305 (1991) 187. + + \param circle circle fit in the old coordinate system. 
circle.par(0) is phi, circle.par(1) is d and circle.par(2) is rho. + \param x0 x coordinate of the translation vector. + \param y0 y coordinate of the translation vector. + \param jacobian passed by reference in order to save stack. + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void translateKarimaki( + const TAcc& acc, karimaki_circle_fit& circle, double x0, double y0, riemannFit::Matrix3d& jacobian) { + // Avoid multiple access to the circle.par vector. + using scalar = typename std::remove_reference::type; + scalar phi = circle.par(0); + scalar dee = circle.par(1); + scalar rho = circle.par(2); + + // Avoid repeated trig. computations + scalar sinPhi = alpaka::math::sin(acc, phi); + scalar cosPhi = alpaka::math::cos(acc, phi); + + // Intermediate computations for the circle parameters + scalar deltaPara = x0 * cosPhi + y0 * sinPhi; + scalar deltaOrth = x0 * sinPhi - y0 * cosPhi + dee; + scalar tempSmallU = 1 + rho * dee; + scalar tempC = -rho * y0 + tempSmallU * cosPhi; + scalar tempB = rho * x0 + tempSmallU * sinPhi; + scalar tempA = 2. * deltaOrth + rho * (riemannFit::sqr(deltaOrth) + riemannFit::sqr(deltaPara)); + scalar tempU = alpaka::math::sqrt(acc, 1. + rho * tempA); + + // Intermediate computations for the error matrix transform + scalar xi = 1. / (riemannFit::sqr(tempB) + riemannFit::sqr(tempC)); + scalar tempV = 1. + rho * deltaOrth; + scalar lambda = (0.5 * tempA) / (riemannFit::sqr(1. + tempU) * tempU); + scalar mu = 1. / (tempU * (1. + tempU)) + rho * lambda; + scalar zeta = riemannFit::sqr(deltaOrth) + riemannFit::sqr(deltaPara); + jacobian << xi * tempSmallU * tempV, -xi * riemannFit::sqr(rho) * deltaOrth, xi * deltaPara, + 2. * mu * tempSmallU * deltaPara, 2. * mu * tempV, mu * zeta - lambda * tempA, 0, 0, 1.; + + // translated circle parameters + // phi + circle.par(0) = alpaka::math::atan2(acc, tempB, tempC); + // d + circle.par(1) = tempA / (1 + tempU); + // rho after translation. 
It is invariant, so noop + // circle.par(2)= rho; + + // translated error matrix + circle.cov = jacobian * circle.cov * jacobian.transpose(); + } + + /*! + \brief Computes the data needed for the Broken Line fit procedure that are mainly common for the circle and the line fit. + + \param hits hits coordinates. + \param fast_fit pre-fit result in the form (X0,Y0,R,tan(theta)). + \param bField magnetic field in Gev/cm/c. + \param results PreparedBrokenLineData to be filled (see description of PreparedBrokenLineData). + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void __attribute__((always_inline)) + prepareBrokenLineData(const TAcc& acc, + const M3xN& hits, + const V4& fast_fit, + const double bField, + PreparedBrokenLineData& results) { + riemannFit::Vector2d dVec; + riemannFit::Vector2d eVec; + + int mId = 1; + + if constexpr (n > 3) { + riemannFit::Vector2d middle = 0.5 * (hits.block(0, n - 1, 2, 1) + hits.block(0, 0, 2, 1)); + auto d1 = (hits.block(0, n / 2, 2, 1) - middle).squaredNorm(); + auto d2 = (hits.block(0, n / 2 - 1, 2, 1) - middle).squaredNorm(); + mId = d1 < d2 ? n / 2 : n / 2 - 1; + } + + dVec = hits.block(0, mId, 2, 1) - hits.block(0, 0, 2, 1); + eVec = hits.block(0, n - 1, 2, 1) - hits.block(0, mId, 2, 1); + results.qCharge = riemannFit::cross2D(acc, dVec, eVec) > 0 ? 
-1 : 1; + + const double slope = -results.qCharge / fast_fit(3); + + riemannFit::Matrix2d rotMat = rotationMatrix(acc, slope); + + // calculate radii and s + results.radii = hits.block(0, 0, 2, n) - fast_fit.head(2) * riemannFit::MatrixXd::Constant(1, n, 1); + eVec = -fast_fit(2) * fast_fit.head(2) / fast_fit.head(2).norm(); + for (u_int i = 0; i < n; i++) { + dVec = results.radii.block(0, i, 2, 1); + results.sTransverse(i) = + results.qCharge * fast_fit(2) * + alpaka::math::atan2( + acc, riemannFit::cross2D(acc, dVec, eVec), dVec.dot(eVec)); // calculates the arc length + } + riemannFit::VectorNd zVec = hits.block(2, 0, 1, n).transpose(); + + //calculate sTotal and zVec + riemannFit::Matrix2xNd pointsSZ = riemannFit::Matrix2xNd::Zero(); + for (u_int i = 0; i < n; i++) { + pointsSZ(0, i) = results.sTransverse(i); + pointsSZ(1, i) = zVec(i); + pointsSZ.block(0, i, 2, 1) = rotMat * pointsSZ.block(0, i, 2, 1); + } + results.sTotal = pointsSZ.block(0, 0, 1, n).transpose(); + results.zInSZplane = pointsSZ.block(1, 0, 1, n).transpose(); + + //calculate varBeta + results.varBeta(0) = results.varBeta(n - 1) = 0; + for (u_int i = 1; i < n - 1; i++) { + results.varBeta(i) = + multScatt(acc, results.sTotal(i + 1) - results.sTotal(i), bField, fast_fit(2), i + 2, slope) + + multScatt(acc, results.sTotal(i) - results.sTotal(i - 1), bField, fast_fit(2), i + 1, slope); + } + } + + /*! + \brief Computes the n-by-n band matrix obtained minimizing the Broken Line's cost function w.r.t u. + * This is the whole matrix in the case of the line fit and the main n-by-n block in the case + * of the circle fit. + + \param weights weights of the first part of the cost function, the one with the measurements + * and not the angles (\sum_{i=1}^n w*(y_i-u_i)^2). + \param sTotal total distance traveled by the particle from the pre-fitted closest approach. + \param varBeta kink angles' variance. 
+ + \return the n-by-n matrix of the linear system + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE riemannFit::MatrixNd matrixC_u(const TAcc& acc, + const riemannFit::VectorNd& weights, + const riemannFit::VectorNd& sTotal, + const riemannFit::VectorNd& varBeta) { + riemannFit::MatrixNd c_uMat = riemannFit::MatrixNd::Zero(); + for (u_int i = 0; i < n; i++) { + c_uMat(i, i) = weights(i); + if (i > 1) + c_uMat(i, i) += 1. / (varBeta(i - 1) * riemannFit::sqr(sTotal(i) - sTotal(i - 1))); + if (i > 0 && i < n - 1) + c_uMat(i, i) += + (1. / varBeta(i)) * riemannFit::sqr((sTotal(i + 1) - sTotal(i - 1)) / + ((sTotal(i + 1) - sTotal(i)) * (sTotal(i) - sTotal(i - 1)))); + if (i < n - 2) + c_uMat(i, i) += 1. / (varBeta(i + 1) * riemannFit::sqr(sTotal(i + 1) - sTotal(i))); + + if (i > 0 && i < n - 1) + c_uMat(i, i + 1) = + 1. / (varBeta(i) * (sTotal(i + 1) - sTotal(i))) * + (-(sTotal(i + 1) - sTotal(i - 1)) / ((sTotal(i + 1) - sTotal(i)) * (sTotal(i) - sTotal(i - 1)))); + if (i < n - 2) + c_uMat(i, i + 1) += + 1. / (varBeta(i + 1) * (sTotal(i + 1) - sTotal(i))) * + (-(sTotal(i + 2) - sTotal(i)) / ((sTotal(i + 2) - sTotal(i + 1)) * (sTotal(i + 1) - sTotal(i)))); + + if (i < n - 2) + c_uMat(i, i + 2) = 1. / (varBeta(i + 1) * (sTotal(i + 2) - sTotal(i + 1)) * (sTotal(i + 1) - sTotal(i))); + + c_uMat(i, i) *= 0.5; + } + return c_uMat + c_uMat.transpose(); + } + + /*! + \brief A very fast helix fit. + + \param hits the measured hits. + + \return (X0,Y0,R,tan(theta)). + + \warning sign of theta is (intentionally, for now) mistaken for negative charges. 
+ */ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void fastFit(const TAcc& acc, const M3xN& hits, V4& result) { + constexpr uint32_t n = M3xN::ColsAtCompileTime; + + int mId = 1; + + if constexpr (n > 3) { + riemannFit::Vector2d middle = 0.5 * (hits.block(0, n - 1, 2, 1) + hits.block(0, 0, 2, 1)); + auto d1 = (hits.block(0, n / 2, 2, 1) - middle).squaredNorm(); + auto d2 = (hits.block(0, n / 2 - 1, 2, 1) - middle).squaredNorm(); + mId = d1 < d2 ? n / 2 : n / 2 - 1; + } + + const riemannFit::Vector2d a = hits.block(0, mId, 2, 1) - hits.block(0, 0, 2, 1); + const riemannFit::Vector2d b = hits.block(0, n - 1, 2, 1) - hits.block(0, mId, 2, 1); + const riemannFit::Vector2d c = hits.block(0, 0, 2, 1) - hits.block(0, n - 1, 2, 1); + + auto tmp = 0.5 / riemannFit::cross2D(acc, c, a); + result(0) = hits(0, 0) - (a(1) * c.squaredNorm() + c(1) * a.squaredNorm()) * tmp; + result(1) = hits(1, 0) + (a(0) * c.squaredNorm() + c(0) * a.squaredNorm()) * tmp; + // check Wikipedia for these formulas + + result(2) = alpaka::math::sqrt(acc, a.squaredNorm() * b.squaredNorm() * c.squaredNorm()) / + (2. * alpaka::math::abs(acc, riemannFit::cross2D(acc, b, a))); + // Using Math Olympiad's formula R=abc/(4A) + + const riemannFit::Vector2d d = hits.block(0, 0, 2, 1) - result.head(2); + const riemannFit::Vector2d e = hits.block(0, n - 1, 2, 1) - result.head(2); + + result(3) = result(2) * atan2(riemannFit::cross2D(acc, d, e), d.dot(e)) / (hits(2, n - 1) - hits(2, 0)); + // ds/dz slope between last and first point + } + + /*! + \brief Performs the Broken Line fit in the curved track case (that is, the fit + * parameters are the interceptions u and the curvature correction \Delta\kappa). + + \param hits hits coordinates. + \param hits_cov hits covariance matrix. + \param fast_fit pre-fit result in the form (X0,Y0,R,tan(theta)). + \param bField magnetic field in Gev/cm/c. + \param data PreparedBrokenLineData. 
+ \param circle_results struct to be filled with the results in this form: + -par parameter of the line in this form: (phi, d, k); \n + -cov covariance matrix of the fitted parameter; \n + -chi2 value of the cost function in the minimum. + + \details The function implements the steps 2 and 3 of the Broken Line fit + * with the curvature correction.\n + * The step 2 is the least square fit, done by imposing the minimum constraint on + * the cost function and solving the consequent linear system. It determines the + * fitted parameters u and \Delta\kappa and their covariance matrix. + * The step 3 is the correction of the fast pre-fitted parameters for the innermost + * part of the track. It is first done in a comfortable coordinate system (the one + * in which the first hit is the origin) and then the parameters and their + * covariance matrix are transformed to the original coordinate system. + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void circleFit(const TAcc& acc, + const M3xN& hits, + const M6xN& hits_ge, + const V4& fast_fit, + const double bField, + PreparedBrokenLineData& data, + karimaki_circle_fit& circle_results) { + circle_results.qCharge = data.qCharge; + auto& radii = data.radii; + const auto& sTransverse = data.sTransverse; + const auto& sTotal = data.sTotal; + auto& zInSZplane = data.zInSZplane; + auto& varBeta = data.varBeta; + const double slope = -circle_results.qCharge / fast_fit(3); + varBeta *= 1. + riemannFit::sqr(slope); // the kink angles are projected! 
+ + for (u_int i = 0; i < n; i++) { + zInSZplane(i) = radii.block(0, i, 2, 1).norm() - fast_fit(2); + } + + riemannFit::Matrix2d vMat; // covariance matrix + riemannFit::VectorNd weightsVec; // weights + riemannFit::Matrix2d rotMat; // rotation matrix point by point + for (u_int i = 0; i < n; i++) { + vMat(0, 0) = hits_ge.col(i)[0]; // x errors + vMat(0, 1) = vMat(1, 0) = hits_ge.col(i)[1]; // cov_xy + vMat(1, 1) = hits_ge.col(i)[2]; // y errors + rotMat = rotationMatrix(acc, -radii(0, i) / radii(1, i)); + weightsVec(i) = + 1. / ((rotMat * vMat * rotMat.transpose())(1, 1)); // compute the orthogonal weight point by point + } + + riemannFit::VectorNplusONEd r_uVec; + r_uVec(n) = 0; + for (u_int i = 0; i < n; i++) { + r_uVec(i) = weightsVec(i) * zInSZplane(i); + } + + riemannFit::MatrixNplusONEd c_uMat; + c_uMat.block(0, 0, n, n) = matrixC_u(acc, weightsVec, sTransverse, varBeta); + c_uMat(n, n) = 0; + //add the border to the c_uMat matrix + for (u_int i = 0; i < n; i++) { + c_uMat(i, n) = 0; + if (i > 0 && i < n - 1) { + c_uMat(i, n) += + -(sTransverse(i + 1) - sTransverse(i - 1)) * (sTransverse(i + 1) - sTransverse(i - 1)) / + (2. * varBeta(i) * (sTransverse(i + 1) - sTransverse(i)) * (sTransverse(i) - sTransverse(i - 1))); + } + if (i > 1) { + c_uMat(i, n) += + (sTransverse(i) - sTransverse(i - 2)) / (2. * varBeta(i - 1) * (sTransverse(i) - sTransverse(i - 1))); + } + if (i < n - 2) { + c_uMat(i, n) += + (sTransverse(i + 2) - sTransverse(i)) / (2. * varBeta(i + 1) * (sTransverse(i + 1) - sTransverse(i))); + } + c_uMat(n, i) = c_uMat(i, n); + if (i > 0 && i < n - 1) + c_uMat(n, n) += riemannFit::sqr(sTransverse(i + 1) - sTransverse(i - 1)) / (4. 
* varBeta(i)); + } + +#ifdef CPP_DUMP + std::cout << "CU5\n" << c_uMat << std::endl; +#endif + riemannFit::MatrixNplusONEd iMat; + math::cholesky::invert(c_uMat, iMat); +#ifdef CPP_DUMP + std::cout << "I5\n" << iMat << std::endl; +#endif + riemannFit::VectorNplusONEd uVec = iMat * r_uVec; // obtain the fitted parameters by solving the linear system + + // compute (phi, d_ca, k) in the system in which the midpoint of the first two corrected hits is the origin... + + radii.block(0, 0, 2, 1) /= radii.block(0, 0, 2, 1).norm(); + radii.block(0, 1, 2, 1) /= radii.block(0, 1, 2, 1).norm(); + + riemannFit::Vector2d dVec = hits.block(0, 0, 2, 1) + (-zInSZplane(0) + uVec(0)) * radii.block(0, 0, 2, 1); + riemannFit::Vector2d eVec = hits.block(0, 1, 2, 1) + (-zInSZplane(1) + uVec(1)) * radii.block(0, 1, 2, 1); + auto eMinusd = eVec - dVec; + auto eMinusd2 = eMinusd.squaredNorm(); + auto tmp1 = 1. / eMinusd2; + auto tmp2 = alpaka::math::sqrt(acc, riemannFit::sqr(fast_fit(2)) - 0.25 * eMinusd2); + + circle_results.par << atan2(eMinusd(1), eMinusd(0)), circle_results.qCharge * (tmp2 - fast_fit(2)), + circle_results.qCharge * (1. / fast_fit(2) + uVec(n)); + + tmp2 = 1. / tmp2; + + riemannFit::Matrix3d jacobian; + jacobian << (radii(1, 0) * eMinusd(0) - eMinusd(1) * radii(0, 0)) * tmp1, + (radii(1, 1) * eMinusd(0) - eMinusd(1) * radii(0, 1)) * tmp1, 0, + circle_results.qCharge * (eMinusd(0) * radii(0, 0) + eMinusd(1) * radii(1, 0)) * tmp2, + circle_results.qCharge * (eMinusd(0) * radii(0, 1) + eMinusd(1) * radii(1, 1)) * tmp2, 0, 0, 0, + circle_results.qCharge; + + circle_results.cov << iMat(0, 0), iMat(0, 1), iMat(0, n), iMat(1, 0), iMat(1, 1), iMat(1, n), iMat(n, 0), + iMat(n, 1), iMat(n, n); + + circle_results.cov = jacobian * circle_results.cov * jacobian.transpose(); + + //...Translate in the system in which the first corrected hit is the origin, adding the m.s. correction... 
+ + translateKarimaki(acc, circle_results, 0.5 * eMinusd(0), 0.5 * eMinusd(1), jacobian); + circle_results.cov(0, 0) += + (1 + riemannFit::sqr(slope)) * multScatt(acc, sTotal(1) - sTotal(0), bField, fast_fit(2), 2, slope); + + //...And translate back to the original system + + translateKarimaki(acc, circle_results, dVec(0), dVec(1), jacobian); + + // compute chi2 + circle_results.chi2 = 0; + for (u_int i = 0; i < n; i++) { + circle_results.chi2 += weightsVec(i) * riemannFit::sqr(zInSZplane(i) - uVec(i)); + if (i > 0 && i < n - 1) + circle_results.chi2 += + riemannFit::sqr(uVec(i - 1) / (sTransverse(i) - sTransverse(i - 1)) - + uVec(i) * (sTransverse(i + 1) - sTransverse(i - 1)) / + ((sTransverse(i + 1) - sTransverse(i)) * (sTransverse(i) - sTransverse(i - 1))) + + uVec(i + 1) / (sTransverse(i + 1) - sTransverse(i)) + + (sTransverse(i + 1) - sTransverse(i - 1)) * uVec(n) / 2) / + varBeta(i); + } + } + + /*! + \brief Performs the Broken Line fit in the straight track case (that is, the fit parameters are only the interceptions u). + + \param hits hits coordinates. + \param fast_fit pre-fit result in the form (X0,Y0,R,tan(theta)). + \param bField magnetic field in Gev/cm/c. + \param data PreparedBrokenLineData. + \param line_results struct to be filled with the results in this form: + -par parameter of the line in this form: (cot(theta), Zip); \n + -cov covariance matrix of the fitted parameter; \n + -chi2 value of the cost function in the minimum. + + \details The function implements the steps 2 and 3 of the Broken Line fit without + * the curvature correction.\n + * The step 2 is the least square fit, done by imposing the minimum constraint + * on the cost function and solving the consequent linear system. It determines + * the fitted parameters u and their covariance matrix. + * The step 3 is the correction of the fast pre-fitted parameters for the innermost + * part of the track. 
It is first done in a comfortable coordinate system (the one + * in which the first hit is the origin) and then the parameters and their covariance + * matrix are transformed to the original coordinate system. + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void lineFit(const TAcc& acc, + const M6xN& hits_ge, + const V4& fast_fit, + const double bField, + const PreparedBrokenLineData& data, + riemannFit::LineFit& line_results) { + const auto& radii = data.radii; + const auto& sTotal = data.sTotal; + const auto& zInSZplane = data.zInSZplane; + const auto& varBeta = data.varBeta; + + const double slope = -data.qCharge / fast_fit(3); + riemannFit::Matrix2d rotMat = rotationMatrix(acc, slope); + + riemannFit::Matrix3d vMat = riemannFit::Matrix3d::Zero(); // covariance matrix XYZ + riemannFit::Matrix2x3d jacobXYZtosZ = + riemannFit::Matrix2x3d::Zero(); // jacobian for computation of the error on s (xyz -> sz) + riemannFit::VectorNd weights = riemannFit::VectorNd::Zero(); + for (u_int i = 0; i < n; i++) { + vMat(0, 0) = hits_ge.col(i)[0]; // x errors + vMat(0, 1) = vMat(1, 0) = hits_ge.col(i)[1]; // cov_xy + vMat(0, 2) = vMat(2, 0) = hits_ge.col(i)[3]; // cov_xz + vMat(1, 1) = hits_ge.col(i)[2]; // y errors + vMat(2, 1) = vMat(1, 2) = hits_ge.col(i)[4]; // cov_yz + vMat(2, 2) = hits_ge.col(i)[5]; // z errors + auto tmp = 1. / radii.block(0, i, 2, 1).norm(); + jacobXYZtosZ(0, 0) = radii(1, i) * tmp; + jacobXYZtosZ(0, 1) = -radii(0, i) * tmp; + jacobXYZtosZ(1, 2) = 1.; + weights(i) = 1. 
/ ((rotMat * jacobXYZtosZ * vMat * jacobXYZtosZ.transpose() * rotMat.transpose())( + 1, 1)); // compute the orthogonal weight point by point + } + + riemannFit::VectorNd r_u; + for (u_int i = 0; i < n; i++) { + r_u(i) = weights(i) * zInSZplane(i); + } +#ifdef CPP_DUMP + std::cout << "CU4\n" << matrixC_u(w, sTotal, varBeta) << std::endl; +#endif + riemannFit::MatrixNd iMat; + math::cholesky::invert(matrixC_u(acc, weights, sTotal, varBeta), iMat); +#ifdef CPP_DUMP + std::cout << "I4\n" << iMat << std::endl; +#endif + + riemannFit::VectorNd uVec = iMat * r_u; // obtain the fitted parameters by solving the linear system + + // line parameters in the system in which the first hit is the origin and with axis along SZ + line_results.par << (uVec(1) - uVec(0)) / (sTotal(1) - sTotal(0)), uVec(0); + auto idiff = 1. / (sTotal(1) - sTotal(0)); + line_results.cov << (iMat(0, 0) - 2 * iMat(0, 1) + iMat(1, 1)) * riemannFit::sqr(idiff) + + multScatt(acc, sTotal(1) - sTotal(0), bField, fast_fit(2), 2, slope), + (iMat(0, 1) - iMat(0, 0)) * idiff, (iMat(0, 1) - iMat(0, 0)) * idiff, iMat(0, 0); + + // translate to the original SZ system + riemannFit::Matrix2d jacobian; + jacobian(0, 0) = 1.; + jacobian(0, 1) = 0; + jacobian(1, 0) = -sTotal(0); + jacobian(1, 1) = 1.; + line_results.par(1) += -line_results.par(0) * sTotal(0); + line_results.cov = jacobian * line_results.cov * jacobian.transpose(); + + // rotate to the original sz system + auto tmp = rotMat(0, 0) - line_results.par(0) * rotMat(0, 1); + jacobian(1, 1) = 1. 
/ tmp; + jacobian(0, 0) = jacobian(1, 1) * jacobian(1, 1); + jacobian(0, 1) = 0; + jacobian(1, 0) = line_results.par(1) * rotMat(0, 1) * jacobian(0, 0); + line_results.par(1) = line_results.par(1) * jacobian(1, 1); + line_results.par(0) = (rotMat(0, 1) + line_results.par(0) * rotMat(0, 0)) * jacobian(1, 1); + line_results.cov = jacobian * line_results.cov * jacobian.transpose(); + + // compute chi2 + line_results.chi2 = 0; + for (u_int i = 0; i < n; i++) { + line_results.chi2 += weights(i) * riemannFit::sqr(zInSZplane(i) - uVec(i)); + if (i > 0 && i < n - 1) + line_results.chi2 += riemannFit::sqr(uVec(i - 1) / (sTotal(i) - sTotal(i - 1)) - + uVec(i) * (sTotal(i + 1) - sTotal(i - 1)) / + ((sTotal(i + 1) - sTotal(i)) * (sTotal(i) - sTotal(i - 1))) + + uVec(i + 1) / (sTotal(i + 1) - sTotal(i))) / + varBeta(i); + } + } + + /*! + \brief Helix fit by three step: + -fast pre-fit (see Fast_fit() for further info); \n + -circle fit of the hits projected in the transverse plane by Broken Line algorithm (see BL_Circle_fit() for further info); \n + -line fit of the hits projected on the (pre-fitted) cilinder surface by Broken Line algorithm (see BL_Line_fit() for further info); \n + Points must be passed ordered (from inner to outer layer). + + \param hits Matrix3xNd hits coordinates in this form: \n + |x1|x2|x3|...|xn| \n + |y1|y2|y3|...|yn| \n + |z1|z2|z3|...|zn| + \param hits_cov Matrix3Nd covariance matrix in this form (()->cov()): \n + |(x1,x1)|(x2,x1)|(x3,x1)|(x4,x1)|.|(y1,x1)|(y2,x1)|(y3,x1)|(y4,x1)|.|(z1,x1)|(z2,x1)|(z3,x1)|(z4,x1)| \n + |(x1,x2)|(x2,x2)|(x3,x2)|(x4,x2)|.|(y1,x2)|(y2,x2)|(y3,x2)|(y4,x2)|.|(z1,x2)|(z2,x2)|(z3,x2)|(z4,x2)| \n + |(x1,x3)|(x2,x3)|(x3,x3)|(x4,x3)|.|(y1,x3)|(y2,x3)|(y3,x3)|(y4,x3)|.|(z1,x3)|(z2,x3)|(z3,x3)|(z4,x3)| \n + |(x1,x4)|(x2,x4)|(x3,x4)|(x4,x4)|.|(y1,x4)|(y2,x4)|(y3,x4)|(y4,x4)|.|(z1,x4)|(z2,x4)|(z3,x4)|(z4,x4)| \n + . . . . . . . . . . . . . . . 
\n + |(x1,y1)|(x2,y1)|(x3,y1)|(x4,y1)|.|(y1,y1)|(y2,y1)|(y3,x1)|(y4,y1)|.|(z1,y1)|(z2,y1)|(z3,y1)|(z4,y1)| \n + |(x1,y2)|(x2,y2)|(x3,y2)|(x4,y2)|.|(y1,y2)|(y2,y2)|(y3,x2)|(y4,y2)|.|(z1,y2)|(z2,y2)|(z3,y2)|(z4,y2)| \n + |(x1,y3)|(x2,y3)|(x3,y3)|(x4,y3)|.|(y1,y3)|(y2,y3)|(y3,x3)|(y4,y3)|.|(z1,y3)|(z2,y3)|(z3,y3)|(z4,y3)| \n + |(x1,y4)|(x2,y4)|(x3,y4)|(x4,y4)|.|(y1,y4)|(y2,y4)|(y3,x4)|(y4,y4)|.|(z1,y4)|(z2,y4)|(z3,y4)|(z4,y4)| \n + . . . . . . . . . . . . . . . \n + |(x1,z1)|(x2,z1)|(x3,z1)|(x4,z1)|.|(y1,z1)|(y2,z1)|(y3,z1)|(y4,z1)|.|(z1,z1)|(z2,z1)|(z3,z1)|(z4,z1)| \n + |(x1,z2)|(x2,z2)|(x3,z2)|(x4,z2)|.|(y1,z2)|(y2,z2)|(y3,z2)|(y4,z2)|.|(z1,z2)|(z2,z2)|(z3,z2)|(z4,z2)| \n + |(x1,z3)|(x2,z3)|(x3,z3)|(x4,z3)|.|(y1,z3)|(y2,z3)|(y3,z3)|(y4,z3)|.|(z1,z3)|(z2,z3)|(z3,z3)|(z4,z3)| \n + |(x1,z4)|(x2,z4)|(x3,z4)|(x4,z4)|.|(y1,z4)|(y2,z4)|(y3,z4)|(y4,z4)|.|(z1,z4)|(z2,z4)|(z3,z4)|(z4,z4)| + \param bField magnetic field in the center of the detector in Gev/cm/c, in order to perform the p_t calculation. + + \warning see BL_Circle_fit(), BL_Line_fit() and Fast_fit() warnings. + + \bug see BL_Circle_fit(), BL_Line_fit() and Fast_fit() bugs. + + \return (phi,Tip,p_t,cot(theta)),Zip), their covariance matrix and the chi2's of the circle and line fits. 
+ */ + + template + class helixFit { + public: + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void operator()(const TAcc& acc, + const riemannFit::Matrix3xNd* hits, + const Eigen::Matrix* hits_ge, + const double bField, + riemannFit::HelixFit* helix) const { + riemannFit::Vector4d fast_fit; + fastFit(acc, *hits, fast_fit); + + PreparedBrokenLineData data; + karimaki_circle_fit circle; + riemannFit::LineFit line; + riemannFit::Matrix3d jacobian; + + prepareBrokenLineData(acc, *hits, fast_fit, bField, data); + lineFit(acc, *hits_ge, fast_fit, bField, data, line); + circleFit(acc, *hits, *hits_ge, fast_fit, bField, data, circle); + + // the circle fit gives k, but here we want p_t, so let's change the parameter and the covariance matrix + jacobian << 1., 0, 0, 0, 1., 0, 0, 0, + -alpaka::math::abs(acc, circle.par(2)) * bField / (riemannFit::sqr(circle.par(2)) * circle.par(2)); + circle.par(2) = bField / alpaka::math::abs(acc, circle.par(2)); + circle.cov = jacobian * circle.cov * jacobian.transpose(); + + helix->par << circle.par, line.par; + helix->cov = riemannFit::MatrixXd::Zero(5, 5); + helix->cov.block(0, 0, 3, 3) = circle.cov; + helix->cov.block(3, 3, 2, 2) = line.cov; + helix->qCharge = circle.qCharge; + helix->chi2_circle = circle.chi2; + helix->chi2_line = line.chi2; + } + }; + } // namespace brokenline +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTrackFitting_interface_BrokenLine_h diff --git a/RecoTracker/PixelTrackFitting/interface/alpaka/FitResult.h b/RecoTracker/PixelTrackFitting/interface/alpaka/FitResult.h new file mode 100644 index 0000000000000..3daf271a5ca13 --- /dev/null +++ b/RecoTracker/PixelTrackFitting/interface/alpaka/FitResult.h @@ -0,0 +1,64 @@ +#ifndef RecoPixelVertexing_PixelTrackFitting_interface_FitResult_h +#define RecoPixelVertexing_PixelTrackFitting_interface_FitResult_h + +#include +#include + +#include +#include + +namespace riemannFit { + + using Vector2d = Eigen::Vector2d; + using Vector3d = 
Eigen::Vector3d; + using Vector4d = Eigen::Vector4d; + using Vector5d = Eigen::Matrix; + using Matrix2d = Eigen::Matrix2d; + using Matrix3d = Eigen::Matrix3d; + using Matrix4d = Eigen::Matrix4d; + using Matrix5d = Eigen::Matrix; + using Matrix6d = Eigen::Matrix; + + template + using Matrix3xNd = Eigen::Matrix; // used for inputs hits + + struct CircleFit { + Vector3d par; //!< parameter: (X0,Y0,R) + Matrix3d cov; + /*!< covariance matrix: \n + |cov(X0,X0)|cov(Y0,X0)|cov( R,X0)| \n + |cov(X0,Y0)|cov(Y0,Y0)|cov( R,Y0)| \n + |cov(X0, R)|cov(Y0, R)|cov( R, R)| + */ + int32_t qCharge; //!< particle charge + float chi2; + }; + + struct LineFit { + Vector2d par; //!<(cotan(theta),Zip) + Matrix2d cov; + /*!< + |cov(c_t,c_t)|cov(Zip,c_t)| \n + |cov(c_t,Zip)|cov(Zip,Zip)| + */ + double chi2; + }; + + struct HelixFit { + Vector5d par; //!<(phi,Tip,pt,cotan(theta)),Zip) + Matrix5d cov; + /*!< ()->cov() \n + |(phi,phi)|(Tip,phi)|(p_t,phi)|(c_t,phi)|(Zip,phi)| \n + |(phi,Tip)|(Tip,Tip)|(p_t,Tip)|(c_t,Tip)|(Zip,Tip)| \n + |(phi,p_t)|(Tip,p_t)|(p_t,p_t)|(c_t,p_t)|(Zip,p_t)| \n + |(phi,c_t)|(Tip,c_t)|(p_t,c_t)|(c_t,c_t)|(Zip,c_t)| \n + |(phi,Zip)|(Tip,Zip)|(p_t,Zip)|(c_t,Zip)|(Zip,Zip)| + */ + float chi2_circle; + float chi2_line; + // Vector4d fast_fit; + int32_t qCharge; //!< particle charge + }; // __attribute__((aligned(16))); + +} // namespace riemannFit +#endif diff --git a/RecoTracker/PixelTrackFitting/interface/alpaka/FitUtils.h b/RecoTracker/PixelTrackFitting/interface/alpaka/FitUtils.h new file mode 100644 index 0000000000000..5dfa609ad3905 --- /dev/null +++ b/RecoTracker/PixelTrackFitting/interface/alpaka/FitUtils.h @@ -0,0 +1,253 @@ +#ifndef RecoPixelVertexing_PixelTrackFitting_alpaka_FitUtils_h +#define RecoPixelVertexing_PixelTrackFitting_alpaka_FitUtils_h +#include +#include "DataFormats/Math/interface/choleskyInversion.h" +#include "RecoTracker/PixelTrackFitting/interface/alpaka/FitResult.h" +namespace riemannFit { + + constexpr double epsilon = 1.e-4; //!< used in 
numerical derivative (J2 in Circle_fit()) + + using VectorXd = Eigen::VectorXd; + using MatrixXd = Eigen::MatrixXd; + template + using MatrixNd = Eigen::Matrix; + template + using MatrixNplusONEd = Eigen::Matrix; + template + using ArrayNd = Eigen::Array; + template + using Matrix2Nd = Eigen::Matrix; + template + using Matrix3Nd = Eigen::Matrix; + template + using Matrix2xNd = Eigen::Matrix; + template + using Array2xNd = Eigen::Array; + template + using MatrixNx3d = Eigen::Matrix; + template + using MatrixNx5d = Eigen::Matrix; + template + using VectorNd = Eigen::Matrix; + template + using VectorNplusONEd = Eigen::Matrix; + template + using Vector2Nd = Eigen::Matrix; + template + using Vector3Nd = Eigen::Matrix; + template + using RowVectorNd = Eigen::Matrix; + template + using RowVector2Nd = Eigen::Matrix; + + using Matrix2x3d = Eigen::Matrix; + + using Matrix3f = Eigen::Matrix3f; + using Vector3f = Eigen::Vector3f; + using Vector4f = Eigen::Vector4f; + using Vector6f = Eigen::Matrix; + // transformation between the "perigee" to cmssw localcoord frame + // the plane of the latter is the perigee plane... + // from //!<(phi,Tip,q/pt,cotan(theta)),Zip) + // to q/p,dx/dz,dy/dz,x,z + template + inline void transformToPerigeePlane(VI5 const& ip, MI5 const& icov, VO5& op, MO5& ocov) { + auto sinTheta2 = 1. / (1. 
+ ip(3) * ip(3)); + auto sinTheta = std::sqrt(sinTheta2); + auto cosTheta = ip(3) * sinTheta; + + op(0) = sinTheta * ip(2); + op(1) = 0.; + op(2) = -ip(3); + op(3) = ip(1); + op(4) = -ip(4); + + Matrix5d jMat = Matrix5d::Zero(); + + jMat(0, 2) = sinTheta; + jMat(0, 3) = -sinTheta2 * cosTheta * ip(2); + jMat(1, 0) = 1.; + jMat(2, 3) = -1.; + jMat(3, 1) = 1.; + jMat(4, 4) = -1; + + ocov = jMat * icov * jMat.transpose(); + } + +} // namespace riemannFit + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace riemannFit { + using namespace ::riemannFit; + + template + ALPAKA_FN_ACC void printIt(const TAcc& acc, C* m, const char* prefix = "") { +#ifdef RFIT_DEBUG + for (uint r = 0; r < m->rows(); ++r) { + for (uint c = 0; c < m->cols(); ++c) { + printf("%s Matrix(%d,%d) = %g\n", prefix, r, c, (*m)(r, c)); + } + } +#endif + } + + /*! + \brief raise to square. + */ + template + constexpr T sqr(const T a) { + return a * a; + } + + /*! + \brief Compute cross product of two 2D vector (assuming z component 0), + returning z component of the result. + \param a first 2D vector in the product. + \param b second 2D vector in the product. + \return z component of the cross product. + */ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE double cross2D(const TAcc& acc, const Vector2d& a, const Vector2d& b) { + return a.x() * b.y() - a.y() * b.x(); + } + + /*! 
+ * load error in CMSSW format to our formalism + * + */ + template + ALPAKA_FN_ACC void loadCovariance2D(const TAcc& acc, M6xNf const& ge, M2Nd& hits_cov) { + // Index numerology: + // i: index of the hits/point (0,..,3) + // j: index of space component (x,y,z) + // l: index of space components (x,y,z) + // ge is always in sync with the index i and is formatted as: + // ge[] ==> [xx, xy, yy, xz, yz, zz] + // in (j,l) notation, we have: + // ge[] ==> [(0,0), (0,1), (1,1), (0,2), (1,2), (2,2)] + // so the index ge_idx corresponds to the matrix elements: + // | 0 1 3 | + // | 1 2 4 | + // | 3 4 5 | + constexpr uint32_t hits_in_fit = M6xNf::ColsAtCompileTime; + for (uint32_t i = 0; i < hits_in_fit; ++i) { + { + constexpr uint32_t ge_idx = 0, j = 0, l = 0; + hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 2, j = 1, l = 1; + hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 1, j = 1, l = 0; + hits_cov(i + l * hits_in_fit, i + j * hits_in_fit) = hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = + ge.col(i)[ge_idx]; + } + } + } + + template + ALPAKA_FN_ACC void loadCovariance(const TAcc& acc, M6xNf const& ge, M3xNd& hits_cov) { + // Index numerology: + // i: index of the hits/point (0,..,3) + // j: index of space component (x,y,z) + // l: index of space components (x,y,z) + // ge is always in sync with the index i and is formatted as: + // ge[] ==> [xx, xy, yy, xz, yz, zz] + // in (j,l) notation, we have: + // ge[] ==> [(0,0), (0,1), (1,1), (0,2), (1,2), (2,2)] + // so the index ge_idx corresponds to the matrix elements: + // | 0 1 3 | + // | 1 2 4 | + // | 3 4 5 | + constexpr uint32_t hits_in_fit = M6xNf::ColsAtCompileTime; + for (uint32_t i = 0; i < hits_in_fit; ++i) { + { + constexpr uint32_t ge_idx = 0, j = 0, l = 0; + hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 2, j = 1, l = 1; + 
hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 5, j = 2, l = 2; + hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 1, j = 1, l = 0; + hits_cov(i + l * hits_in_fit, i + j * hits_in_fit) = hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = + ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 3, j = 2, l = 0; + hits_cov(i + l * hits_in_fit, i + j * hits_in_fit) = hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = + ge.col(i)[ge_idx]; + } + { + constexpr uint32_t ge_idx = 4, j = 2, l = 1; + hits_cov(i + l * hits_in_fit, i + j * hits_in_fit) = hits_cov(i + j * hits_in_fit, i + l * hits_in_fit) = + ge.col(i)[ge_idx]; + } + } + } + + /*! + \brief Transform circle parameter from (X0,Y0,R) to (phi,Tip,p_t) and + consequently covariance matrix. + \param circle_uvr parameter (X0,Y0,R), covariance matrix to + be transformed and particle charge. + \param B magnetic field in Gev/cm/c unit. + \param error flag for errors computation. + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void par_uvrtopak(const TAcc& acc, + CircleFit& circle, + const double B, + const bool error) { + Vector3d par_pak; + const double temp0 = circle.par.head(2).squaredNorm(); + const double temp1 = alpaka::math::sqrt(acc, temp0); + par_pak << alpaka::math::atan2(acc, circle.qCharge * circle.par(0), -circle.qCharge * circle.par(1)), + circle.qCharge * (temp1 - circle.par(2)), circle.par(2) * B; + if (error) { + const double temp2 = sqr(circle.par(0)) * 1. / temp0; + const double temp3 = 1. / temp1 * circle.qCharge; + Matrix3d j4Mat; + j4Mat << -circle.par(1) * temp2 * 1. / sqr(circle.par(0)), temp2 * 1. / circle.par(0), 0., + circle.par(0) * temp3, circle.par(1) * temp3, -circle.qCharge, 0., 0., B; + circle.cov = j4Mat * circle.cov * j4Mat.transpose(); + } + circle.par = par_pak; + } + + /*! 
+ \brief Transform circle parameter from (X0,Y0,R) to (phi,Tip,q/R) and + consequently covariance matrix. + \param circle_uvr parameter (X0,Y0,R), covariance matrix to + be transformed and particle charge. + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void fromCircleToPerigee(const TAcc& acc, CircleFit& circle) { + Vector3d par_pak; + const double temp0 = circle.par.head(2).squaredNorm(); + const double temp1 = alpaka::math::sqrt(acc, temp0); + par_pak << alpaka::math::atan2(acc, circle.qCharge * circle.par(0), -circle.qCharge * circle.par(1)), + circle.qCharge * (temp1 - circle.par(2)), circle.qCharge / circle.par(2); + + const double temp2 = sqr(circle.par(0)) * 1. / temp0; + const double temp3 = 1. / temp1 * circle.qCharge; + Matrix3d j4Mat; + j4Mat << -circle.par(1) * temp2 * 1. / sqr(circle.par(0)), temp2 * 1. / circle.par(0), 0., circle.par(0) * temp3, + circle.par(1) * temp3, -circle.qCharge, 0., 0., -circle.qCharge / (circle.par(2) * circle.par(2)); + circle.cov = j4Mat * circle.cov * j4Mat.transpose(); + + circle.par = par_pak; + } + + } // namespace riemannFit + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelTrackFitting_interface_FitUtils_h diff --git a/RecoTracker/PixelTrackFitting/interface/alpaka/RiemannFit.h b/RecoTracker/PixelTrackFitting/interface/alpaka/RiemannFit.h new file mode 100644 index 0000000000000..8455a03e9f58f --- /dev/null +++ b/RecoTracker/PixelTrackFitting/interface/alpaka/RiemannFit.h @@ -0,0 +1,1023 @@ +#ifndef RecoPixelVertexing_PixelTrackFitting_interface_RiemannFit_h +#define RecoPixelVertexing_PixelTrackFitting_interface_RiemannFit_h +#include +#include "RecoTracker/PixelTrackFitting/interface/alpaka/FitUtils.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + namespace riemannFit { + using namespace ::riemannFit; + /*! 
Compute the Radiation length in the uniform hypothesis + * + * The Pixel detector, barrel and forward, is considered as an homogeneous + * cylinder of material, whose radiation lengths has been derived from the TDR + * plot that shows that 16cm correspond to 0.06 radiation lengths. Therefore + * one radiation length corresponds to 16cm/0.06 =~ 267 cm. All radiation + * lengths are computed using this unique number, in both regions, barrel and + * endcap. + * + * NB: no angle corrections nor projections are computed inside this routine. + * It is therefore the responsibility of the caller to supply the proper + * lengths in input. These lengths are the path traveled by the particle along + * its trajectory, namely the so called S of the helix in 3D space. + * + * \param length_values vector of incremental distances that will be translated + * into radiation length equivalent. Each radiation length i is computed + * incrementally with respect to the previous length i-1. The first length has + * no reference point (i.e. it has the dca). + * + * \return incremental radiation lengths that correspond to each segment. + */ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void computeRadLenUniformMaterial(const TAcc& acc, + const VNd1& length_values, + VNd2& rad_lengths) { + // Radiation length of the pixel detector in the uniform assumption, with + // 0.06 rad_len at 16 cm + constexpr double xx_0_inv = 0.06 / 16.; + uint n = length_values.rows(); + rad_lengths(0) = length_values(0) * xx_0_inv; + for (uint j = 1; j < n; ++j) { + rad_lengths(j) = alpaka::math::abs(acc, length_values(j) - length_values(j - 1)) * xx_0_inv; + } + } + + /*! + \brief Compute the covariance matrix along cartesian S-Z of points due to + multiple Coulomb scattering to be used in the line_fit, for the barrel + and forward cases. + The input covariance matrix is in the variables s-z, original and + unrotated. 
+ The multiple scattering component is computed in the usual linear + approximation, using the 3D path which is computed as the squared root of + the squared sum of the s and z components passed in. + Internally a rotation by theta is performed and the covariance matrix + returned is the one in the direction orthogonal to the rotated S3D axis, + i.e. along the rotated Z axis. + The choice of the rotation is not arbitrary, but derived from the fact that + putting the horizontal axis along the S3D direction allows the usage of the + ordinary least squared fitting techiques with the trivial parametrization y + = mx + q, avoiding the patological case with m = +/- inf, that would + correspond to the case at eta = 0. + */ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE auto scatterCovLine(const TAcc& acc, + Matrix2d const* cov_sz, + const V4& fast_fit, + VNd1 const& s_arcs, + VNd2 const& z_values, + const double theta, + const double bField, + MatrixNd& ret) { +#ifdef RFIT_DEBUG + riemannFit::printIt(&s_arcs, "Scatter_cov_line - s_arcs: "); +#endif + constexpr uint n = N; + double p_t = alpaka::math::min(acc, 20., fast_fit(2) * bField); // limit pt to avoid too small error!!! + double p_2 = p_t * p_t * (1. + 1. / sqr(fast_fit(3))); + VectorNd rad_lengths_S; + // See documentation at http://eigen.tuxfamily.org/dox/group__TutorialArrayClass.html + // Basically, to perform cwise operations on Matrices and Vectors, you need + // to transform them into Array-like objects. + VectorNd s_values = s_arcs.array() * s_arcs.array() + z_values.array() * z_values.array(); + s_values = s_values.array().sqrt(); + computeRadLenUniformMaterial(acc, s_values, rad_lengths_S); + VectorNd sig2_S; + sig2_S = .000225 / p_2 * (1. 
+ 0.038 * rad_lengths_S.array().log()).abs2() * rad_lengths_S.array(); +#ifdef RFIT_DEBUG + riemannFit::printIt(cov_sz, "Scatter_cov_line - cov_sz: "); +#endif + Matrix2Nd tmp = Matrix2Nd::Zero(); + for (uint k = 0; k < n; ++k) { + tmp(k, k) = cov_sz[k](0, 0); + tmp(k + n, k + n) = cov_sz[k](1, 1); + tmp(k, k + n) = tmp(k + n, k) = cov_sz[k](0, 1); + } + for (uint k = 0; k < n; ++k) { + for (uint l = k; l < n; ++l) { + for (uint i = 0; i < uint(alpaka::math::min(acc, k, l)); ++i) { + tmp(k + n, l + n) += alpaka::math::abs(acc, s_values(k) - s_values(i)) * + alpaka::math::abs(acc, s_values(l) - s_values(i)) * sig2_S(i); + } + tmp(l + n, k + n) = tmp(k + n, l + n); + } + } + // We are interested only in the errors orthogonal to the rotated s-axis + // which, in our formalism, are in the lower square matrix. +#ifdef RFIT_DEBUG + riemannFit::printIt(&tmp, "Scatter_cov_line - tmp: "); +#endif + ret = tmp.block(n, n, n, n); + } + + /*! + \brief Compute the covariance matrix (in radial coordinates) of points in + the transverse plane due to multiple Coulomb scattering. + \param p2D 2D points in the transverse plane. + \param fast_fit fast_fit Vector4d result of the previous pre-fit + structured in this form:(X0, Y0, R, Tan(Theta))). + \param B magnetic field use to compute p + \return scatter_cov_rad errors due to multiple scattering. + \warning input points must be ordered radially from the detector center + (from inner layer to outer ones; points on the same layer must ordered too). + \details Only the tangential component is computed (the radial one is + negligible). + */ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE MatrixNd scatter_cov_rad( + const TAcc& acc, const M2xN& p2D, const V4& fast_fit, VectorNd const& rad, double B) { + constexpr uint n = N; + double p_t = alpaka::math::min(acc, 20., fast_fit(2) * B); // limit pt to avoid too small error!!! + double p_2 = p_t * p_t * (1. + 1. / sqr(fast_fit(3))); + double theta = atan(fast_fit(3)); + theta = theta < 0. ? 
theta + M_PI : theta; + VectorNd s_values; + VectorNd rad_lengths; + const Vector2d oVec(fast_fit(0), fast_fit(1)); + + // associated Jacobian, used in weights and errors computation + for (uint i = 0; i < n; ++i) { // x + Vector2d pVec = p2D.block(0, i, 2, 1) - oVec; + const double cross = cross2D(acc, -oVec, pVec); + const double dot = (-oVec).dot(pVec); + const double tempAtan2 = atan2(cross, dot); + s_values(i) = alpaka::math::abs(acc, tempAtan2 * fast_fit(2)); + } + computeRadLenUniformMaterial(acc, s_values * sqrt(1. + 1. / sqr(fast_fit(3))), rad_lengths); + MatrixNd scatter_cov_rad = MatrixNd::Zero(); + VectorNd sig2 = (1. + 0.038 * rad_lengths.array().log()).abs2() * rad_lengths.array(); + sig2 *= 0.000225 / (p_2 * sqr(sin(theta))); + for (uint k = 0; k < n; ++k) { + for (uint l = k; l < n; ++l) { + for (uint i = 0; i < uint(alpaka::math::min(acc, k, l)); ++i) { + scatter_cov_rad(k, l) += (rad(k) - rad(i)) * (rad(l) - rad(i)) * sig2(i); + } + scatter_cov_rad(l, k) = scatter_cov_rad(k, l); + } + } +#ifdef RFIT_DEBUG + riemannFit::printIt(&scatter_cov_rad, "Scatter_cov_rad - scatter_cov_rad: "); +#endif + return scatter_cov_rad; + } + + /*! + \brief Transform covariance matrix from radial (only tangential component) + to Cartesian coordinates (only transverse plane component). + \param p2D 2D points in the transverse plane. + \param cov_rad covariance matrix in radial coordinate. + \return cov_cart covariance matrix in Cartesian coordinates. 
+*/ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE Matrix2Nd cov_radtocart(const TAcc& acc, + const M2xN& p2D, + const MatrixNd& cov_rad, + const VectorNd& rad) { +#ifdef RFIT_DEBUG + printf("Address of p2D: %p\n", &p2D); +#endif + printIt(&p2D, "cov_radtocart - p2D:"); + constexpr uint n = N; + Matrix2Nd cov_cart = Matrix2Nd::Zero(); + VectorNd rad_inv = rad.cwiseInverse(); + printIt(&rad_inv, "cov_radtocart - rad_inv:"); + for (uint i = 0; i < n; ++i) { + for (uint j = i; j < n; ++j) { + cov_cart(i, j) = cov_rad(i, j) * p2D(1, i) * rad_inv(i) * p2D(1, j) * rad_inv(j); + cov_cart(i + n, j + n) = cov_rad(i, j) * p2D(0, i) * rad_inv(i) * p2D(0, j) * rad_inv(j); + cov_cart(i, j + n) = -cov_rad(i, j) * p2D(1, i) * rad_inv(i) * p2D(0, j) * rad_inv(j); + cov_cart(i + n, j) = -cov_rad(i, j) * p2D(0, i) * rad_inv(i) * p2D(1, j) * rad_inv(j); + cov_cart(j, i) = cov_cart(i, j); + cov_cart(j + n, i + n) = cov_cart(i + n, j + n); + cov_cart(j + n, i) = cov_cart(i, j + n); + cov_cart(j, i + n) = cov_cart(i + n, j); + } + } + return cov_cart; + } + + /*! + \brief Transform covariance matrix from Cartesian coordinates (only + transverse plane component) to radial coordinates (both radial and + tangential component but only diagonal terms, correlation between different + point are not managed). + \param p2D 2D points in transverse plane. + \param cov_cart covariance matrix in Cartesian coordinates. + \return cov_rad covariance matrix in raidal coordinate. + \warning correlation between different point are not computed. 
+*/ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE VectorNd cov_carttorad(const TAcc& acc, + const M2xN& p2D, + const Matrix2Nd& cov_cart, + const VectorNd& rad) { + constexpr uint n = N; + VectorNd cov_rad; + const VectorNd rad_inv2 = rad.cwiseInverse().array().square(); + for (uint i = 0; i < n; ++i) { + //!< in case you have (0,0) to avoid dividing by 0 radius + if (rad(i) < 1.e-4) + cov_rad(i) = cov_cart(i, i); + else { + cov_rad(i) = rad_inv2(i) * (cov_cart(i, i) * sqr(p2D(1, i)) + cov_cart(i + n, i + n) * sqr(p2D(0, i)) - + 2. * cov_cart(i, i + n) * p2D(0, i) * p2D(1, i)); + } + } + return cov_rad; + } + + /*! + \brief Transform covariance matrix from Cartesian coordinates (only + transverse plane component) to coordinates system orthogonal to the + pre-fitted circle in each point. + Further information in attached documentation. + \param p2D 2D points in transverse plane. + \param cov_cart covariance matrix in Cartesian coordinates. + \param fast_fit fast_fit Vector4d result of the previous pre-fit + structured in this form:(X0, Y0, R, tan(theta))). + \return cov_rad covariance matrix in the pre-fitted circle's + orthogonal system. +*/ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE VectorNd cov_carttorad_prefit( + const TAcc& acc, const M2xN& p2D, const Matrix2Nd& cov_cart, V4& fast_fit, const VectorNd& rad) { + constexpr uint n = N; + VectorNd cov_rad; + for (uint i = 0; i < n; ++i) { + //!< in case you have (0,0) to avoid dividing by 0 radius + if (rad(i) < 1.e-4) + cov_rad(i) = cov_cart(i, i); // TO FIX + else { + Vector2d a = p2D.col(i); + Vector2d b = p2D.col(i) - fast_fit.head(2); + const double x2 = a.dot(b); + const double y2 = cross2D(acc, a, b); + const double tan_c = -y2 / x2; + const double tan_c2 = sqr(tan_c); + cov_rad(i) = + 1. / (1. + tan_c2) * (cov_cart(i, i) + cov_cart(i + n, i + n) * tan_c2 + 2 * cov_cart(i, i + n) * tan_c); + } + } + return cov_rad; + } + + /*! 
+ \brief Compute the points' weights' vector for the circle fit when multiple + scattering is managed. + Further information in attached documentation. + \param cov_rad_inv covariance matrix inverse in radial coordinated + (or, beter, pre-fitted circle's orthogonal system). + \return weight VectorNd points' weights' vector. + \bug I'm not sure this is the right way to compute the weights for non + diagonal cov matrix. Further investigation needed. +*/ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE VectorNd weightCircle(const TAcc& acc, const MatrixNd& cov_rad_inv) { + return cov_rad_inv.colwise().sum().transpose(); + } + + /*! + \brief Find particle q considering the sign of cross product between + particles velocity (estimated by the first 2 hits) and the vector radius + between the first hit and the center of the fitted circle. + \param p2D 2D points in transverse plane. + \param par_uvr result of the circle fit in this form: (X0,Y0,R). + \return q int 1 or -1. +*/ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE int32_t charge(const TAcc& acc, const M2xN& p2D, const Vector3d& par_uvr) { + return ((p2D(0, 1) - p2D(0, 0)) * (par_uvr.y() - p2D(1, 0)) - + (p2D(1, 1) - p2D(1, 0)) * (par_uvr.x() - p2D(0, 0)) > + 0) + ? -1 + : 1; + } + + /*! + \brief Compute the eigenvector associated to the minimum eigenvalue. + \param A the Matrix you want to know eigenvector and eigenvalue. + \param chi2 the double were the chi2-related quantity will be stored. + \return the eigenvector associated to the minimum eigenvalue. + \warning double precision is needed for a correct assessment of chi2. + \details The minimus eigenvalue is related to chi2. + We exploit the fact that the matrix is symmetrical and small (2x2 for line + fit and 3x3 for circle fit), so the SelfAdjointEigenSolver from Eigen + library is used, with the computedDirect method (available only for 2x2 + and 3x3 Matrix) wich computes eigendecomposition of given matrix using a + fast closed-form algorithm. 
+ For this optimization the matrix type must be known at compiling time. +*/ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE Vector3d min_eigen3D(const TAcc& acc, const Matrix3d& A, double& chi2) { +#ifdef RFIT_DEBUG + printf("min_eigen3D - enter\n"); +#endif + Eigen::SelfAdjointEigenSolver solver(3); + solver.computeDirect(A); + int min_index; + chi2 = solver.eigenvalues().minCoeff(&min_index); +#ifdef RFIT_DEBUG + printf("min_eigen3D - exit\n"); +#endif + return solver.eigenvectors().col(min_index); + } + + /*! + \brief A faster version of min_eigen3D() where double precision is not + needed. + \param A the Matrix you want to know eigenvector and eigenvalue. + \param chi2 the double were the chi2-related quantity will be stored + \return the eigenvector associated to the minimum eigenvalue. + \detail The computedDirect() method of SelfAdjointEigenSolver for 3x3 Matrix + indeed, use trigonometry function (it solves a third degree equation) which + speed up in single precision. +*/ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE Vector3d min_eigen3D_fast(const TAcc& acc, const Matrix3d& A) { + Eigen::SelfAdjointEigenSolver solver(3); + solver.computeDirect(A.cast()); + int min_index; + solver.eigenvalues().minCoeff(&min_index); + return solver.eigenvectors().col(min_index).cast(); + } + + /*! + \brief 2D version of min_eigen3D(). + \param aMat the Matrix you want to know eigenvector and eigenvalue. + \param chi2 the double were the chi2-related quantity will be stored + \return the eigenvector associated to the minimum eigenvalue. + \detail The computedDirect() method of SelfAdjointEigenSolver for 2x2 Matrix + do not use special math function (just sqrt) therefore it doesn't speed up + significantly in single precision. 
+*/ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE Vector2d min_eigen2D(const TAcc& acc, const Matrix2d& aMat, double& chi2) { + Eigen::SelfAdjointEigenSolver solver(2); + solver.computeDirect(aMat); + int min_index; + chi2 = solver.eigenvalues().minCoeff(&min_index); + return solver.eigenvectors().col(min_index); + } + + /*! + \brief A very fast helix fit: it fits a circle by three points (first, middle + and last point) and a line by two points (first and last). + \param hits points to be fitted + \return result in this form: (X0,Y0,R,tan(theta)). + \warning points must be passed ordered (from internal layer to external) in + order to maximize accuracy and do not mistake tan(theta) sign. + \details This fast fit is used as pre-fit which is needed for: + - weights estimation and chi2 computation in line fit (fundamental); + - weights estimation and chi2 computation in circle fit (useful); + - computation of error due to multiple scattering. +*/ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void fastFit(const TAcc& acc, const M3xN& hits, V4& result) { + constexpr uint32_t N = M3xN::ColsAtCompileTime; + constexpr auto n = N; // get the number of hits + printIt(&hits, "Fast_fit - hits: "); + + // CIRCLE FIT + // Make segments between middle-to-first(b) and last-to-first(c) hits + const Vector2d bVec = hits.block(0, n / 2, 2, 1) - hits.block(0, 0, 2, 1); + const Vector2d cVec = hits.block(0, n - 1, 2, 1) - hits.block(0, 0, 2, 1); + printIt(&bVec, "Fast_fit - b: "); + printIt(&cVec, "Fast_fit - c: "); + // Compute their lengths + auto b2 = bVec.squaredNorm(); + auto c2 = cVec.squaredNorm(); + // The algebra has been verified (MR). The usual approach has been followed: + // * use an orthogonal reference frame passing from the first point. + // * build the segments (chords) + // * build orthogonal lines through mid points + // * make a system and solve for X0 and Y0. + // * add the initial point + bool flip = abs(bVec.x()) < abs(bVec.y()); + auto bx = flip ? 
bVec.y() : bVec.x(); + auto by = flip ? bVec.x() : bVec.y(); + auto cx = flip ? cVec.y() : cVec.x(); + auto cy = flip ? cVec.x() : cVec.y(); + //!< in case b.x is 0 (2 hits with same x) + auto div = 2. * (cx * by - bx * cy); + // if aligned TO FIX + auto y0 = (cx * b2 - bx * c2) / div; + auto x0 = (0.5 * b2 - y0 * by) / bx; + result(0) = hits(0, 0) + (flip ? y0 : x0); + result(1) = hits(1, 0) + (flip ? x0 : y0); + result(2) = sqrt(sqr(x0) + sqr(y0)); + printIt(&result, "Fast_fit - result: "); + + // LINE FIT + const Vector2d dVec = hits.block(0, 0, 2, 1) - result.head(2); + const Vector2d eVec = hits.block(0, n - 1, 2, 1) - result.head(2); + printIt(&eVec, "Fast_fit - e: "); + printIt(&dVec, "Fast_fit - d: "); + // Compute the arc-length between first and last point: L = R * theta = R * atan (tan (Theta) ) + auto dr = result(2) * atan2(cross2D(acc, dVec, eVec), dVec.dot(eVec)); + // Simple difference in Z between last and first hit + auto dz = hits(2, n - 1) - hits(2, 0); + + result(3) = (dr / dz); + +#ifdef RFIT_DEBUG + printf("Fast_fit: [%f, %f, %f, %f]\n", result(0), result(1), result(2), result(3)); +#endif + } + + /*! + \brief Fit a generic number of 2D points with a circle using Riemann-Chernov + algorithm. Covariance matrix of fitted parameter is optionally computed. + Multiple scattering (currently only in barrel layer) is optionally handled. + \param hits2D 2D points to be fitted. + \param hits_cov2D covariance matrix of 2D points. + \param fast_fit pre-fit result in this form: (X0,Y0,R,tan(theta)). + (tan(theta) is not used). + \param bField magnetic field + \param error flag for error computation. + \param scattering flag for multiple scattering + \return circle circle_fit: + -par parameter of the fitted circle in this form (X0,Y0,R); \n + -cov covariance matrix of the fitted parameter (not initialized if + error = false); \n + -q charge of the particle; \n + -chi2. 
+ \warning hits must be passed ordered from inner to outer layer (double hits + on the same layer must be ordered too) so that multiple scattering is + treated properly. + \warning Multiple scattering for barrel is still not tested. + \warning Multiple scattering for endcap hits is not handled (yet). Do not + fit endcap hits with scattering = true ! + \bug for small pt (<0.3 Gev/c) chi2 could be slightly underestimated. + \bug further investigation needed for error propagation with multiple + scattering. +*/ + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE CircleFit circleFit(const TAcc& acc, + const M2xN& hits2D, + const Matrix2Nd& hits_cov2D, + const V4& fast_fit, + const VectorNd& rad, + const double bField, + const bool error) { +#ifdef RFIT_DEBUG + printf("circle_fit - enter\n"); +#endif + // INITIALIZATION + Matrix2Nd vMat = hits_cov2D; + constexpr uint n = N; + printIt(&hits2D, "circle_fit - hits2D:"); + printIt(&hits_cov2D, "circle_fit - hits_cov2D:"); + +#ifdef RFIT_DEBUG + printf("circle_fit - WEIGHT COMPUTATION\n"); +#endif + // WEIGHT COMPUTATION + VectorNd weight; + MatrixNd gMat; + double renorm; + { + MatrixNd cov_rad = cov_carttorad_prefit(acc, hits2D, vMat, fast_fit, rad).asDiagonal(); + MatrixNd scatterCovRadMat = scatter_cov_rad(acc, hits2D, fast_fit, rad, bField); + printIt(&scatterCovRadMat, "circle_fit - scatter_cov_rad:"); + printIt(&hits2D, "circle_fit - hits2D bis:"); +#ifdef RFIT_DEBUG + printf("Address of hits2D: a) %p\n", &hits2D); +#endif + vMat += cov_radtocart(acc, hits2D, scatterCovRadMat, rad); + printIt(&vMat, "circle_fit - V:"); + cov_rad += scatterCovRadMat; + printIt(&cov_rad, "circle_fit - cov_rad:"); + math::cholesky::invert(cov_rad, gMat); + // gMat = cov_rad.inverse(); + renorm = gMat.sum(); + gMat *= 1. 
/ renorm; + weight = weightCircle(acc, gMat); + } + printIt(&weight, "circle_fit - weight:"); + + // SPACE TRANSFORMATION +#ifdef RFIT_DEBUG + printf("circle_fit - SPACE TRANSFORMATION\n"); +#endif + + // center +#ifdef RFIT_DEBUG + printf("Address of hits2D: b) %p\n", &hits2D); +#endif + const Vector2d hCentroid = hits2D.rowwise().mean(); // centroid + printIt(&hCentroid, "circle_fit - h_:"); + Matrix3xNd p3D; + p3D.block(0, 0, 2, n) = hits2D.colwise() - hCentroid; + printIt(&p3D, "circle_fit - p3D: a)"); + Vector2Nd mc; // centered hits, used in error computation + mc << p3D.row(0).transpose(), p3D.row(1).transpose(); + printIt(&mc, "circle_fit - mc(centered hits):"); + + // scale + const double tempQ = mc.squaredNorm(); + const double tempS = sqrt(n * 1. / tempQ); // scaling factor + p3D.block(0, 0, 2, n) *= tempS; + + // project on paraboloid + p3D.row(2) = p3D.block(0, 0, 2, n).colwise().squaredNorm(); + printIt(&p3D, "circle_fit - p3D: b)"); + +#ifdef RFIT_DEBUG + printf("circle_fit - COST FUNCTION\n"); +#endif + // COST FUNCTION + + // compute + Vector3d r0; + r0.noalias() = p3D * weight; // center of gravity + const Matrix3xNd xMat = p3D.colwise() - r0; + Matrix3d aMat = xMat * gMat * xMat.transpose(); + printIt(&aMat, "circle_fit - A:"); + +#ifdef RFIT_DEBUG + printf("circle_fit - MINIMIZE\n"); +#endif + // minimize + double chi2; + Vector3d vVec = min_eigen3D(acc, aMat, chi2); +#ifdef RFIT_DEBUG + printf("circle_fit - AFTER MIN_EIGEN\n"); +#endif + printIt(&vVec, "v BEFORE INVERSION"); + vVec *= (vVec(2) > 0) ? 1 : -1; // TO FIX dovrebbe essere N(3)>0 + printIt(&vVec, "v AFTER INVERSION"); + // This hack to be able to run on GPU where the automatic assignment to a + // double from the vector multiplication is not working. 
+#ifdef RFIT_DEBUG + printf("circle_fit - AFTER MIN_EIGEN 1\n"); +#endif + Eigen::Matrix cm; +#ifdef RFIT_DEBUG + printf("circle_fit - AFTER MIN_EIGEN 2\n"); +#endif + cm = -vVec.transpose() * r0; +#ifdef RFIT_DEBUG + printf("circle_fit - AFTER MIN_EIGEN 3\n"); +#endif + const double tempC = cm(0, 0); + +#ifdef RFIT_DEBUG + printf("circle_fit - COMPUTE CIRCLE PARAMETER\n"); +#endif + // COMPUTE CIRCLE PARAMETER + + // auxiliary quantities + const double tempH = sqrt(1. - sqr(vVec(2)) - 4. * tempC * vVec(2)); + const double v2x2_inv = 1. / (2. * vVec(2)); + const double s_inv = 1. / tempS; + Vector3d par_uvr; // used in error propagation + par_uvr << -vVec(0) * v2x2_inv, -vVec(1) * v2x2_inv, tempH * v2x2_inv; + + CircleFit circle; + circle.par << par_uvr(0) * s_inv + hCentroid(0), par_uvr(1) * s_inv + hCentroid(1), par_uvr(2) * s_inv; + circle.qCharge = charge(acc, hits2D, circle.par); + circle.chi2 = abs(chi2) * renorm / sqr(2 * vVec(2) * par_uvr(2) * tempS); + printIt(&circle.par, "circle_fit - CIRCLE PARAMETERS:"); + printIt(&circle.cov, "circle_fit - CIRCLE COVARIANCE:"); +#ifdef RFIT_DEBUG + printf("circle_fit - CIRCLE CHARGE: %d\n", circle.qCharge); +#endif + +#ifdef RFIT_DEBUG + printf("circle_fit - ERROR PROPAGATION\n"); +#endif + // ERROR PROPAGATION + if (error) { +#ifdef RFIT_DEBUG + printf("circle_fit - ERROR PRPAGATION ACTIVATED\n"); +#endif + ArrayNd vcsMat[2][2]; // cov matrix of center & scaled points + MatrixNd cMat[3][3]; // cov matrix of 3D transformed points +#ifdef RFIT_DEBUG + printf("circle_fit - ERROR PRPAGATION ACTIVATED 2\n"); +#endif + { + Eigen::Matrix cm; + Eigen::Matrix cm2; + cm = mc.transpose() * vMat * mc; + const double tempC2 = cm(0, 0); + Matrix2Nd tempVcsMat; + tempVcsMat.template triangularView() = + (sqr(tempS) * vMat + sqr(sqr(tempS)) * 1. / (4. * tempQ * n) * + (2. * vMat.squaredNorm() + 4. 
* tempC2) * // mc.transpose() * V * mc) * + (mc * mc.transpose())); + + printIt(&tempVcsMat, "circle_fit - Vcs:"); + cMat[0][0] = tempVcsMat.block(0, 0, n, n).template selfadjointView(); + vcsMat[0][1] = tempVcsMat.block(0, n, n, n); + cMat[1][1] = tempVcsMat.block(n, n, n, n).template selfadjointView(); + vcsMat[1][0] = vcsMat[0][1].transpose(); + printIt(&tempVcsMat, "circle_fit - Vcs:"); + } + + { + const ArrayNd t0 = (VectorXd::Constant(n, 1.) * p3D.row(0)); + const ArrayNd t1 = (VectorXd::Constant(n, 1.) * p3D.row(1)); + const ArrayNd t00 = p3D.row(0).transpose() * p3D.row(0); + const ArrayNd t01 = p3D.row(0).transpose() * p3D.row(1); + const ArrayNd t11 = p3D.row(1).transpose() * p3D.row(1); + const ArrayNd t10 = t01.transpose(); + vcsMat[0][0] = cMat[0][0]; + cMat[0][1] = vcsMat[0][1]; + cMat[0][2] = 2. * (vcsMat[0][0] * t0 + vcsMat[0][1] * t1); + vcsMat[1][1] = cMat[1][1]; + cMat[1][2] = 2. * (vcsMat[1][0] * t0 + vcsMat[1][1] * t1); + MatrixNd tmp; + tmp.template triangularView() = + (2. * (vcsMat[0][0] * vcsMat[0][0] + vcsMat[0][0] * vcsMat[0][1] + vcsMat[1][1] * vcsMat[1][0] + + vcsMat[1][1] * vcsMat[1][1]) + + 4. 
* (vcsMat[0][0] * t00 + vcsMat[0][1] * t01 + vcsMat[1][0] * t10 + vcsMat[1][1] * t11)) + .matrix(); + cMat[2][2] = tmp.template selfadjointView(); + } + printIt(&cMat[0][0], "circle_fit - C[0][0]:"); + + Matrix3d c0Mat; // cov matrix of center of gravity (r0.x,r0.y,r0.z) + for (uint i = 0; i < 3; ++i) { + for (uint j = i; j < 3; ++j) { + Eigen::Matrix tmp; + tmp = weight.transpose() * cMat[i][j] * weight; + // Workaround to get things working in GPU + const double tempC = tmp(0, 0); + c0Mat(i, j) = tempC; //weight.transpose() * C[i][j] * weight; + c0Mat(j, i) = c0Mat(i, j); + } + } + printIt(&c0Mat, "circle_fit - C0:"); + + const MatrixNd wMat = weight * weight.transpose(); + const MatrixNd hMat = MatrixNd::Identity().rowwise() - weight.transpose(); + const MatrixNx3d s_v = hMat * p3D.transpose(); + printIt(&wMat, "circle_fit - W:"); + printIt(&hMat, "circle_fit - H:"); + printIt(&s_v, "circle_fit - s_v:"); + + MatrixNd dMat[3][3]; // cov(s_v) + dMat[0][0] = (hMat * cMat[0][0] * hMat.transpose()).cwiseProduct(wMat); + dMat[0][1] = (hMat * cMat[0][1] * hMat.transpose()).cwiseProduct(wMat); + dMat[0][2] = (hMat * cMat[0][2] * hMat.transpose()).cwiseProduct(wMat); + dMat[1][1] = (hMat * cMat[1][1] * hMat.transpose()).cwiseProduct(wMat); + dMat[1][2] = (hMat * cMat[1][2] * hMat.transpose()).cwiseProduct(wMat); + dMat[2][2] = (hMat * cMat[2][2] * hMat.transpose()).cwiseProduct(wMat); + dMat[1][0] = dMat[0][1].transpose(); + dMat[2][0] = dMat[0][2].transpose(); + dMat[2][1] = dMat[1][2].transpose(); + printIt(&dMat[0][0], "circle_fit - D_[0][0]:"); + + constexpr uint nu[6][2] = {{0, 0}, {0, 1}, {0, 2}, {1, 1}, {1, 2}, {2, 2}}; + + Matrix6d eMat; // cov matrix of the 6 independent elements of A + for (uint a = 0; a < 6; ++a) { + const uint i = nu[a][0], j = nu[a][1]; + for (uint b = a; b < 6; ++b) { + const uint k = nu[b][0], l = nu[b][1]; + VectorNd t0(n); + VectorNd t1(n); + if (l == k) { + t0 = 2. * dMat[j][l] * s_v.col(l); + if (i == j) + t1 = t0; + else + t1 = 2. 
* dMat[i][l] * s_v.col(l); + } else { + t0 = dMat[j][l] * s_v.col(k) + dMat[j][k] * s_v.col(l); + if (i == j) + t1 = t0; + else + t1 = dMat[i][l] * s_v.col(k) + dMat[i][k] * s_v.col(l); + } + + if (i == j) { + Eigen::Matrix cm; + cm = s_v.col(i).transpose() * (t0 + t1); + // Workaround to get things working in GPU + const double tempC = cm(0, 0); + eMat(a, b) = 0. + tempC; + } else { + Eigen::Matrix cm; + cm = (s_v.col(i).transpose() * t0) + (s_v.col(j).transpose() * t1); + // Workaround to get things working in GPU + const double tempC = cm(0, 0); + eMat(a, b) = 0. + tempC; //(s_v.col(i).transpose() * t0) + (s_v.col(j).transpose() * t1); + } + if (b != a) + eMat(b, a) = eMat(a, b); + } + } + printIt(&eMat, "circle_fit - E:"); + + Eigen::Matrix j2Mat; // Jacobian of min_eigen() (numerically computed) + for (uint a = 0; a < 6; ++a) { + const uint i = nu[a][0], j = nu[a][1]; + Matrix3d delta = Matrix3d::Zero(); + delta(i, j) = delta(j, i) = abs(aMat(i, j) * epsilon); + j2Mat.col(a) = min_eigen3D_fast(acc, aMat + delta); + const int sign = (j2Mat.col(a)(2) > 0) ? 1 : -1; + j2Mat.col(a) = (j2Mat.col(a) * sign - vVec) / delta(i, j); + } + printIt(&j2Mat, "circle_fit - J2:"); + + Matrix4d cvcMat; // joint cov matrix of (v0,v1,v2,c) + { + Matrix3d t0 = j2Mat * eMat * j2Mat.transpose(); + Vector3d t1 = -t0 * r0; + cvcMat.block(0, 0, 3, 3) = t0; + cvcMat.block(0, 3, 3, 1) = t1; + cvcMat.block(3, 0, 1, 3) = t1.transpose(); + Eigen::Matrix cm1; + Eigen::Matrix cm3; + cm1 = (vVec.transpose() * c0Mat * vVec); + // cm2 = (c0Mat.cwiseProduct(t0)).sum(); + cm3 = (r0.transpose() * t0 * r0); + // Workaround to get things working in GPU + const double tempC = cm1(0, 0) + (c0Mat.cwiseProduct(t0)).sum() + cm3(0, 0); + cvcMat(3, 3) = tempC; + // (v.transpose() * c0Mat * v) + (c0Mat.cwiseProduct(t0)).sum() + (r0.transpose() * t0 * r0); + } + printIt(&cvcMat, "circle_fit - Cvc:"); + + Eigen::Matrix j3Mat; // Jacobian (v0,v1,v2,c)->(X0,Y0,R) + { + const double t = 1. 
/ tempH; + j3Mat << -v2x2_inv, 0, vVec(0) * sqr(v2x2_inv) * 2., 0, 0, -v2x2_inv, vVec(1) * sqr(v2x2_inv) * 2., 0, + vVec(0) * v2x2_inv * t, vVec(1) * v2x2_inv * t, + -tempH * sqr(v2x2_inv) * 2. - (2. * tempC + vVec(2)) * v2x2_inv * t, -t; + } + printIt(&j3Mat, "circle_fit - J3:"); + + const RowVector2Nd Jq = mc.transpose() * tempS * 1. / n; // var(q) + printIt(&Jq, "circle_fit - Jq:"); + + Matrix3d cov_uvr = j3Mat * cvcMat * j3Mat.transpose() * sqr(s_inv) // cov(X0,Y0,R) + + (par_uvr * par_uvr.transpose()) * (Jq * vMat * Jq.transpose()); + + circle.cov = cov_uvr; + } + + printIt(&circle.cov, "Circle cov:"); +#ifdef RFIT_DEBUG + printf("circle_fit - exit\n"); +#endif + return circle; + } + + /*! \brief Perform an ordinary least square fit in the s-z plane to compute + * the parameters cotTheta and Zip. + * + * The fit is performed in the rotated S3D-Z' plane, following the formalism of + * Frodesen, Chapter 10, p. 259. + * + * The system has been rotated to both try to use the combined errors in s-z + * along Z', as errors in the Y direction and to avoid the patological case of + * degenerate lines with angular coefficient m = +/- inf. + * + * The rotation is using the information on the theta angle computed in the + * fast fit. The rotation is such that the S3D axis will be the X-direction, + * while the rotated Z-axis will be the Y-direction. This pretty much follows + * what is done in the same fit in the Broken Line approach. + */ + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE LineFit lineFit(const TAcc& acc, + const M3xN& hits, + const M6xN& hits_ge, + const CircleFit& circle, + const V4& fast_fit, + const double bField, + const bool error) { + constexpr uint32_t N = M3xN::ColsAtCompileTime; + constexpr auto n = N; + double theta = -circle.qCharge * atan(fast_fit(3)); + theta = theta < 0. ? 
theta + M_PI : theta; + + // Prepare the Rotation Matrix to rotate the points + Eigen::Matrix rot; + rot << sin(theta), cos(theta), -cos(theta), sin(theta); + + // PROJECTION ON THE CILINDER + // + // p2D will be: + // [s1, s2, s3, ..., sn] + // [z1, z2, z3, ..., zn] + // s values will be ordinary x-values + // z values will be ordinary y-values + + Matrix2xNd p2D = Matrix2xNd::Zero(); + Eigen::Matrix jxMat; + +#ifdef RFIT_DEBUG + printf("Line_fit - B: %g\n", bField); + printIt(&hits, "Line_fit points: "); + printIt(&hits_ge, "Line_fit covs: "); + printIt(&rot, "Line_fit rot: "); +#endif + // x & associated Jacobian + // cfr https://indico.cern.ch/event/663159/contributions/2707659/attachments/1517175/2368189/Riemann_fit.pdf + // Slide 11 + // a ==> -o i.e. the origin of the circle in XY plane, negative + // b ==> p i.e. distances of the points wrt the origin of the circle. + const Vector2d oVec(circle.par(0), circle.par(1)); + + // associated Jacobian, used in weights and errors computation + Matrix6d covMat = Matrix6d::Zero(); + Matrix2d cov_sz[N]; + for (uint i = 0; i < n; ++i) { + Vector2d pVec = hits.block(0, i, 2, 1) - oVec; + const double cross = cross2D(acc, -oVec, pVec); + const double dot = (-oVec).dot(pVec); + // atan2(cross, dot) give back the angle in the transverse plane so tha the + // final equation reads: x_i = -q*R*theta (theta = angle returned by atan2) + const double tempQAtan2 = -circle.qCharge * atan2(cross, dot); + // p2D.coeffRef(1, i) = atan2_ * circle.par(2); + p2D(0, i) = tempQAtan2 * circle.par(2); + + // associated Jacobian, used in weights and errors- computation + const double temp0 = -circle.qCharge * circle.par(2) * 1. 
/ (sqr(dot) + sqr(cross)); + double d_X0 = 0., d_Y0 = 0., d_R = 0.; // good approximation for big pt and eta + if (error) { + d_X0 = -temp0 * ((pVec(1) + oVec(1)) * dot - (pVec(0) - oVec(0)) * cross); + d_Y0 = temp0 * ((pVec(0) + oVec(0)) * dot - (oVec(1) - pVec(1)) * cross); + d_R = tempQAtan2; + } + const double d_x = temp0 * (oVec(1) * dot + oVec(0) * cross); + const double d_y = temp0 * (-oVec(0) * dot + oVec(1) * cross); + jxMat << d_X0, d_Y0, d_R, d_x, d_y, 0., 0., 0., 0., 0., 0., 1.; + + covMat.block(0, 0, 3, 3) = circle.cov; + covMat(3, 3) = hits_ge.col(i)[0]; // x errors + covMat(4, 4) = hits_ge.col(i)[2]; // y errors + covMat(5, 5) = hits_ge.col(i)[5]; // z errors + covMat(3, 4) = covMat(4, 3) = hits_ge.col(i)[1]; // cov_xy + covMat(3, 5) = covMat(5, 3) = hits_ge.col(i)[3]; // cov_xz + covMat(4, 5) = covMat(5, 4) = hits_ge.col(i)[4]; // cov_yz + Matrix2d tmp = jxMat * covMat * jxMat.transpose(); + cov_sz[i].noalias() = rot * tmp * rot.transpose(); + } + // Math of d_{X0,Y0,R,x,y} all verified by hand + p2D.row(1) = hits.row(2); + + // The following matrix will contain errors orthogonal to the rotated S + // component only, with the Multiple Scattering properly treated!! 
+ MatrixNd cov_with_ms; + scatterCovLine(acc, cov_sz, fast_fit, p2D.row(0), p2D.row(1), theta, bField, cov_with_ms); +#ifdef RFIT_DEBUG + printIt(cov_sz, "line_fit - cov_sz:"); + printIt(&cov_with_ms, "line_fit - cov_with_ms: "); +#endif + + // Rotate Points with the shape [2, n] + Matrix2xNd p2D_rot = rot * p2D; + +#ifdef RFIT_DEBUG + printf("Fast fit Tan(theta): %g\n", fast_fit(3)); + printf("Rotation angle: %g\n", theta); + printIt(&rot, "Rotation Matrix:"); + printIt(&p2D, "Original Hits(s,z):"); + printIt(&p2D_rot, "Rotated hits(S3D, Z'):"); + printIt(&rot, "Rotation Matrix:"); +#endif + + // Build the A Matrix + Matrix2xNd aMat; + aMat << MatrixXd::Ones(1, n), p2D_rot.row(0); // rotated s values + +#ifdef RFIT_DEBUG + printIt(&aMat, "A Matrix:"); +#endif + + // Build A^T V-1 A, where V-1 is the covariance of only the Y components. + MatrixNd vyInvMat; + math::cholesky::invert(cov_with_ms, vyInvMat); + // MatrixNd vyInvMat = cov_with_ms.inverse(); + Eigen::Matrix covParamsMat = aMat * vyInvMat * aMat.transpose(); + // Compute the Covariance Matrix of the fit parameters + math::cholesky::invert(covParamsMat, covParamsMat); + + // Now Compute the Parameters in the form [2,1] + // The first component is q. + // The second component is m. + Eigen::Matrix sol = covParamsMat * aMat * vyInvMat * p2D_rot.row(1).transpose(); + +#ifdef RFIT_DEBUG + printIt(&sol, "Rotated solutions:"); +#endif + + // We need now to transfer back the results in the original s-z plane + const auto sinTheta = sin(theta); + const auto cosTheta = cos(theta); + auto common_factor = 1. 
/ (sinTheta - sol(1, 0) * cosTheta); + Eigen::Matrix jMat; + jMat << 0., common_factor * common_factor, common_factor, sol(0, 0) * cosTheta * common_factor * common_factor; + + double tempM = common_factor * (sol(1, 0) * sinTheta + cosTheta); + double tempQ = common_factor * sol(0, 0); + auto cov_mq = jMat * covParamsMat * jMat.transpose(); + + VectorNd res = p2D_rot.row(1).transpose() - aMat.transpose() * sol; + double chi2 = res.transpose() * vyInvMat * res; + + LineFit line; + line.par << tempM, tempQ; + line.cov << cov_mq; + line.chi2 = chi2; + +#ifdef RFIT_DEBUG + printf("Common_factor: %g\n", common_factor); + printIt(&jMat, "Jacobian:"); + printIt(&sol, "Rotated solutions:"); + printIt(&covParamsMat, "Cov_params:"); + printIt(&cov_mq, "Rotated Covariance Matrix:"); + printIt(&(line.par), "Real Parameters:"); + printIt(&(line.cov), "Real Covariance Matrix:"); + printf("Chi2: %g\n", chi2); +#endif + + return line; + } + + } // namespace riemannFit +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +namespace riemannFit { + /*! + \brief Helix fit by three step: + -fast pre-fit (see Fast_fit() for further info); \n + -circle fit of hits projected in the transverse plane by Riemann-Chernov + algorithm (see Circle_fit() for further info); \n + -line fit of hits projected on cylinder surface by orthogonal distance + regression (see Line_fit for further info). \n + Points must be passed ordered (from inner to outer layer). + \param hits Matrix3xNd hits coordinates in this form: \n + |x0|x1|x2|...|xn| \n + |y0|y1|y2|...|yn| \n + |z0|z1|z2|...|zn| + \param hits_cov Matrix3Nd covariance matrix in this form (()->cov()): \n + |(x0,x0)|(x1,x0)|(x2,x0)|.|(y0,x0)|(y1,x0)|(y2,x0)|.|(z0,x0)|(z1,x0)|(z2,x0)| \n + |(x0,x1)|(x1,x1)|(x2,x1)|.|(y0,x1)|(y1,x1)|(y2,x1)|.|(z0,x1)|(z1,x1)|(z2,x1)| \n + |(x0,x2)|(x1,x2)|(x2,x2)|.|(y0,x2)|(y1,x2)|(y2,x2)|.|(z0,x2)|(z1,x2)|(z2,x2)| \n + . . . . . . . . . . . 
\n + |(x0,y0)|(x1,y0)|(x2,y0)|.|(y0,y0)|(y1,y0)|(y2,x0)|.|(z0,y0)|(z1,y0)|(z2,y0)| \n + |(x0,y1)|(x1,y1)|(x2,y1)|.|(y0,y1)|(y1,y1)|(y2,x1)|.|(z0,y1)|(z1,y1)|(z2,y1)| \n + |(x0,y2)|(x1,y2)|(x2,y2)|.|(y0,y2)|(y1,y2)|(y2,x2)|.|(z0,y2)|(z1,y2)|(z2,y2)| \n + . . . . . . . . . . . \n + |(x0,z0)|(x1,z0)|(x2,z0)|.|(y0,z0)|(y1,z0)|(y2,z0)|.|(z0,z0)|(z1,z0)|(z2,z0)| \n + |(x0,z1)|(x1,z1)|(x2,z1)|.|(y0,z1)|(y1,z1)|(y2,z1)|.|(z0,z1)|(z1,z1)|(z2,z1)| \n + |(x0,z2)|(x1,z2)|(x2,z2)|.|(y0,z2)|(y1,z2)|(y2,z2)|.|(z0,z2)|(z1,z2)|(z2,z2)| + \param bField magnetic field in the center of the detector in Gev/cm/c + unit, in order to perform pt calculation. + \param error flag for error computation. + \param scattering flag for multiple scattering treatment. + (see Circle_fit() documentation for further info). + \warning see Circle_fit(), Line_fit() and Fast_fit() warnings. + \bug see Circle_fit(), Line_fit() and Fast_fit() bugs. +*/ + + template + class helixFit { + public: + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void operator()(const TAcc& acc, + const Matrix3xNd* hits, + const Eigen::Matrix* hits_ge, + const double bField, + const bool error, + HelixFit* helix) const { + constexpr uint n = N; + VectorNd<4> rad = (hits->block(0, 0, 2, n).colwise().norm()); + + // Fast_fit gives back (X0, Y0, R, theta) w/o errors, using only 3 points. 
+ Vector4d fast_fit; + ALPAKA_ACCELERATOR_NAMESPACE::riemannFit::fastFit(acc, *hits, fast_fit); + riemannFit::Matrix2Nd hits_cov = MatrixXd::Zero(2 * n, 2 * n); + ALPAKA_ACCELERATOR_NAMESPACE::riemannFit::loadCovariance2D(acc, *hits_ge, hits_cov); + CircleFit circle = ALPAKA_ACCELERATOR_NAMESPACE::riemannFit::circleFit( + acc, hits->block(0, 0, 2, n), hits_cov, fast_fit, rad, bField, error); + LineFit line = + ALPAKA_ACCELERATOR_NAMESPACE::riemannFit::lineFit(acc, *hits, *hits_ge, circle, fast_fit, bField, error); + + ALPAKA_ACCELERATOR_NAMESPACE::riemannFit::par_uvrtopak(acc, circle, bField, error); + + helix->par << circle.par, line.par; + if (error) { + helix->cov = MatrixXd::Zero(5, 5); + helix->cov.block(0, 0, 3, 3) = circle.cov; + helix->cov.block(3, 3, 2, 2) = line.cov; + } + helix->qCharge = circle.qCharge; + helix->chi2_circle = circle.chi2; + helix->chi2_line = line.chi2; + } + }; +} // namespace riemannFit +#endif // RecoPixelVertexing_PixelTrackFitting_interface_RiemannFit_h diff --git a/RecoTracker/PixelTrackFitting/plugins/BuildFile.xml b/RecoTracker/PixelTrackFitting/plugins/BuildFile.xml index d28dad5793a66..6c8c102293651 100644 --- a/RecoTracker/PixelTrackFitting/plugins/BuildFile.xml +++ b/RecoTracker/PixelTrackFitting/plugins/BuildFile.xml @@ -1,8 +1,10 @@ - - - - - + + + + + + + diff --git a/RecoTracker/PixelTrackFitting/plugins/PixelTrackDumpAlpaka.cc b/RecoTracker/PixelTrackFitting/plugins/PixelTrackDumpAlpaka.cc new file mode 100644 index 0000000000000..c4f0b97dba8a9 --- /dev/null +++ b/RecoTracker/PixelTrackFitting/plugins/PixelTrackDumpAlpaka.cc @@ -0,0 +1,79 @@ +#include // needed here by soa layout + +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/global/EDAnalyzer.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include 
"FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/PluginManager/interface/ModuleDef.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" + +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" + +template +class PixelTrackDumpAlpakaT : public edm::global::EDAnalyzer<> { +public: + using TkSoAHost = TracksHost; + using VertexSoAHost = ZVertexHost; + + explicit PixelTrackDumpAlpakaT(const edm::ParameterSet& iConfig); + ~PixelTrackDumpAlpakaT() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + +private: + void analyze(edm::StreamID streamID, edm::Event const& iEvent, const edm::EventSetup& iSetup) const override; + edm::EDGetTokenT tokenSoATrack_; + edm::EDGetTokenT tokenSoAVertex_; +}; + +template +PixelTrackDumpAlpakaT::PixelTrackDumpAlpakaT(const edm::ParameterSet& iConfig) { + tokenSoATrack_ = consumes(iConfig.getParameter("pixelTrackSrc")); + tokenSoAVertex_ = consumes(iConfig.getParameter("pixelVertexSrc")); +} + +template +void PixelTrackDumpAlpakaT::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("pixelTrackSrc", edm::InputTag("pixelTracksAlpaka")); + desc.add("pixelVertexSrc", edm::InputTag("pixelVerticesAlpaka")); + descriptions.addWithDefaultLabel(desc); +} + +template +void PixelTrackDumpAlpakaT::analyze(edm::StreamID streamID, + edm::Event const& iEvent, + const edm::EventSetup& iSetup) const { + auto const& tracks = iEvent.get(tokenSoATrack_); + assert(tracks.view().quality()); + assert(tracks.view().chi2()); + assert(tracks.view().nLayers()); + assert(tracks.view().eta()); + assert(tracks.view().pt()); + assert(tracks.view().state()); + assert(tracks.view().covariance()); + assert(tracks.view().nTracks()); + + auto const& vertices = iEvent.get(tokenSoAVertex_); + assert(vertices.view().idv()); + 
assert(vertices.view().zv()); + assert(vertices.view().wv()); + assert(vertices.view().chi2()); + assert(vertices.view().ptv2()); + assert(vertices.view().ndof()); + assert(vertices.view().sortInd()); + assert(vertices.view().nvFinal()); +} + +using PixelTrackDumpAlpakaPhase1 = PixelTrackDumpAlpakaT; +using PixelTrackDumpAlpakaPhase2 = PixelTrackDumpAlpakaT; +using PixelTrackDumpAlpakaHIonPhase1 = PixelTrackDumpAlpakaT; + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(PixelTrackDumpAlpakaPhase1); +DEFINE_FWK_MODULE(PixelTrackDumpAlpakaPhase2); +DEFINE_FWK_MODULE(PixelTrackDumpAlpakaHIonPhase1); diff --git a/RecoTracker/PixelTrackFitting/plugins/PixelTrackProducerFromSoAAlpaka.cc b/RecoTracker/PixelTrackFitting/plugins/PixelTrackProducerFromSoAAlpaka.cc new file mode 100644 index 0000000000000..4402a1891b2a4 --- /dev/null +++ b/RecoTracker/PixelTrackFitting/plugins/PixelTrackProducerFromSoAAlpaka.cc @@ -0,0 +1,264 @@ +#include + +#include "DataFormats/BeamSpot/interface/BeamSpot.h" +#include "DataFormats/GeometrySurface/interface/Plane.h" +#include "DataFormats/SiPixelClusterSoA/interface/ClusteringConstants.h" +#include "DataFormats/TrackSoA/interface/TracksHost.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/TrackReco/interface/TrackExtra.h" +#include "DataFormats/TrackReco/interface/TrackFwd.h" +#include "DataFormats/TrackerCommon/interface/TrackerTopology.h" +#include "DataFormats/TrackerRecHit2D/interface/SiPixelRecHitCollection.h" +#include "DataFormats/TrajectoryState/interface/LocalTrajectoryParameters.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include 
"FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "TrackingTools/AnalyticalJacobians/interface/JacobianLocalToCurvilinear.h" +#include "TrackingTools/TrajectoryParametrization/interface/CurvilinearTrajectoryError.h" +#include "TrackingTools/TrajectoryParametrization/interface/GlobalTrajectoryParameters.h" + +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "RecoTracker/PixelTrackFitting/interface/alpaka/FitUtils.h" + +#include "storeTracks.h" + +/** + * This class creates "legacy" reco::Track + * objects from the output of SoA CA. + */ + +//#define GPU_DEBUG + +template +class PixelTrackProducerFromSoAAlpaka : public edm::global::EDProducer<> { + using TkSoAHost = TracksHost; + using tracksHelpers = TracksUtilities; + using HMSstorage = std::vector; + +public: + using IndToEdm = std::vector; + + explicit PixelTrackProducerFromSoAAlpaka(const edm::ParameterSet &iConfig); + ~PixelTrackProducerFromSoAAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + +private: + void produce(edm::StreamID streamID, edm::Event &iEvent, const edm::EventSetup &iSetup) const override; + + // Event Data tokens + const edm::EDGetTokenT tBeamSpot_; + const edm::EDGetTokenT tokenTrack_; + const edm::EDGetTokenT cpuHits_; + const edm::EDGetTokenT hmsToken_; + // Event Setup tokens + const edm::ESGetToken idealMagneticFieldToken_; + const edm::ESGetToken ttTopoToken_; + + int32_t const minNumberOfHits_; + pixelTrack::Quality const minQuality_; +}; + +template +PixelTrackProducerFromSoAAlpaka::PixelTrackProducerFromSoAAlpaka(const edm::ParameterSet &iConfig) + : 
tBeamSpot_(consumes(iConfig.getParameter("beamSpot"))), + tokenTrack_(consumes(iConfig.getParameter("trackSrc"))), + cpuHits_(consumes(iConfig.getParameter("pixelRecHitLegacySrc"))), + hmsToken_(consumes(iConfig.getParameter("pixelRecHitLegacySrc"))), + idealMagneticFieldToken_(esConsumes()), + ttTopoToken_(esConsumes()), + minNumberOfHits_(iConfig.getParameter("minNumberOfHits")), + minQuality_(pixelTrack::qualityByName(iConfig.getParameter("minQuality"))) { + if (minQuality_ == pixelTrack::Quality::notQuality) { + throw cms::Exception("PixelTrackConfiguration") + << iConfig.getParameter("minQuality") + " is not a pixelTrack::Quality"; + } + if (minQuality_ < pixelTrack::Quality::dup) { + throw cms::Exception("PixelTrackConfiguration") + << iConfig.getParameter("minQuality") + " not supported"; + } + produces(); + produces(); + // TrackCollection refers to TrackingRechit and TrackExtra + // collections, need to declare its production after them to work + // around a rare race condition in framework scheduling + produces(); + produces(); +} + +template +void PixelTrackProducerFromSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + desc.add("beamSpot", edm::InputTag("offlineBeamSpot")); + desc.add("trackSrc", edm::InputTag("pixelTracksAlpaka")); + desc.add("pixelRecHitLegacySrc", edm::InputTag("siPixelRecHitsPreSplittingLegacy")); + desc.add("minNumberOfHits", 0); + desc.add("minQuality", "loose"); + descriptions.addWithDefaultLabel(desc); +} + +template +void PixelTrackProducerFromSoAAlpaka::produce(edm::StreamID streamID, + edm::Event &iEvent, + const edm::EventSetup &iSetup) const { + // enum class Quality : uint8_t { bad = 0, edup, dup, loose, strict, tight, highPurity }; + reco::TrackBase::TrackQuality recoQuality[] = {reco::TrackBase::undefQuality, + reco::TrackBase::undefQuality, + reco::TrackBase::discarded, + reco::TrackBase::loose, + reco::TrackBase::tight, + reco::TrackBase::tight, + 
reco::TrackBase::highPurity}; + assert(reco::TrackBase::highPurity == recoQuality[int(pixelTrack::Quality::highPurity)]); + +#ifdef GPU_DEBUG + std::cout << "Converting soa helix in reco tracks" << std::endl; +#endif + + auto indToEdmP = std::make_unique(); + auto &indToEdm = *indToEdmP; + + auto const &idealField = iSetup.getData(idealMagneticFieldToken_); + + pixeltrackfitting::TracksWithRecHits tracks; + + auto const &httopo = iSetup.getData(ttTopoToken_); + + const auto &bsh = iEvent.get(tBeamSpot_); + GlobalPoint bs(bsh.x0(), bsh.y0(), bsh.z0()); + + auto const &rechits = iEvent.get(cpuHits_); + std::vector hitmap; + auto const &rcs = rechits.data(); + auto const nhits = rcs.size(); + + hitmap.resize(nhits, nullptr); + + auto const &hitsModuleStart = iEvent.get(hmsToken_); + + for (auto const &hit : rcs) { + auto const &thit = static_cast(hit); + auto const detI = thit.det()->index(); + auto const &clus = thit.firstClusterRef(); + assert(clus.isPixel()); + auto const idx = hitsModuleStart[detI] + clus.pixelCluster().originalId(); + if (idx >= hitmap.size()) + hitmap.resize(idx + 256, nullptr); // only in case of hit overflow in one module + + assert(nullptr == hitmap[idx]); + hitmap[idx] = &hit; + } + + std::vector hits; + hits.reserve(5); + + auto const &tsoa = iEvent.get(tokenTrack_); + auto const *quality = tsoa.view().quality(); + auto const &hitIndices = tsoa.view().hitIndices(); + auto nTracks = tsoa.view().nTracks(); + + tracks.reserve(nTracks); + + int32_t nt = 0; + + //sort index by pt + std::vector sortIdxs(nTracks); + std::iota(sortIdxs.begin(), sortIdxs.end(), 0); + std::sort(sortIdxs.begin(), sortIdxs.end(), [&](int32_t const i1, int32_t const i2) { + return tsoa.view()[i1].pt() > tsoa.view()[i2].pt(); + }); + + //store the index of the SoA: indToEdm[index_SoAtrack] -> index_edmTrack (if it exists) + indToEdm.resize(sortIdxs.size(), -1); + for (const auto &it : sortIdxs) { + auto nHits = tracksHelpers::nHits(tsoa.view(), it); + assert(nHits >= 3); 
+ auto q = quality[it]; + + if (q < minQuality_) + continue; + if (nHits < minNumberOfHits_) //move to nLayers? + continue; + indToEdm[it] = nt; + ++nt; + + hits.resize(nHits); + auto b = hitIndices.begin(it); + for (int iHit = 0; iHit < nHits; ++iHit) + hits[iHit] = hitmap[*(b + iHit)]; + + // mind: this values are respect the beamspot! + + float chi2 = tsoa.view()[it].chi2(); + float phi = reco::phi(tsoa.view(), it); + + riemannFit::Vector5d ipar, opar; + riemannFit::Matrix5d icov, ocov; + tracksHelpers::template copyToDense(tsoa.view(), ipar, icov, it); + riemannFit::transformToPerigeePlane(ipar, icov, opar, ocov); + + LocalTrajectoryParameters lpar(opar(0), opar(1), opar(2), opar(3), opar(4), 1.); + AlgebraicSymMatrix55 m; + for (int i = 0; i < 5; ++i) + for (int j = i; j < 5; ++j) + m(i, j) = ocov(i, j); + + float sp = std::sin(phi); + float cp = std::cos(phi); + Surface::RotationType rot(sp, -cp, 0, 0, 0, -1.f, cp, sp, 0); + + Plane impPointPlane(bs, rot); + GlobalTrajectoryParameters gp( + impPointPlane.toGlobal(lpar.position()), impPointPlane.toGlobal(lpar.momentum()), lpar.charge(), &idealField); + JacobianLocalToCurvilinear jl2c(impPointPlane, lpar, idealField); + + AlgebraicSymMatrix55 mo = ROOT::Math::Similarity(jl2c.jacobian(), m); + + int ndof = 2 * hits.size() - 5; + chi2 = chi2 * ndof; + GlobalPoint vv = gp.position(); + math::XYZPoint pos(vv.x(), vv.y(), vv.z()); + GlobalVector pp = gp.momentum(); + math::XYZVector mom(pp.x(), pp.y(), pp.z()); + + auto track = std::make_unique(chi2, ndof, pos, mom, gp.charge(), CurvilinearTrajectoryError(mo)); + + // bad and edup not supported as fit not present or not reliable + auto tkq = recoQuality[int(q)]; + track->setQuality(tkq); + // loose,tight and HP are inclusive + if (reco::TrackBase::highPurity == tkq) { + track->setQuality(reco::TrackBase::tight); + track->setQuality(reco::TrackBase::loose); + } else if (reco::TrackBase::tight == tkq) { + track->setQuality(reco::TrackBase::loose); + } + 
track->setQuality(tkq); + // filter??? + tracks.emplace_back(track.release(), hits); + } +#ifdef GPU_DEBUG + std::cout << "processed " << nt << " good tuples " << tracks.size() << " out of " << indToEdm.size() << std::endl; +#endif + // store tracks + storeTracks(iEvent, tracks, httopo); + iEvent.put(std::move(indToEdmP)); +} + +using PixelTrackProducerFromSoAAlpakaPhase1 = PixelTrackProducerFromSoAAlpaka; +using PixelTrackProducerFromSoAAlpakaPhase2 = PixelTrackProducerFromSoAAlpaka; +using PixelTrackProducerFromSoAAlpakaHIonPhase1 = PixelTrackProducerFromSoAAlpaka; + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(PixelTrackProducerFromSoAAlpakaPhase1); +DEFINE_FWK_MODULE(PixelTrackProducerFromSoAAlpakaPhase2); +DEFINE_FWK_MODULE(PixelTrackProducerFromSoAAlpakaHIonPhase1); diff --git a/RecoTracker/PixelTrackFitting/python/PixelTracks_cff.py b/RecoTracker/PixelTrackFitting/python/PixelTracks_cff.py index 91eb380a33da9..046caa0b033f3 100644 --- a/RecoTracker/PixelTrackFitting/python/PixelTracks_cff.py +++ b/RecoTracker/PixelTrackFitting/python/PixelTracks_cff.py @@ -1,4 +1,5 @@ import FWCore.ParameterSet.Config as cms +from HeterogeneousCore.AlpakaCore.functions import * from HeterogeneousCore.CUDACore.SwitchProducerCUDA import SwitchProducerCUDA from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import * @@ -203,3 +204,42 @@ (pixelNtupletFit & gpu & gpuValidationPixel).toModify(pixelTracksSoA.cpu, pixelRecHitSrc = "siPixelRecHitsPreSplittingSoA@cpu" ) + +###################################################################### + +### Alpaka Pixel Track Reco + +from Configuration.ProcessModifiers.alpaka_cff import alpaka + +# pixel tracks SoA producer on the device +from RecoTracker.PixelSeeding.caHitNtupletAlpakaPhase1_cfi import caHitNtupletAlpakaPhase1 as _pixelTracksAlpakaPhase1 +from RecoTracker.PixelSeeding.caHitNtupletAlpakaPhase2_cfi import caHitNtupletAlpakaPhase2 as _pixelTracksAlpakaPhase2 + +pixelTracksAlpaka = 
_pixelTracksAlpakaPhase1.clone() +phase2_tracker.toReplaceWith(pixelTracksAlpaka,_pixelTracksAlpakaPhase2.clone()) + +# pixel tracks SoA producer on the cpu, for validation +pixelTracksAlpakaSerial = makeSerialClone(pixelTracksAlpaka, + pixelRecHitSrc = 'siPixelRecHitsPreSplittingAlpakaSerial' +) + +# legacy pixel tracks from SoA +from RecoTracker.PixelTrackFitting.pixelTrackProducerFromSoAAlpakaPhase1_cfi import pixelTrackProducerFromSoAAlpakaPhase1 as _pixelTrackProducerFromSoAAlpakaPhase1 +from RecoTracker.PixelTrackFitting.pixelTrackProducerFromSoAAlpakaPhase2_cfi import pixelTrackProducerFromSoAAlpakaPhase2 as _pixelTrackProducerFromSoAAlpakaPhase2 + +(alpaka & ~phase2_tracker).toReplaceWith(pixelTracks, _pixelTrackProducerFromSoAAlpakaPhase1.clone( + pixelRecHitLegacySrc = "siPixelRecHitsPreSplitting", +)) + +(alpaka & phase2_tracker).toReplaceWith(pixelTracks, _pixelTrackProducerFromSoAAlpakaPhase2.clone( + pixelRecHitLegacySrc = "siPixelRecHitsPreSplitting", +)) + +alpaka.toReplaceWith(pixelTracksTask, cms.Task( + # Build the pixel ntuplets and the pixel tracks in SoA format with alpaka on the device + pixelTracksAlpaka, + # Build the pixel ntuplets and the pixel tracks in SoA format with alpaka on the cpu (if requested by the validation) + pixelTracksAlpakaSerial, + # Convert the pixel tracks from SoA to legacy format + pixelTracks) +) diff --git a/RecoTracker/PixelVertexFinding/BuildFile.xml b/RecoTracker/PixelVertexFinding/BuildFile.xml index 6171a7a94824a..aebe052016d0d 100644 --- a/RecoTracker/PixelVertexFinding/BuildFile.xml +++ b/RecoTracker/PixelVertexFinding/BuildFile.xml @@ -1,3 +1,5 @@ + + @@ -5,7 +7,6 @@ - diff --git a/RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h b/RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h new file mode 100644 index 0000000000000..0948d88ef3acf --- /dev/null +++ b/RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h @@ -0,0 +1,33 @@ +#ifndef 
RecoTracker_PixelVertexFinding_interface_PixelVertexWorkSpaceLayout_h +#define RecoTracker_PixelVertexFinding_interface_PixelVertexWorkSpaceLayout_h + +#include + +#include "DataFormats/SoATemplate/interface/SoALayout.h" + +// Intermediate data used in the vertex reco algos +// For internal use only +namespace vertexFinder { + + GENERATE_SOA_LAYOUT(PixelVertexWSSoALayout, + SOA_COLUMN(uint16_t, itrk), // index of original track + SOA_COLUMN(float, zt), // input track z at bs + SOA_COLUMN(float, ezt2), // input error^2 on the above + SOA_COLUMN(float, ptt2), // input pt^2 on the above + SOA_COLUMN(uint8_t, izt), // interized z-position of input tracks + SOA_COLUMN(int32_t, iv), // vertex index for each associated track + SOA_SCALAR(uint32_t, ntrks), // number of "selected tracks" + SOA_SCALAR(uint32_t, nvIntermediate)) // the number of vertices after splitting pruning etc. + + using PixelVertexWorkSpaceSoALayout = PixelVertexWSSoALayout<>; + using PixelVertexWorkSpaceSoAView = PixelVertexWSSoALayout<>::View; + using PixelVertexWorkSpaceSoAConstView = PixelVertexWSSoALayout<>::ConstView; + + ALPAKA_FN_HOST_ACC ALPAKA_FN_INLINE void init(PixelVertexWorkSpaceSoAView& workspace_view) { + workspace_view.ntrks() = 0; + workspace_view.nvIntermediate() = 0; + } + +} // namespace vertexFinder + +#endif // RecoTracker_PixelVertexFinding_interface_PixelVertexWorkSpaceLayout_h diff --git a/RecoTracker/PixelVertexFinding/plugins/BuildFile.xml b/RecoTracker/PixelVertexFinding/plugins/BuildFile.xml index d330676889f26..2df520dffcf5b 100644 --- a/RecoTracker/PixelVertexFinding/plugins/BuildFile.xml +++ b/RecoTracker/PixelVertexFinding/plugins/BuildFile.xml @@ -1,5 +1,3 @@ - - @@ -17,18 +15,33 @@ - - + + + + + + + + + + + + + + + + + diff --git a/RecoTracker/PixelVertexFinding/plugins/PixelVertexProducerFromSoAAlpaka.cc b/RecoTracker/PixelVertexFinding/plugins/PixelVertexProducerFromSoAAlpaka.cc new file mode 100644 index 0000000000000..6e542f7870c2e --- /dev/null +++ 
b/RecoTracker/PixelVertexFinding/plugins/PixelVertexProducerFromSoAAlpaka.cc @@ -0,0 +1,175 @@ +#include "DataFormats/BeamSpot/interface/BeamSpot.h" +#include "DataFormats/Common/interface/OrphanHandle.h" +#include "DataFormats/TrackReco/interface/Track.h" +#include "DataFormats/TrackReco/interface/TrackExtra.h" +#include "DataFormats/TrackReco/interface/TrackFwd.h" +#include "DataFormats/VertexReco/interface/Vertex.h" +#include "DataFormats/VertexReco/interface/VertexFwd.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Framework/interface/global/EDProducer.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/PluginManager/interface/ModuleDef.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "Geometry/Records/interface/TrackerTopologyRcd.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" + +class PixelVertexProducerFromSoAAlpaka : public edm::global::EDProducer<> { +public: + using IndToEdm = std::vector; + + explicit PixelVertexProducerFromSoAAlpaka(const edm::ParameterSet &iConfig); + ~PixelVertexProducerFromSoAAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + +private: + void produce(edm::StreamID streamID, edm::Event &iEvent, const edm::EventSetup &iSetup) const override; + + edm::EDGetTokenT tokenVertex_; + edm::EDGetTokenT tokenBeamSpot_; + edm::EDGetTokenT tokenTracks_; + edm::EDGetTokenT tokenIndToEdm_; +}; + +PixelVertexProducerFromSoAAlpaka::PixelVertexProducerFromSoAAlpaka(const edm::ParameterSet &conf) + : tokenVertex_(consumes(conf.getParameter("src"))), + 
tokenBeamSpot_(consumes(conf.getParameter("beamSpot"))), + tokenTracks_(consumes(conf.getParameter("TrackCollection"))), + tokenIndToEdm_(consumes(conf.getParameter("TrackCollection"))) { + produces(); +} + +void PixelVertexProducerFromSoAAlpaka::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + + desc.add("TrackCollection", edm::InputTag("pixelTracks")); + desc.add("beamSpot", edm::InputTag("offlineBeamSpot")); + desc.add("src", edm::InputTag("pixelVerticesAlpaka")); + + descriptions.add("pixelVertexFromSoAAlpaka", desc); +} + +void PixelVertexProducerFromSoAAlpaka::produce(edm::StreamID streamID, + edm::Event &iEvent, + const edm::EventSetup &) const { + auto vertexes = std::make_unique(); + + auto tracksHandle = iEvent.getHandle(tokenTracks_); + auto tracksSize = tracksHandle->size(); + auto const &indToEdm = iEvent.get(tokenIndToEdm_); + auto bsHandle = iEvent.getHandle(tokenBeamSpot_); + + float x0 = 0, y0 = 0, z0 = 0, dxdz = 0, dydz = 0; + std::vector itrk; + itrk.reserve(64); // avoid first relocations + if (!bsHandle.isValid()) { + edm::LogWarning("PixelVertexProducer") << "No beamspot found. returning vertexes with (0,0,Z) "; + } else { + const reco::BeamSpot &bs = *bsHandle; + x0 = bs.x0(); + y0 = bs.y0(); + z0 = bs.z0(); + dxdz = bs.dxdz(); + dydz = bs.dydz(); + } + + auto const &soa = iEvent.get(tokenVertex_); + + int nv = soa.view().nvFinal(); + +#ifdef PIXVERTEX_DEBUG_PRODUCE + std::cout << "converting " << nv << " vertices " + << " from " << indToEdm.size() << " tracks" << std::endl; +#endif // PIXVERTEX_DEBUG_PRODUCE + + std::set uind; // for verifing index consistency + for (int j = nv - 1; j >= 0; --j) { + auto i = soa.view()[j].sortInd(); // on gpu sorted in ascending order.... 
+ assert(i < nv); + uind.insert(i); + assert(itrk.empty()); + auto z = soa.view()[i].zv(); + auto x = x0 + dxdz * z; + auto y = y0 + dydz * z; + z += z0; + reco::Vertex::Error err; + err(2, 2) = 1.f / soa.view()[i].wv(); + err(2, 2) *= 2.; // artifically inflate error + //Copy also the tracks (no intention to be efficient....) + for (auto k = 0U; k < indToEdm.size(); ++k) { + if (soa.view()[k].idv() == int16_t(i)) + itrk.push_back(k); + } + auto nt = itrk.size(); + if (nt == 0) { +#ifdef PIXVERTEX_DEBUG_PRODUCE + std::cout << "vertex " << i << " with no tracks..." << std::endl; +#endif // PIXVERTEX_DEBUG_PRODUCE + continue; + } + if (nt < 2) { + itrk.clear(); + continue; + } // remove outliers + (*vertexes).emplace_back(reco::Vertex::Point(x, y, z), err, soa.view()[i].chi2(), soa.view()[i].ndof(), nt); + auto &v = (*vertexes).back(); + v.reserve(itrk.size()); + for (auto it : itrk) { + assert(it < int(indToEdm.size())); + auto k = indToEdm[it]; + if (k > tracksSize) { + edm::LogWarning("PixelVertexProducer") << "oops track " << it << " does not exists on CPU " << k; + continue; + } + auto tk = reco::TrackRef(tracksHandle, k); + v.add(tk); + } + itrk.clear(); + } + + LogDebug("PixelVertexProducer") << ": Found " << vertexes->size() << " vertexes\n"; + for (unsigned int i = 0; i < vertexes->size(); ++i) { + LogDebug("PixelVertexProducer") << "Vertex number " << i << " has " << (*vertexes)[i].tracksSize() + << " tracks with a position of " << (*vertexes)[i].z() << " +- " + << std::sqrt((*vertexes)[i].covariance(2, 2)); + } + + // legacy logic.... + if (vertexes->empty() && bsHandle.isValid()) { + const reco::BeamSpot &bs = *bsHandle; + + GlobalError bse(bs.rotatedCovariance3D()); + if ((bse.cxx() <= 0.) || (bse.cyy() <= 0.) || (bse.czz() <= 0.)) { + AlgebraicSymMatrix33 we; + we(0, 0) = 10000; + we(1, 1) = 10000; + we(2, 2) = 10000; + vertexes->push_back(reco::Vertex(bs.position(), we, 0., 0., 0)); + + edm::LogInfo("PixelVertexProducer") << "No vertices found. 
Beamspot with invalid errors " << bse.matrix() + << "\nWill put Vertex derived from dummy-fake BeamSpot into Event.\n" + << (*vertexes)[0].x() << "\n" + << (*vertexes)[0].y() << "\n" + << (*vertexes)[0].z() << "\n"; + } else { + vertexes->push_back(reco::Vertex(bs.position(), bs.rotatedCovariance3D(), 0., 0., 0)); + + edm::LogInfo("PixelVertexProducer") << "No vertices found. Will put Vertex derived from BeamSpot into Event:\n" + << (*vertexes)[0].x() << "\n" + << (*vertexes)[0].y() << "\n" + << (*vertexes)[0].z() << "\n"; + } + } else if (vertexes->empty() && !bsHandle.isValid()) { + edm::LogWarning("PixelVertexProducer") << "No beamspot and no vertex found. No vertex returned."; + } + + iEvent.put(std::move(vertexes)); +} + +DEFINE_FWK_MODULE(PixelVertexProducerFromSoAAlpaka); diff --git a/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoADevice.h b/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoADevice.h index 0c55cd97b070a..48848ff959554 100644 --- a/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoADevice.h +++ b/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoADevice.h @@ -8,7 +8,7 @@ template class PixelVertexWorkSpaceSoADevice : public cms::cuda::PortableDeviceCollection> { public: - PixelVertexWorkSpaceSoADevice() = default; + explicit PixelVertexWorkSpaceSoADevice() = default; // Constructor which specifies the SoA size and CUDA stream explicit PixelVertexWorkSpaceSoADevice(cudaStream_t stream) diff --git a/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHost.h b/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHost.h index 0e698933b0731..9df8cc4580a1f 100644 --- a/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHost.h +++ b/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHost.h @@ -9,6 +9,7 @@ template class PixelVertexWorkSpaceSoAHost : public cms::cuda::PortableHostCollection> { public: explicit PixelVertexWorkSpaceSoAHost() : 
PortableHostCollection>(S) {} + // Constructor which specifies the SoA size and CUDA stream explicit PixelVertexWorkSpaceSoAHost(cudaStream_t stream) : PortableHostCollection>(S, stream) {} diff --git a/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHostAlpaka.h b/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHostAlpaka.h new file mode 100644 index 0000000000000..33e163dbab784 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHostAlpaka.h @@ -0,0 +1,15 @@ +#ifndef RecoTracker_PixelVertexFinding_plugins_PixelVertexWorkSpaceSoAHostAlpaka_h +#define RecoTracker_PixelVertexFinding_plugins_PixelVertexWorkSpaceSoAHostAlpaka_h + +#include + +#include "DataFormats/Portable/interface/PortableHostCollection.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" + +namespace vertexFinder { + + using PixelVertexWorkSpaceSoAHost = PortableHostCollection>; + +} // namespace vertexFinder + +#endif // RecoTracker_PixelVertexFinding_plugins_PixelVertexWorkSpaceSoAHostAlpaka_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexProducerAlpaka.cc b/RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexProducerAlpaka.cc new file mode 100644 index 0000000000000..d572a181ccf85 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexProducerAlpaka.cc @@ -0,0 +1,110 @@ +#include + +#include "Geometry/CommonTopologies/interface/SimplePixelTopology.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Utilities/interface/StreamID.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/ESGetToken.h" +#include 
"HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/Event.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EventSetup.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/global/EDProducer.h" + +#include "DataFormats/TrackSoA/interface/alpaka/TracksSoACollection.h" +#include "DataFormats/TrackSoA/interface/TracksDevice.h" +#include "DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexDevice.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" + +#include "vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + using namespace cms::alpakatools; + + template + class PixelVertexProducerAlpaka : public global::EDProducer<> { + using TkSoADevice = TracksSoACollection; + using Algo = vertexFinder::Producer; + + public: + explicit PixelVertexProducerAlpaka(const edm::ParameterSet& iConfig); + ~PixelVertexProducerAlpaka() override = default; + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + private: + void produce(edm::StreamID streamID, device::Event& iEvent, const device::EventSetup& iSetup) const override; + + const Algo algo_; + + // Tracking cuts before sending tracks to vertex algo + const float ptMin_; + const float ptMax_; + + device::EDGetToken tokenDeviceTrack_; + device::EDPutToken tokenDeviceVertex_; + }; + + template + PixelVertexProducerAlpaka::PixelVertexProducerAlpaka(const edm::ParameterSet& conf) + : algo_(conf.getParameter("oneKernel"), + conf.getParameter("useDensity"), + conf.getParameter("useDBSCAN"), + conf.getParameter("useIterative"), + conf.getParameter("doSplitting"), + conf.getParameter("minT"), + conf.getParameter("eps"), + conf.getParameter("errmax"), + conf.getParameter("chi2max")), + ptMin_(conf.getParameter("PtMin")), // 0.5 GeV + ptMax_(conf.getParameter("PtMax")), // 75. 
Onsumes + tokenDeviceTrack_(consumes(conf.getParameter("pixelTrackSrc"))), + tokenDeviceVertex_(produces()) {} + + template + void PixelVertexProducerAlpaka::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + + // Only one of these three algos can be used at once. + // Maybe this should become a Plugin Factory + desc.add("oneKernel", true); + desc.add("useDensity", true); + desc.add("useDBSCAN", false); + desc.add("useIterative", false); + desc.add("doSplitting", true); + + desc.add("minT", 2); // min number of neighbours to be "core" + desc.add("eps", 0.07); // max absolute distance to cluster + desc.add("errmax", 0.01); // max error to be "seed" + desc.add("chi2max", 9.); // max normalized distance to cluster + + desc.add("PtMin", 0.5); + desc.add("PtMax", 75.); + desc.add("pixelTrackSrc", edm::InputTag("pixelTracksAlpaka")); + + descriptions.addWithDefaultLabel(desc); + } + + template + void PixelVertexProducerAlpaka::produce(edm::StreamID streamID, + device::Event& iEvent, + const device::EventSetup& iSetup) const { + auto const& hTracks = iEvent.get(tokenDeviceTrack_); + + iEvent.emplace(tokenDeviceVertex_, algo_.makeAsync(iEvent.queue(), hTracks.view(), ptMin_, ptMax_)); + } + + using PixelVertexProducerAlpakaPhase1 = PixelVertexProducerAlpaka; + using PixelVertexProducerAlpakaPhase2 = PixelVertexProducerAlpaka; + using PixelVertexProducerAlpakaHIonPhase1 = PixelVertexProducerAlpaka; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +DEFINE_FWK_ALPAKA_MODULE(PixelVertexProducerAlpakaPhase1); +DEFINE_FWK_ALPAKA_MODULE(PixelVertexProducerAlpakaPhase2); +DEFINE_FWK_ALPAKA_MODULE(PixelVertexProducerAlpakaHIonPhase1); diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h new file mode 100644 index 0000000000000..d0ec816b32aee --- /dev/null +++ 
b/RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h @@ -0,0 +1,23 @@ +#ifndef RecoTracker_PixelVertexFinding_plugins_alpaka_PixelVertexWorkSpaceSoADeviceAlpaka_h +#define RecoTracker_PixelVertexFinding_plugins_alpaka_PixelVertexWorkSpaceSoADeviceAlpaka_h + +#include + +#include "DataFormats/Portable/interface/alpaka/PortableCollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexDefinitions.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHostAlpaka.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + namespace vertexFinder { + + using PixelVertexWorkSpaceSoADevice = PortableCollection<::vertexFinder::PixelVertexWSSoALayout<>>; + using PixelVertexWorkSpaceSoAHost = ::vertexFinder::PixelVertexWorkSpaceSoAHost; + + } // namespace vertexFinder + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#endif // RecoTracker_PixelVertexFinding_plugins_alpaka_PixelVertexWorkSpaceSoADeviceAlpaka_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksByDensity.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksByDensity.h new file mode 100644 index 0000000000000..29cd537ac4aa7 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksByDensity.h @@ -0,0 +1,248 @@ +#ifndef RecoPixelVertexing_PixelVertexFinding_alpaka_clusterTracksByDensity_h +#define RecoPixelVertexing_PixelVertexFinding_alpaka_clusterTracksByDensity_h + +#include +#include +#include +#include +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace 
vertexFinder { + using VtxSoAView = ::reco::ZVertexSoAView; + using WsSoAView = ::vertexFinder::PixelVertexWorkSpaceSoAView; + // this algo does not really scale as it works in a single block... + // enough for <10K tracks we have + // + // based on Rodrighez&Laio algo + // + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void __attribute__((always_inline)) + clusterTracksByDensity(const TAcc& acc, + VtxSoAView& pdata, + WsSoAView& pws, + int minT, // min number of neighbours to be "seed" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster + ) { + using namespace vertexFinder; + constexpr bool verbose = false; // in principle the compiler should optmize out if false + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("params %d %f %f %f\n", minT, eps, errmax, chi2max); + } + auto er2mx = errmax * errmax; + + auto& __restrict__ data = pdata; + auto& __restrict__ ws = pws; + auto nt = ws.ntrks(); + float const* __restrict__ zt = ws.zt(); + float const* __restrict__ ezt2 = ws.ezt2(); + + uint32_t& nvFinal = data.nvFinal(); + uint32_t& nvIntermediate = ws.nvIntermediate(); + + uint8_t* __restrict__ izt = ws.izt(); + int32_t* __restrict__ nn = data.ndof(); + int32_t* __restrict__ iv = ws.iv(); + + ALPAKA_ASSERT_OFFLOAD(zt); + ALPAKA_ASSERT_OFFLOAD(ezt2); + ALPAKA_ASSERT_OFFLOAD(izt); + ALPAKA_ASSERT_OFFLOAD(nn); + ALPAKA_ASSERT_OFFLOAD(iv); + + using Hist = cms::alpakatools::HistoContainer; + auto& hist = alpaka::declareSharedVar(acc); + auto& hws = alpaka::declareSharedVar(acc); + + for (auto j : cms::alpakatools::elements_with_stride(acc, Hist::totbins())) { + hist.off[j] = 0; + } + alpaka::syncBlockThreads(acc); + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("booked hist with %d bins, size %d for %d tracks\n", hist.totbins(), hist.capacity(), nt); + } + 
ALPAKA_ASSERT_OFFLOAD(static_cast(nt) <= hist.capacity()); + + // fill hist (bin shall be wider than "eps") + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + ALPAKA_ASSERT_OFFLOAD(i < ::zVertex::MAXTRACKS); + int iz = int(zt[i] * 10.); // valid if eps<=0.1 + // iz = std::clamp(iz, INT8_MIN, INT8_MAX); // sorry c++17 only + iz = std::min(std::max(iz, INT8_MIN), INT8_MAX); + izt[i] = iz - INT8_MIN; + ALPAKA_ASSERT_OFFLOAD(iz - INT8_MIN >= 0); + ALPAKA_ASSERT_OFFLOAD(iz - INT8_MIN < 256); + hist.count(acc, izt[i]); + iv[i] = i; + nn[i] = 0; + } + alpaka::syncBlockThreads(acc); + if (threadIdxLocal < 32) + hws[threadIdxLocal] = 0; // used by prefix scan... + alpaka::syncBlockThreads(acc); + hist.finalize(acc, hws); + alpaka::syncBlockThreads(acc); + ALPAKA_ASSERT_OFFLOAD(hist.size() == nt); + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + hist.fill(acc, izt[i], uint16_t(i)); + } + alpaka::syncBlockThreads(acc); + // count neighbours + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (ezt2[i] > er2mx) + continue; + auto loop = [&](uint32_t j) { + if (i == j) + return; + auto dist = std::abs(zt[i] - zt[j]); + if (dist > eps) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; + nn[i]++; + }; + + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + alpaka::syncBlockThreads(acc); + + // find closest above me .... (we ignore the possibility of two j at same distance from i) + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + float mdist = eps; + auto loop = [&](uint32_t j) { + if (nn[j] < nn[i]) + return; + if (nn[j] == nn[i] && zt[j] >= zt[i]) + return; // if equal use natural order... + auto dist = std::abs(zt[i] - zt[j]); + if (dist > mdist) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; // (break natural order???) + mdist = dist; + iv[i] = j; // assign to cluster (better be unique??) 
+ }; + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + alpaka::syncBlockThreads(acc); + +#ifdef GPU_DEBUG + // mini verification + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] != int(i)) + ALPAKA_ASSERT_OFFLOAD(iv[iv[i]] != int(i)); + } + alpaka::syncBlockThreads(acc); +#endif + + // consolidate graph (percolate index of seed) + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + auto m = iv[i]; + while (m != iv[m]) + m = iv[m]; + iv[i] = m; + } + +#ifdef GPU_DEBUG + alpaka::syncBlockThreads(acc); + // mini verification + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] != int(i)) + ALPAKA_ASSERT_OFFLOAD(iv[iv[i]] != int(i)); + } +#endif + +#ifdef GPU_DEBUG + // and verify that we did not spit any cluster... + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + auto minJ = i; + auto mdist = eps; + auto loop = [&](uint32_t j) { + if (nn[j] < nn[i]) + return; + if (nn[j] == nn[i] && zt[j] >= zt[i]) + return; // if equal use natural order... + auto dist = std::abs(zt[i] - zt[j]); + if (dist > mdist) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; + mdist = dist; + minJ = j; + }; + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + // should belong to the same cluster... + ALPAKA_ASSERT_OFFLOAD(iv[i] == iv[minJ]); + ALPAKA_ASSERT_OFFLOAD(nn[i] <= nn[iv[i]]); + } + alpaka::syncBlockThreads(acc); +#endif + + auto& foundClusters = alpaka::declareSharedVar(acc); + foundClusters = 0; + alpaka::syncBlockThreads(acc); + + // find the number of different clusters, identified by a tracks with clus[i] == i and density larger than threshold; + // mark these tracks with a negative id. 
+ for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] == int(i)) { + if (nn[i] >= minT) { + auto old = alpaka::atomicInc(acc, &foundClusters, 0xffffffff, alpaka::hierarchy::Threads{}); + iv[i] = -(old + 1); + } else { // noise + iv[i] = -9998; + } + } + } + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(foundClusters < ::zVertex::MAXVTX); + + // propagate the negative id to all the tracks in the cluster. + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] >= 0) { + // mark each track in a cluster with the same id as the first one + iv[i] = iv[iv[i]]; + } + } + alpaka::syncBlockThreads(acc); + + // adjust the cluster id to be a positive value starting from 0 + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + iv[i] = -iv[i] - 1; + } + + nvIntermediate = nvFinal = foundClusters; + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("found %d proto vertices\n", foundClusters); + } + } + class ClusterTracksByDensityKernel { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + VtxSoAView pdata, + WsSoAView pws, + int minT, // min number of neighbours to be "seed" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster + ) const { + clusterTracksByDensity(acc, pdata, pws, minT, eps, errmax, chi2max); + } + }; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelVertexFinding_alpaka_clusterTracksByDensity_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksDBSCAN.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksDBSCAN.h new file mode 100644 index 0000000000000..46ae2ad80ecc9 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksDBSCAN.h @@ -0,0 +1,255 @@ +#ifndef RecoPixelVertexing_PixelVertexFinding_gpuClusterTracksDBSCAN_h +#define 
RecoPixelVertexing_PixelVertexFinding_gpuClusterTracksDBSCAN_h + +#include +#include +#include +#include +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "vertexFinder.h" +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + using VtxSoAView = ::reco::ZVertexSoAView; + using WsSoAView = ::vertexFinder::PixelVertexWorkSpaceSoAView; + // this algo does not really scale as it works in a single block... + // enough for <10K tracks we have + class ClusterTracksDBSCAN { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + VtxSoAView pdata, + WsSoAView pws, + int minT, // min number of neighbours to be "core" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster + ) const { + constexpr bool verbose = false; // in principle the compiler should optmize out if false + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("params %d %f %f %f\n", minT, eps, errmax, chi2max); + } + auto er2mx = errmax * errmax; + + auto& __restrict__ data = pdata; + auto& __restrict__ ws = pws; + auto nt = ws.ntrks(); + float const* __restrict__ zt = ws.zt(); + float const* __restrict__ ezt2 = ws.ezt2(); + + uint32_t& nvFinal = data.nvFinal(); + uint32_t& nvIntermediate = ws.nvIntermediate(); + + uint8_t* __restrict__ izt = ws.izt(); + int32_t* __restrict__ nn = data.ndof(); + int32_t* __restrict__ iv = ws.iv(); + + ALPAKA_ASSERT_OFFLOAD(zt); + ALPAKA_ASSERT_OFFLOAD(iv); + ALPAKA_ASSERT_OFFLOAD(nn); + ALPAKA_ASSERT_OFFLOAD(ezt2); + + using Hist = cms::alpakatools::HistoContainer; + auto& hist = alpaka::declareSharedVar(acc); + auto& hws = 
alpaka::declareSharedVar(acc); + + for (auto j : cms::alpakatools::elements_with_stride(acc, Hist::totbins())) { + hist.off[j] = 0; + } + alpaka::syncBlockThreads(acc); + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("booked hist with %d bins, size %d for %d tracks\n", hist.nbins(), hist.capacity(), nt); + } + + ALPAKA_ASSERT_OFFLOAD(static_cast(nt) <= hist.capacity()); + + // fill hist (bin shall be wider than "eps") + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + ALPAKA_ASSERT_OFFLOAD(i < ::zVertex::MAXTRACKS); + int iz = int(zt[i] * 10.); // valid if eps<=0.1 + iz = std::clamp(iz, INT8_MIN, INT8_MAX); + izt[i] = iz - INT8_MIN; + ALPAKA_ASSERT_OFFLOAD(iz - INT8_MIN >= 0); + ALPAKA_ASSERT_OFFLOAD(iz - INT8_MIN < 256); + hist.count(acc, izt[i]); + iv[i] = i; + nn[i] = 0; + } + alpaka::syncBlockThreads(acc); + if (threadIdxLocal < 32) + hws[threadIdxLocal] = 0; // used by prefix scan... + alpaka::syncBlockThreads(acc); + hist.finalize(acc, hws); + alpaka::syncBlockThreads(acc); + ALPAKA_ASSERT_OFFLOAD(hist.size() == nt); + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + hist.fill(acc, izt[i], uint32_t(i)); + } + alpaka::syncBlockThreads(acc); + + // count neighbours + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (ezt2[i] > er2mx) + continue; + auto loop = [&](uint32_t j) { + if (i == j) + return; + auto dist = std::abs(zt[i] - zt[j]); + if (dist > eps) + return; + // if (dist*dist>chi2max*(ezt2[i]+ezt2[j])) return; + nn[i]++; + }; + + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + + alpaka::syncBlockThreads(acc); + + // find NN with smaller z... 
+ for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (nn[i] < minT) + continue; // DBSCAN core rule + float mz = zt[i]; + auto loop = [&](uint32_t j) { + if (zt[j] >= mz) + return; + if (nn[j] < minT) + return; // DBSCAN core rule + auto dist = std::abs(zt[i] - zt[j]); + if (dist > eps) + return; + // if (dist*dist>chi2max*(ezt2[i]+ezt2[j])) return; + mz = zt[j]; + iv[i] = j; // assign to cluster (better be unique??) + }; + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + + alpaka::syncBlockThreads(acc); + +#ifdef GPU_DEBUG + // mini verification + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] != int(i)) + ALPAKA_ASSERT_OFFLOAD(iv[iv[i]] != int(i)); + } + alpaka::syncBlockThreads(acc); +#endif + + // consolidate graph (percolate index of seed) + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + auto m = iv[i]; + while (m != iv[m]) + m = iv[m]; + iv[i] = m; + } + + alpaka::syncBlockThreads(acc); + +#ifdef GPU_DEBUG + // mini verification + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] != int(i)) + ALPAKA_ASSERT_OFFLOAD(iv[iv[i]] != int(i)); + } + alpaka::syncBlockThreads(acc); +#endif + +#ifdef GPU_DEBUG + // and verify that we did not spit any cluster... + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (nn[i] < minT) + continue; // DBSCAN core rule + ALPAKA_ASSERT_OFFLOAD(zt[iv[i]] <= zt[i]); + auto loop = [&](uint32_t j) { + if (nn[j] < minT) + return; // DBSCAN core rule + auto dist = std::abs(zt[i] - zt[j]); + if (dist > eps) + return; + // if (dist*dist>chi2max*(ezt2[i]+ezt2[j])) return; + // they should belong to the same cluster, isn't it? 
+ if (iv[i] != iv[j]) { + printf("ERROR %d %d %f %f %d\n", i, iv[i], zt[i], zt[iv[i]], iv[iv[i]]); + printf(" %d %d %f %f %d\n", j, iv[j], zt[j], zt[iv[j]], iv[iv[j]]); + ; + } + ALPAKA_ASSERT_OFFLOAD(iv[i] == iv[j]); + }; + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + alpaka::syncBlockThreads(acc); +#endif + + // collect edges (assign to closest cluster of closest point??? here to closest point) + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + // if (nn[i]==0 || nn[i]>=minT) continue; // DBSCAN edge rule + if (nn[i] >= minT) + continue; // DBSCAN edge rule + float mdist = eps; + auto loop = [&](uint32_t j) { + if (nn[j] < minT) + return; // DBSCAN core rule + auto dist = std::abs(zt[i] - zt[j]); + if (dist > mdist) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; // needed? + mdist = dist; + iv[i] = iv[j]; // assign to cluster (better be unique??) + }; + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + + auto& foundClusters = alpaka::declareSharedVar(acc); + foundClusters = 0; + alpaka::syncBlockThreads(acc); + + // find the number of different clusters, identified by a tracks with clus[i] == i; + // mark these tracks with a negative id. + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] == int(i)) { + if (nn[i] >= minT) { + auto old = alpaka::atomicInc(acc, &foundClusters, 0xffffffff, alpaka::hierarchy::Threads{}); + iv[i] = -(old + 1); + } else { // noise + iv[i] = -9998; + } + } + } + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(foundClusters < ::zVertex::MAXVTX); + + // propagate the negative id to all the tracks in the cluster. 
+ for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] >= 0) { + // mark each track in a cluster with the same id as the first one + iv[i] = iv[iv[i]]; + } + } + alpaka::syncBlockThreads(acc); + + // adjust the cluster id to be a positive value starting from 0 + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + iv[i] = -iv[i] - 1; + } + + nvIntermediate = nvFinal = foundClusters; + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("found %d proto vertices\n", foundClusters); + } + } + }; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelVertexFinding_plugins_gpuClusterTracksDBSCAN_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksIterative.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksIterative.h new file mode 100644 index 0000000000000..3fe0202121f80 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksIterative.h @@ -0,0 +1,230 @@ +#ifndef RecoTracker_PixelVertexFinding_clusterTracksIterativeAlpaka_h +#define RecoTracker_PixelVertexFinding_clusterTracksIterativeAlpaka_h + +#include +#include +#include +#include + +#include "DataFormats/VertexSoA/interface/ZVertexDefinitions.h" +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + + // this algo does not really scale as it works in a single block... 
+ // enough for <10K tracks we have + class ClusterTracksIterative { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + VtxSoAView pdata, + WsSoAView pws, + int minT, // min number of neighbours to be "core" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster + ) const { + constexpr bool verbose = false; // in principle the compiler should optmize out if false + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("params %d %f %f %f\n", minT, eps, errmax, chi2max); + } + auto er2mx = errmax * errmax; + + auto& __restrict__ data = pdata; + auto& __restrict__ ws = pws; + auto nt = ws.ntrks(); + float const* __restrict__ zt = ws.zt(); + float const* __restrict__ ezt2 = ws.ezt2(); + + uint32_t& nvFinal = data.nvFinal(); + uint32_t& nvIntermediate = ws.nvIntermediate(); + + uint8_t* __restrict__ izt = ws.izt(); + int32_t* __restrict__ nn = data.ndof(); + int32_t* __restrict__ iv = ws.iv(); + + ALPAKA_ASSERT_OFFLOAD(zt); + ALPAKA_ASSERT_OFFLOAD(nn); + ALPAKA_ASSERT_OFFLOAD(iv); + ALPAKA_ASSERT_OFFLOAD(ezt2); + + using Hist = cms::alpakatools::HistoContainer; + auto& hist = alpaka::declareSharedVar(acc); + auto& hws = alpaka::declareSharedVar(acc); + + for (auto j : cms::alpakatools::elements_with_stride(acc, Hist::totbins())) { + hist.off[j] = 0; + } + alpaka::syncBlockThreads(acc); + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("booked hist with %d bins, size %d for %d tracks\n", hist.nbins(), hist.capacity(), nt); + } + + ALPAKA_ASSERT_OFFLOAD(static_cast(nt) <= hist.capacity()); + + // fill hist (bin shall be wider than "eps") + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + ALPAKA_ASSERT_OFFLOAD(i < ::zVertex::MAXTRACKS); + int iz = int(zt[i] * 10.); // valid if eps<=0.1 + iz = std::clamp(iz, INT8_MIN, INT8_MAX); + 
izt[i] = iz - INT8_MIN; + ALPAKA_ASSERT_OFFLOAD(iz - INT8_MIN >= 0); + ALPAKA_ASSERT_OFFLOAD(iz - INT8_MIN < 256); + hist.count(acc, izt[i]); + iv[i] = i; + nn[i] = 0; + } + alpaka::syncBlockThreads(acc); + + if (threadIdxLocal < 32) + hws[threadIdxLocal] = 0; // used by prefix scan... + alpaka::syncBlockThreads(acc); + + hist.finalize(acc, hws); + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(hist.size() == nt); + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + hist.fill(acc, izt[i], uint16_t(i)); + } + alpaka::syncBlockThreads(acc); + + // count neighbours + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (ezt2[i] > er2mx) + continue; + auto loop = [&](uint32_t j) { + if (i == j) + return; + auto dist = std::abs(zt[i] - zt[j]); + if (dist > eps) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; + nn[i]++; + }; + + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + + auto& nloops = alpaka::declareSharedVar(acc); + nloops = 0; + + alpaka::syncBlockThreads(acc); + + // cluster seeds only + bool more = true; + while (alpaka::syncBlockThreadsPredicate(acc, more)) { + if (1 == nloops % 2) { + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + auto m = iv[i]; + while (m != iv[m]) + m = iv[m]; + iv[i] = m; + } + } else { + more = false; + for (auto k : cms::alpakatools::elements_with_stride(acc, hist.size())) { + auto p = hist.begin() + k; + auto i = (*p); + auto be = std::min(Hist::bin(izt[i]) + 1, int(hist.nbins() - 1)); + if (nn[i] < minT) + continue; // DBSCAN core rule + auto loop = [&](uint32_t j) { + ALPAKA_ASSERT_OFFLOAD(i != j); + if (nn[j] < minT) + return; // DBSCAN core rule + auto dist = std::abs(zt[i] - zt[j]); + if (dist > eps) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; + auto old = alpaka::atomicMin(acc, &iv[j], iv[i], alpaka::hierarchy::Blocks{}); + if (old != iv[i]) { + // end the loop only if no changes were applied + more = 
true; + } + alpaka::atomicMin(acc, &iv[i], old, alpaka::hierarchy::Blocks{}); + }; + ++p; + for (; p < hist.end(be); ++p) + loop(*p); + } // for i + } + if (threadIdxLocal == 0) + ++nloops; + } // while + + // collect edges (assign to closest cluster of closest point??? here to closest point) + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + // if (nn[i]==0 || nn[i]>=minT) continue; // DBSCAN edge rule + if (nn[i] >= minT) + continue; // DBSCAN edge rule + float mdist = eps; + auto loop = [&](int j) { + if (nn[j] < minT) + return; // DBSCAN core rule + auto dist = std::abs(zt[i] - zt[j]); + if (dist > mdist) + return; + if (dist * dist > chi2max * (ezt2[i] + ezt2[j])) + return; // needed? + mdist = dist; + iv[i] = iv[j]; // assign to cluster (better be unique??) + }; + cms::alpakatools::forEachInBins(hist, izt[i], 1, loop); + } + + auto& foundClusters = alpaka::declareSharedVar(acc); + foundClusters = 0; + alpaka::syncBlockThreads(acc); + + // find the number of different clusters, identified by a tracks with clus[i] == i; + // mark these tracks with a negative id. + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] == int(i)) { + if (nn[i] >= minT) { + auto old = alpaka::atomicInc(acc, &foundClusters, 0xffffffff, alpaka::hierarchy::Threads{}); + iv[i] = -(old + 1); + } else { // noise + iv[i] = -9998; + } + } + } + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(foundClusters < ::zVertex::MAXVTX); + + // propagate the negative id to all the tracks in the cluster. 
+ for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] >= 0) { + // mark each track in a cluster with the same id as the first one + iv[i] = iv[iv[i]]; + } + } + alpaka::syncBlockThreads(acc); + + // adjust the cluster id to be a positive value starting from 0 + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + iv[i] = -iv[i] - 1; + } + + nvIntermediate = nvFinal = foundClusters; + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + printf("found %d proto vertices\n", foundClusters); + } + } + }; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoTracker_PixelVertexFinding_plugins_clusterTracksIterativeAlpaka_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/fitVertices.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/fitVertices.h new file mode 100644 index 0000000000000..9ff4656b9718e --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/fitVertices.h @@ -0,0 +1,123 @@ +#ifndef RecoPixelVertexing_PixelVertexFinding_gpuFitVertices_h +#define RecoPixelVertexing_PixelVertexFinding_gpuFitVertices_h + +#include +#include +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" + +#include "vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE __attribute__((always_inline)) void fitVertices( + const TAcc& acc, + VtxSoAView& pdata, + WsSoAView& pws, + float chi2Max // for outlier rejection + ) { + constexpr bool verbose = false; // in principle the compiler should optmize out if false + + auto& __restrict__ data = pdata; + auto& __restrict__ ws = pws; + auto nt = ws.ntrks(); + float const* __restrict__ zt = ws.zt(); + float const* __restrict__ ezt2 = ws.ezt2(); + float* __restrict__ zv = data.zv(); + float* __restrict__ wv = data.wv(); + float* __restrict__ chi2 = 
data.chi2(); + uint32_t& nvFinal = data.nvFinal(); + uint32_t& nvIntermediate = ws.nvIntermediate(); + + int32_t* __restrict__ nn = data.ndof(); + int32_t* __restrict__ iv = ws.iv(); + + ALPAKA_ASSERT_OFFLOAD(nvFinal <= nvIntermediate); + nvFinal = nvIntermediate; + auto foundClusters = nvFinal; + + // zero + for (auto i : cms::alpakatools::elements_with_stride(acc, foundClusters)) { + zv[i] = 0; + wv[i] = 0; + chi2[i] = 0; + } + + // only for test + auto& noise = alpaka::declareSharedVar(acc); + + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) + noise = 0; + } + alpaka::syncBlockThreads(acc); + + // compute cluster location + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] > 9990) { + if constexpr (verbose) + alpaka::atomicAdd(acc, &noise, 1, alpaka::hierarchy::Threads{}); + continue; + } + ALPAKA_ASSERT_OFFLOAD(iv[i] >= 0); + ALPAKA_ASSERT_OFFLOAD(iv[i] < int(foundClusters)); + auto w = 1.f / ezt2[i]; + alpaka::atomicAdd(acc, &zv[iv[i]], zt[i] * w, alpaka::hierarchy::Threads{}); + alpaka::atomicAdd(acc, &wv[iv[i]], w, alpaka::hierarchy::Threads{}); + } + + alpaka::syncBlockThreads(acc); + // reuse nn + for (auto i : cms::alpakatools::elements_with_stride(acc, foundClusters)) { + ALPAKA_ASSERT_OFFLOAD(wv[i] > 0.f); + zv[i] /= wv[i]; + nn[i] = -1; // ndof + } + alpaka::syncBlockThreads(acc); + + // compute chi2 + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] > 9990) + continue; + + auto c2 = zv[iv[i]] - zt[i]; + c2 *= c2 / ezt2[i]; + if (c2 > chi2Max) { + iv[i] = 9999; + continue; + } + alpaka::atomicAdd(acc, &chi2[iv[i]], c2, alpaka::hierarchy::Blocks{}); + alpaka::atomicAdd(acc, &nn[iv[i]], 1, alpaka::hierarchy::Blocks{}); + } + alpaka::syncBlockThreads(acc); + + for (auto i : cms::alpakatools::elements_with_stride(acc, foundClusters)) { + if (nn[i] > 0) { + wv[i] *= float(nn[i]) / chi2[i]; + } + } + if constexpr (verbose) { + if (cms::alpakatools::once_per_block(acc)) { + 
printf("found %d proto clusters ", foundClusters); + printf("and %d noise\n", noise); + } + } + } + + class FitVerticesKernel { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + VtxSoAView pdata, + WsSoAView pws, + float chi2Max // for outlier rejection + ) const { + fitVertices(acc, pdata, pws, chi2Max); + } + }; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelVertexFinding_plugins_gpuFitVertices_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/sortByPt2.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/sortByPt2.h new file mode 100644 index 0000000000000..2c6f0cb0597e4 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/sortByPt2.h @@ -0,0 +1,80 @@ +#ifndef RecoPixelVertexing_PixelVertexFinding_sortByPt2_h +#define RecoPixelVertexing_PixelVertexFinding_sortByPt2_h + +#include +#include +#include +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" +#include "HeterogeneousCore/AlpakaInterface/interface/radixSort.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" + +#include "vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + using VtxSoAView = ::reco::ZVertexSoAView; + using WsSoAView = ::vertexFinder::PixelVertexWorkSpaceSoAView; + + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE void sortByPt2(const TAcc& acc, VtxSoAView& data, WsSoAView& ws) { + auto nt = ws.ntrks(); + float const* __restrict__ ptt2 = ws.ptt2(); + uint32_t const& nvFinal = data.nvFinal(); + + int32_t const* __restrict__ iv = ws.iv(); + float* __restrict__ ptv2 = data.ptv2(); + uint16_t* __restrict__ sortInd = data.sortInd(); + + if (nvFinal < 1) + return; + + // fill indexing + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + 
data.idv()[ws.itrk()[i]] = iv[i]; + }; + + // can be done asynchronously at the end of previous event + for (auto i : cms::alpakatools::elements_with_stride(acc, nvFinal)) { + ptv2[i] = 0; + }; + alpaka::syncBlockThreads(acc); + + for (auto i : cms::alpakatools::elements_with_stride(acc, nt)) { + if (iv[i] <= 9990) { + alpaka::atomicAdd(acc, &ptv2[iv[i]], ptt2[i], alpaka::hierarchy::Blocks{}); + } + }; + alpaka::syncBlockThreads(acc); + + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + if (1 == nvFinal) { + if (threadIdxLocal == 0) + sortInd[0] = 0; + return; + } + + if constexpr (not cms::alpakatools::requires_single_thread_per_block_v) { + auto& sws = alpaka::declareSharedVar(acc); + // sort using only 16 bits + cms::alpakatools::radixSort(acc, ptv2, sortInd, sws, nvFinal); + } else { + for (uint16_t i = 0; i < nvFinal; ++i) + sortInd[i] = i; + std::sort(sortInd, sortInd + nvFinal, [&](auto i, auto j) { return ptv2[i] < ptv2[j]; }); + } + } + + class SortByPt2Kernel { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, VtxSoAView pdata, WsSoAView pws) const { + sortByPt2(acc, pdata, pws); + } + }; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelVertexFinding_sortByPt2_h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/splitVertices.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/splitVertices.h new file mode 100644 index 0000000000000..f5b05e17bb038 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/splitVertices.h @@ -0,0 +1,162 @@ +#ifndef RecoPixelVertexing_PixelVertexFinding_splitVertices_h +#define RecoPixelVertexing_PixelVertexFinding_splitVertices_h + +#include +#include +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/HistoContainer.h" + +#include "vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + using 
VtxSoAView = ::reco::ZVertexSoAView; + using WsSoAView = ::vertexFinder::PixelVertexWorkSpaceSoAView; + template + ALPAKA_FN_ACC ALPAKA_FN_INLINE __attribute__((always_inline)) void splitVertices(const TAcc& acc, + VtxSoAView& pdata, + WsSoAView& pws, + float maxChi2) { + constexpr bool verbose = false; // in principle the compiler should optmize out if false + const uint32_t threadIdxLocal(alpaka::getIdx(acc)[0u]); + + auto& __restrict__ data = pdata; + auto& __restrict__ ws = pws; + auto nt = ws.ntrks(); + float const* __restrict__ zt = ws.zt(); + float const* __restrict__ ezt2 = ws.ezt2(); + float* __restrict__ zv = data.zv(); + float* __restrict__ wv = data.wv(); + float const* __restrict__ chi2 = data.chi2(); + uint32_t& nvFinal = data.nvFinal(); + + int32_t const* __restrict__ nn = data.ndof(); + int32_t* __restrict__ iv = ws.iv(); + + ALPAKA_ASSERT_OFFLOAD(zt); + ALPAKA_ASSERT_OFFLOAD(wv); + ALPAKA_ASSERT_OFFLOAD(chi2); + ALPAKA_ASSERT_OFFLOAD(nn); + + constexpr uint32_t MAXTK = 512; + + auto& it = alpaka::declareSharedVar(acc); // track index + auto& zz = alpaka::declareSharedVar(acc); // z pos + auto& newV = alpaka::declareSharedVar(acc); // 0 or 1 + auto& ww = alpaka::declareSharedVar(acc); // z weight + auto& nq = alpaka::declareSharedVar(acc); // number of track for this vertex + + const uint32_t blockIdx(alpaka::getIdx(acc)[0u]); + const uint32_t gridDimension(alpaka::getWorkDiv(acc)[0u]); + + // one vertex per block + for (auto kv = blockIdx; kv < nvFinal; kv += gridDimension) { + if (nn[kv] < 4) + continue; + if (chi2[kv] < maxChi2 * float(nn[kv])) + continue; + + ALPAKA_ASSERT_OFFLOAD(nn[kv] < int32_t(MAXTK)); + + if ((uint32_t)nn[kv] >= MAXTK) + continue; // too bad FIXME + + nq = 0u; + alpaka::syncBlockThreads(acc); + + // copy to local + for (auto k : cms::alpakatools::independent_group_elements(acc, nt)) { + if (iv[k] == int(kv)) { + auto old = alpaka::atomicInc(acc, &nq, MAXTK, alpaka::hierarchy::Threads{}); + zz[old] = zt[k] - zv[kv]; + 
newV[old] = zz[old] < 0 ? 0 : 1; + ww[old] = 1.f / ezt2[k]; + it[old] = k; + } + } + + // the new vertices + auto& znew = alpaka::declareSharedVar(acc); + auto& wnew = alpaka::declareSharedVar(acc); + alpaka::syncBlockThreads(acc); + + ALPAKA_ASSERT_OFFLOAD(int(nq) == nn[kv] + 1); + + int maxiter = 20; + // kt-min.... + bool more = true; + while (alpaka::syncBlockThreadsPredicate(acc, more)) { + more = false; + if (0 == threadIdxLocal) { + znew[0] = 0; + znew[1] = 0; + wnew[0] = 0; + wnew[1] = 0; + } + alpaka::syncBlockThreads(acc); + + for (auto k : cms::alpakatools::elements_with_stride(acc, nq)) { + auto i = newV[k]; + alpaka::atomicAdd(acc, &znew[i], zz[k] * ww[k], alpaka::hierarchy::Threads{}); + alpaka::atomicAdd(acc, &wnew[i], ww[k], alpaka::hierarchy::Threads{}); + } + alpaka::syncBlockThreads(acc); + + if (0 == threadIdxLocal) { + znew[0] /= wnew[0]; + znew[1] /= wnew[1]; + } + alpaka::syncBlockThreads(acc); + + for (auto k : cms::alpakatools::elements_with_stride(acc, nq)) { + auto d0 = fabs(zz[k] - znew[0]); + auto d1 = fabs(zz[k] - znew[1]); + auto newer = d0 < d1 ? 
0 : 1; + more |= newer != newV[k]; + newV[k] = newer; + } + --maxiter; + if (maxiter <= 0) + more = false; + } + + // avoid empty vertices + if (0 == wnew[0] || 0 == wnew[1]) + continue; + + // quality cut + auto dist2 = (znew[0] - znew[1]) * (znew[0] - znew[1]); + + auto chi2Dist = dist2 / (1.f / wnew[0] + 1.f / wnew[1]); + + if (verbose && 0 == threadIdxLocal) + printf("inter %d %f %f\n", 20 - maxiter, chi2Dist, dist2 * wv[kv]); + + if (chi2Dist < 4) + continue; + + // get a new global vertex + auto& igv = alpaka::declareSharedVar(acc); + if (0 == threadIdxLocal) + igv = alpaka::atomicAdd(acc, &ws.nvIntermediate(), 1u, alpaka::hierarchy::Blocks{}); + alpaka::syncBlockThreads(acc); + for (auto k : cms::alpakatools::elements_with_stride(acc, nq)) { + if (1 == newV[k]) + iv[it[k]] = igv; + } + + } // loop on vertices + } + + class SplitVerticesKernel { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, VtxSoAView pdata, WsSoAView pws, float maxChi2) const { + splitVertices(acc, pdata, pws, maxChi2); + } + }; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif // RecoPixelVertexing_PixelVertexFinding_plugins_splitVertices.h diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.dev.cc b/RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.dev.cc new file mode 100644 index 0000000000000..c40d9adda93c5 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.dev.cc @@ -0,0 +1,208 @@ +#include +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" + +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/traits.h" +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h" + +#include "vertexFinder.h" +#include "vertexFinder.h" +#include "clusterTracksDBSCAN.h" +#include 
"clusterTracksIterative.h" +#include "clusterTracksByDensity.h" +#include "fitVertices.h" +#include "sortByPt2.h" +#include "splitVertices.h" + +#undef PIXVERTEX_DEBUG_PRODUCE +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + using namespace cms::alpakatools; + // reject outlier tracks that contribute more than this to the chi2 of the vertex fit + constexpr float maxChi2ForFirstFit = 50.f; + constexpr float maxChi2ForFinalFit = 5000.f; + + // split vertices with a chi2/NDoF greater than this + constexpr float maxChi2ForSplit = 9.f; + + template + class LoadTracks { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + reco::TrackSoAConstView tracks_view, + VtxSoAView soa, + WsSoAView pws, + float ptMin, + float ptMax) const { + auto const* quality = tracks_view.quality(); + using helper = TracksUtilities; + + for (auto idx : cms::alpakatools::elements_with_stride(acc, tracks_view.nTracks())) { + [[maybe_unused]] auto nHits = helper::nHits(tracks_view, idx); + ALPAKA_ASSERT_OFFLOAD(nHits >= 3); + + // initialize soa... 
+ soa[idx].idv() = -1; + + if (reco::isTriplet(tracks_view, idx)) + continue; // no triplets + if (quality[idx] < ::pixelTrack::Quality::highPurity) + continue; + + auto pt = tracks_view[idx].pt(); + + if (pt < ptMin) + continue; + + // clamp pt + pt = std::min(pt, ptMax); + + auto& data = pws; + auto it = alpaka::atomicAdd(acc, &data.ntrks(), 1u, alpaka::hierarchy::Blocks{}); + data[it].itrk() = idx; + data[it].zt() = reco::zip(tracks_view, idx); + data[it].ezt2() = tracks_view[idx].covariance()(14); + data[it].ptt2() = pt * pt; + } + } + }; +// #define THREE_KERNELS +#ifndef THREE_KERNELS + class VertexFinderOneKernel { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + VtxSoAView pdata, + WsSoAView pws, + bool doSplit, + int minT, // min number of neighbours to be "seed" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster, + ) const { + clusterTracksByDensity(acc, pdata, pws, minT, eps, errmax, chi2max); + alpaka::syncBlockThreads(acc); + fitVertices(acc, pdata, pws, maxChi2ForFirstFit); + alpaka::syncBlockThreads(acc); + if (doSplit) { + splitVertices(acc, pdata, pws, maxChi2ForSplit); + alpaka::syncBlockThreads(acc); + fitVertices(acc, pdata, pws, maxChi2ForFinalFit); + alpaka::syncBlockThreads(acc); + } + sortByPt2(acc, pdata, pws); + } + }; +#else + class VertexFinderKernel1 { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& acc, + VtxSoAView pdata, + WsSoAView pws, + int minT, // min number of neighbours to be "seed" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster, + ) const { + clusterTracksByDensity(pdata, pws, minT, eps, errmax, chi2max); + alpaka::syncBlockThreads(acc); + fitVertices(pdata, pws, maxChi2ForFirstFit); + } + }; + class VertexFinderKernel2 { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc& 
acc, VtxSoAView pdata, WsSoAView pws) const { + fitVertices(pdata, pws, maxChi2ForFinalFit); + alpaka::syncBlockThreads(acc); + sortByPt2(pdata, pws); + } + }; +#endif + + template + ZVertexSoACollection Producer::makeAsync(Queue& queue, + const reco::TrackSoAConstView& tracks_view, + float ptMin, + float ptMax) const { +#ifdef PIXVERTEX_DEBUG_PRODUCE + std::cout << "producing Vertices on GPU" << std::endl; +#endif // PIXVERTEX_DEBUG_PRODUCE + ZVertexSoACollection vertices(queue); + + auto soa = vertices.view(); + + auto ws_d = PixelVertexWorkSpaceSoADevice(::zVertex::MAXTRACKS, queue); + + // Initialize + const auto initWorkDiv = cms::alpakatools::make_workdiv(1, 1); + alpaka::exec(queue, initWorkDiv, Init{}, soa, ws_d.view()); + + // Load Tracks + const uint32_t blockSize = 128; + const uint32_t numberOfBlocks = + cms::alpakatools::divide_up_by(tracks_view.metadata().size() + blockSize - 1, blockSize); + const auto loadTracksWorkDiv = cms::alpakatools::make_workdiv(numberOfBlocks, blockSize); + alpaka::exec( + queue, loadTracksWorkDiv, LoadTracks{}, tracks_view, soa, ws_d.view(), ptMin, ptMax); + + // Running too many thread lead to problems when printf is enabled. + const auto finderSorterWorkDiv = cms::alpakatools::make_workdiv(1, 1024 - 128); + const auto splitterFitterWorkDiv = cms::alpakatools::make_workdiv(1024, 128); + + if (oneKernel_) { + // implemented only for density clustesrs +#ifndef THREE_KERNELS + alpaka::exec(queue, + finderSorterWorkDiv, + VertexFinderOneKernel{}, + soa, + ws_d.view(), + doSplitting_, + minT, + eps, + errmax, + chi2max); +#else + alpaka::exec( + queue, finderSorterWorkDiv, VertexFinderOneKernel{}, soa, ws_d.view(), minT, eps, errmax, chi2max); + + // one block per vertex... 
+ if (doSplitting_) + alpaka::exec(queue, splitterFitterWorkDiv, SplitVerticesKernel{}, soa, ws_d.view(), maxChi2ForSplit); + alpaka::exec(queue, finderSorterWorkDiv{}, soa, ws_d.view()); +#endif + } else { // five kernels + if (useDensity_) { + alpaka::exec( + queue, finderSorterWorkDiv, ClusterTracksByDensityKernel{}, soa, ws_d.view(), minT, eps, errmax, chi2max); + + } else if (useDBSCAN_) { + alpaka::exec( + queue, finderSorterWorkDiv, ClusterTracksDBSCAN{}, soa, ws_d.view(), minT, eps, errmax, chi2max); + } else if (useIterative_) { + alpaka::exec( + queue, finderSorterWorkDiv, ClusterTracksIterative{}, soa, ws_d.view(), minT, eps, errmax, chi2max); + } + alpaka::exec(queue, finderSorterWorkDiv, FitVerticesKernel{}, soa, ws_d.view(), maxChi2ForFirstFit); + + // one block per vertex... + if (doSplitting_) { + alpaka::exec(queue, splitterFitterWorkDiv, SplitVerticesKernel{}, soa, ws_d.view(), maxChi2ForSplit); + + alpaka::exec(queue, finderSorterWorkDiv, FitVerticesKernel{}, soa, ws_d.view(), maxChi2ForFinalFit); + } + alpaka::exec(queue, finderSorterWorkDiv, SortByPt2Kernel{}, soa, ws_d.view()); + } + + return vertices; + } + + template class Producer; + template class Producer; + template class Producer; + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.h b/RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.h new file mode 100644 index 0000000000000..23e5db1e706c4 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.h @@ -0,0 +1,76 @@ +#ifndef RecoPixelVertexing_PixelVertexFinding_vertexFinder_h +#define RecoPixelVertexing_PixelVertexFinding_vertexFinder_h + +#include +#include +#include +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/VertexSoA/interface/ZVertexSoA.h" +#include 
"DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexDevice.h" + +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + namespace vertexFinder { + using namespace cms::alpakatools; + using VtxSoAView = ::reco::ZVertexSoAView; + using WsSoAView = ::vertexFinder::PixelVertexWorkSpaceSoAView; + + class Init { + public: + template >> + ALPAKA_FN_ACC void operator()(const TAcc &acc, VtxSoAView pdata, WsSoAView pws) const { + pdata.nvFinal() = 0; // initialization + ::vertexFinder::init(pws); + } + }; + + template + class Producer { + using TkSoAConstView = reco::TrackSoAConstView; + + public: + Producer(bool oneKernel, + bool useDensity, + bool useDBSCAN, + bool useIterative, + bool doSplitting, + int iminT, // min number of neighbours to be "core" + float ieps, // max absolute distance to cluster + float ierrmax, // max error to be "seed" + float ichi2max // max normalized distance to cluster + ) + : oneKernel_(oneKernel && !(useDBSCAN || useIterative)), + useDensity_(useDensity), + useDBSCAN_(useDBSCAN), + useIterative_(useIterative), + doSplitting_(doSplitting), + minT(iminT), + eps(ieps), + errmax(ierrmax), + chi2max(ichi2max) {} + + ~Producer() = default; + + ZVertexSoACollection makeAsync(Queue &queue, const TkSoAConstView &tracks_view, float ptMin, float ptMax) const; + + private: + const bool oneKernel_; // run everything (cluster,fit,split,sort) in one kernel. 
Uses only density clusterizer + const bool useDensity_; // use density clusterizer + const bool useDBSCAN_; // use DBScan clusterizer + const bool useIterative_; // use iterative clusterizer + const bool doSplitting_; //run vertex splitting + + int minT; // min number of neighbours to be "core" + float eps; // max absolute distance to cluster + float errmax; // max error to be "seed" + float chi2max; // max normalized distance to cluster + }; + + } // namespace vertexFinder +} // namespace ALPAKA_ACCELERATOR_NAMESPACE +#endif diff --git a/RecoTracker/PixelVertexFinding/test/BuildFile.xml b/RecoTracker/PixelVertexFinding/test/BuildFile.xml index 9343f00f9a027..d5d0142eca659 100644 --- a/RecoTracker/PixelVertexFinding/test/BuildFile.xml +++ b/RecoTracker/PixelVertexFinding/test/BuildFile.xml @@ -10,29 +10,31 @@ - - - - - - - - - - + + + + + - - - - - + + + + + + + + + + + + + + + + + - - - - - @@ -42,3 +44,31 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/RecoTracker/PixelVertexFinding/test/PixelTrackRoot.cc b/RecoTracker/PixelVertexFinding/test/PixelTrackRoot.cc deleted file mode 100644 index 6f2696608921d..0000000000000 --- a/RecoTracker/PixelVertexFinding/test/PixelTrackRoot.cc +++ /dev/null @@ -1,136 +0,0 @@ - -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "DataFormats/Common/interface/Handle.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/ServiceRegistry/interface/Service.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" - -#include "DataFormats/TrackReco/interface/TrackFwd.h" -#include "DataFormats/TrackReco/interface/Track.h" -//#include "DataFormats/TrackReco/interface/print.h" - -#include "./PixelTrackRoot.h" -using namespace 
std; - -class PixelTrackRoot : public edm::EDAnalyzer { -public: - explicit PixelTrackRoot(const edm::ParameterSet& conf); - ~PixelTrackRoot(); - virtual void beginJob() {} - virtual void analyze(const edm::Event& ev, const edm::EventSetup& es); - virtual void endJob() {} - void book(); - void store(); - -private: - TFile* rootfile; - TTree* tthtree; - int Event; - static const int numMaxTrks = 100; - int CMNumTrk; - float CMTrkVtx[numMaxTrks]; - float CMTrkPt[numMaxTrks]; - float CMTrkP[numMaxTrks]; - float CMTrkIP[numMaxTrks]; - int HPNumTrk; - float HPTrkVtx[numMaxTrks]; - float HPTrkPt[numMaxTrks]; - float HPTrkP[numMaxTrks]; - float HPTrkIP[numMaxTrks]; - int SiNumTrk; - float SiTrkVtx[numMaxTrks]; - float SiTrkPt[numMaxTrks]; - float SiTrkP[numMaxTrks]; - float SiTrkIP[numMaxTrks]; -}; - -PixelTrackRoot::PixelTrackRoot(const edm::ParameterSet& conf) { - rootfile = new TFile("pixel_parameters.root", "RECREATE"); - tthtree = new TTree("T", "pixTracTestCP"); - book(); - edm::LogInfo("PixelTrackRoot") << " CTOR"; -} - -PixelTrackRoot::~PixelTrackRoot() { - rootfile->cd(); - tthtree->Write(); - rootfile->Close(); - delete rootfile; - edm::LogInfo("PixelTrackRoot") << " DTOR"; -} - -void PixelTrackRoot::analyze(const edm::Event& ev, const edm::EventSetup& es) { - typedef reco::TrackCollection::const_iterator IT; - edm::Handle trackCollection1; - ev.getByLabel("tracks1", trackCollection1); - const reco::TrackCollection tracks1 = *(trackCollection1.product()); - CMNumTrk = tracks1.size(); - Event = ev.id().event(); - int i = 0; - for (IT it = tracks1.begin(); it != tracks1.end(); it++) { - // myfillCM(*it); - CMTrkP[i] = it->p(); - CMTrkVtx[i] = 10 * (it->vertex().z()); - CMTrkPt[i] = it->pt(); - CMTrkIP[i] = it->dz(); - i++; - } - i = 0; - edm::Handle trackCollection2; - ev.getByLabel("tracks2", trackCollection2); - const reco::TrackCollection tracks2 = *(trackCollection2.product()); - HPNumTrk = tracks2.size(); - for (IT it = tracks2.begin(); it != tracks2.end(); 
it++) { - //myfillHP(*it); - // store(); - HPTrkP[i] = it->p(); - HPTrkVtx[i] = 10 * (it->vertex().z()); - HPTrkPt[i] = it->pt(); - HPTrkIP[i] = it->dz(); - i++; - } - i = 0; - edm::Handle silTracks; - ev.getByLabel("trackp", silTracks); - const reco::TrackCollection SiliconTrks = *(silTracks.product()); - // std::cout << "Silicon Tracks Size: "<< SiliconTrks.size()<p(); - SiTrkVtx[i] = 10 * (it->vertex().z()); - SiTrkPt[i] = it->pt(); - SiTrkIP[i] = it->dz(); - i++; - } - - store(); -} -void PixelTrackRoot::book() { - tthtree->Branch("Event", &Event, "Event/I"); - tthtree->Branch("CMNumTracks", &CMNumTrk, "CMNumTrk/I"); - tthtree->Branch("CMTrackVtx", &CMTrkVtx, "CMTrkVtx[CMNumTrk]/F"); - tthtree->Branch("CMTrkPT", &CMTrkPt, "CMTrkPt[CMNumTrk]/F"); - tthtree->Branch("CMTrkMomentum", &CMTrkP, "CMTrkP[CMNumTrk]/F"); - tthtree->Branch("CMTrkImpactParam", &CMTrkIP, "CMTrkIP[CMNumTrk]/F"); - tthtree->Branch("HPNumTracks", &HPNumTrk, "HPNumTrk/I"); - tthtree->Branch("HPTrackVtx", &HPTrkVtx, "HPTrkVtx[HPNumTrk]/F"); - tthtree->Branch("HPTrkPT", &HPTrkPt, "HPTrkPt[HPNumTrk]/F"); - tthtree->Branch("HPTrkMomentum", &HPTrkP, "HPTrkP[HPNumTrk]/F"); - tthtree->Branch("TrkImpactParam", &HPTrkIP, "HPTrkIP[HPNumTrk]/F"); - tthtree->Branch("SiNumTracks", &SiNumTrk, "SiNumTrk/I"); - tthtree->Branch("SiTrackVtx", &SiTrkVtx, "SiTrkVtx[SiNumTrk]/F"); - tthtree->Branch("SiTrkPT", &SiTrkPt, "SiTrkPt[SiNumTrk]/F"); - tthtree->Branch("SiTrkMomentum", &SiTrkP, "SiTrkP[SiNumTrk]/F"); - tthtree->Branch("SiTrkImpactParam", &SiTrkIP, "SiTrkIP[SiNumTrk]/F"); -} - -void PixelTrackRoot::store() { tthtree->Fill(); } - -DEFINE_FWK_MODULE(PixelTrackRoot); diff --git a/RecoTracker/PixelVertexFinding/test/PixelTrackRoot.h b/RecoTracker/PixelVertexFinding/test/PixelTrackRoot.h deleted file mode 100644 index 27ac456938fb3..0000000000000 --- a/RecoTracker/PixelVertexFinding/test/PixelTrackRoot.h +++ /dev/null @@ -1,13 +0,0 @@ -#include "TROOT.h" -#include "TObject.h" -#include "TFile.h" -#include "TH1.h" 
-#include "TH2.h" -#include "TProfile.h" -#include "TNtuple.h" -#include "TTree.h" -#include "TRandom.h" -#include -#include -#include -#include diff --git a/RecoTracker/PixelVertexFinding/test/PixelVertexTest.cc b/RecoTracker/PixelVertexFinding/test/PixelVertexTest.cc deleted file mode 100644 index 56b6953d4abd5..0000000000000 --- a/RecoTracker/PixelVertexFinding/test/PixelVertexTest.cc +++ /dev/null @@ -1,260 +0,0 @@ -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "DataFormats/Common/interface/Handle.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/ServiceRegistry/interface/Service.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" - -#include "DataFormats/TrackReco/interface/TrackFwd.h" -#include "DataFormats/TrackReco/interface/Track.h" -#include "DataFormats/VertexReco/interface/VertexFwd.h" -#include "DataFormats/VertexReco/interface/Vertex.h" - -#include "SimDataFormats/Vertex/interface/SimVertex.h" -#include "SimDataFormats/Vertex/interface/SimVertexContainer.h" -#include "SimDataFormats/Track/interface/SimTrack.h" -#include "SimDataFormats/Track/interface/SimTrackContainer.h" - -//#include "SimDataFormats/HepMCProduct/interface/HepMCProduct.h" - -#include "RecoTracker/PixelVertexFinding/interface/PVPositionBuilder.h" -#include "RecoTracker/PixelVertexFinding/interface/PVClusterComparer.h" - -#include "MagneticField/Engine/interface/MagneticField.h" -#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" - -#include "TrackingTools/Records/interface/TransientTrackRecord.h" -#include "TrackingTools/TransientTrack/interface/TransientTrack.h" -#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" -#include 
"RecoVertex/KalmanVertexFit/interface/KalmanVertexFitter.h" -#include "DataFormats/GeometryVector/interface/GlobalPoint.h" - -#include -#include -#include -#include "TTree.h" -#include "TFile.h" -#include "TDirectory.h" - -using namespace std; - -class PixelVertexTest : public edm::EDAnalyzer { -public: - explicit PixelVertexTest(const edm::ParameterSet& conf); - ~PixelVertexTest(); - virtual void beginJob(); - virtual void analyze(const edm::Event& ev, const edm::EventSetup& es); - virtual void endJob(); - -private: - edm::ParameterSet conf_; - // How noisy should I be - int verbose_; - // Tree of simple vars for testing resolution eff etc - TTree* t_; - TFile* f_; - int ntrk_; - static const int maxtrk_ = 1000; - double pt_[maxtrk_]; - double z0_[maxtrk_]; - double errz0_[maxtrk_]; - // double tanl_[maxtrk_]; - double theta_[maxtrk_]; - int nvtx_; - static const int maxvtx_ = 15; - double vz_[maxvtx_]; - double vzwt_[maxvtx_]; - double errvz_[maxvtx_]; - double errvzwt_[maxvtx_]; - int nvtx2_; - double vz2_[maxvtx_]; - double trk2avg_[maxvtx_]; - double errvz2_[maxvtx_]; - int ntrk2_[maxvtx_]; - double sumpt2_[maxvtx_]; - double simx_; - double simy_; - double simz_; - double vxkal_[maxvtx_]; - double vykal_[maxvtx_]; - double vzkal_[maxvtx_]; -}; - -PixelVertexTest::PixelVertexTest(const edm::ParameterSet& conf) : conf_(conf), t_(0), f_(0) { - edm::LogInfo("PixelVertexTest") << " CTOR"; -} - -PixelVertexTest::~PixelVertexTest() { - edm::LogInfo("PixelVertexTest") << " DTOR"; - delete f_; - // delete t_; -} - -void PixelVertexTest::beginJob() { - // How noisy? 
- verbose_ = conf_.getUntrackedParameter("Verbosity", 0); - - // Make my little tree - std::string file = conf_.getUntrackedParameter("OutputTree", "mytree.root"); - const char* cwd = gDirectory->GetPath(); - f_ = new TFile(file.c_str(), "RECREATE"); - t_ = new TTree("t", "Pixel Vertex Testing"); - t_->Branch("nvtx", &nvtx_, "nvtx/I"); - t_->Branch("vz", vz_, "vz[nvtx]/D"); - t_->Branch("errvz", errvz_, "errvz[nvtx]/D"); - t_->Branch("vzwt", vzwt_, "vzwt[nvtx]/D"); - t_->Branch("errvzwt", errvzwt_, "errvzwt[nvtx]/D"); - t_->Branch("nvtx2", &nvtx2_, "nvtx2/I"); - t_->Branch("vz2", vz2_, "vz2[nvtx2]/D"); - t_->Branch("trk2avg", trk2avg_, "trk2avg[nvtx2]/D"); - t_->Branch("errvz2", errvz2_, "errvz2[nvtx2]/D"); - t_->Branch("ntrk2", ntrk2_, "ntrk2[nvtx2]/I"); - t_->Branch("sumpt2", sumpt2_, "sumpt2[nvtx2]/D"); - t_->Branch("ntrk", &ntrk_, "ntrk/I"); - t_->Branch("pt", pt_, "pt[ntrk]/D"); - t_->Branch("z0", z0_, "z0[ntrk]/D"); - t_->Branch("errz0", errz0_, "errz0[ntrk]/D"); - // t_->Branch("tanl",tanl_,"tanl[ntrk]/D"); - t_->Branch("theta", theta_, "theta[ntrk]/D"); - t_->Branch("simx", &simx_, "simx/D"); - t_->Branch("simy", &simy_, "simy/D"); - t_->Branch("simz", &simz_, "simz/D"); - t_->Branch("vxkal", vxkal_, "vxkal[nvtx2]/D"); - t_->Branch("vykal", vykal_, "vykal[nvtx2]/D"); - t_->Branch("vzkal", vzkal_, "vzkal[nvtx2]/D"); - gDirectory->cd(cwd); -} - -void PixelVertexTest::analyze(const edm::Event& ev, const edm::EventSetup& es) { - cout << "*** PixelVertexTest, analyze event: " << ev.id() << endl; - - edm::ESHandle field; - es.get().get(field); - - edm::InputTag simG4 = conf_.getParameter("simG4"); - edm::Handle simVtcs; - ev.getByLabel(simG4, simVtcs); - if (verbose_ > 0) { - cout << "simulated vertices: " << simVtcs->size() << std::endl; - } - // simx_ = (simVtcs->size() > 0) ? (*simVtcs)[0].position().x()/10 : -9999.0; - // simy_ = (simVtcs->size() > 0) ? (*simVtcs)[0].position().y()/10 : -9999.0; - // simz_ = (simVtcs->size() > 0) ? 
(*simVtcs)[0].position().z()/10 : -9999.0; - // No longer need to convert from mm as of version 1_2_0 - simx_ = (simVtcs->size() > 0) ? (*simVtcs)[0].position().x() : -9999.0; - simy_ = (simVtcs->size() > 0) ? (*simVtcs)[0].position().y() : -9999.0; - simz_ = (simVtcs->size() > 0) ? (*simVtcs)[0].position().z() : -9999.0; - if (verbose_ > 1) { - for (int i = 0; i < simVtcs->size(); i++) { - std::cout << (*simVtcs)[i].parentIndex() << ": " << (*simVtcs)[i].position().x() << ", " - << (*simVtcs)[i].position().y() << ", " << (*simVtcs)[i].position().z() << "; "; - } - std::cout << "\n" << std::endl; - } - - edm::Handle trackCollection; - std::string trackCollName = conf_.getParameter("TrackCollection"); - ev.getByLabel(trackCollName, trackCollection); - const reco::TrackCollection tracks = *(trackCollection.product()); - - reco::TrackRefVector trks; - - if (verbose_ > 0) { - std::cout << *(trackCollection.provenance()) << std::endl; - cout << "Reconstructed " << tracks.size() << " tracks" << std::endl; - } - ntrk_ = 0; - for (unsigned int i = 0; i < tracks.size(); i++) { - if (verbose_ > 0) { - cout << "\tmomentum: " << tracks[i].momentum() << "\tPT: " << tracks[i].pt() << endl; - cout << "\tvertex: " << tracks[i].vertex() << "\tZ0: " << tracks[i].dz() << " +- " << tracks[i].dzError() << endl; - cout << "\tcharge: " << tracks[i].charge() << endl; - } - trks.push_back(reco::TrackRef(trackCollection, i)); - // Fill ntuple vars - if (ntrk_ < maxtrk_) { - pt_[ntrk_] = tracks[i].pt(); - z0_[ntrk_] = tracks[i].dz(); - // errz0_[ntrk_] = std::sqrt( tracks[i].covariance(3,3) ); - errz0_[ntrk_] = tracks[i].dzError(); - // tanl_[ntrk_] = tracks[i].tanDip(); - theta_[ntrk_] = tracks[i].theta(); - ntrk_++; - } - if (verbose_ > 0) - cout << "------------------------------------------------" << endl; - } - PVPositionBuilder pos; - nvtx_ = 0; - vz_[nvtx_] = pos.average(trks).value(); - errvz_[nvtx_] = pos.average(trks).error(); - vzwt_[nvtx_] = pos.wtAverage(trks).value(); - 
errvzwt_[nvtx_] = pos.wtAverage(trks).error(); - nvtx_++; - if (verbose_ > 0) { - std::cout << "The average z-position of these tracks is " << vz_[0] << " +- " << errvz_[0] << std::endl; - std::cout << "The weighted average z-position of these tracks is " << vzwt_[0] << " +- " << errvzwt_[0] - << std::endl; - } - - // NOW let's see if my vertex producer did a darn thing... - edm::Handle vertexCollection; - ev.getByLabel("pixelVertices", vertexCollection); - const reco::VertexCollection vertexes = *(vertexCollection.product()); - if (verbose_ > 0) { - std::cout << *(vertexCollection.provenance()) << std::endl; - cout << "Reconstructed " << vertexes.size() << " vertexes" << std::endl; - } - nvtx2_ = vertexes.size(); - PVClusterComparer vcompare; - for (int i = 0; i < nvtx2_ && i < maxvtx_; i++) { - vz2_[i] = vertexes[i].z(); - errvz2_[i] = std::sqrt(vertexes[i].error(2, 2)); - ntrk2_[i] = vertexes[i].tracksSize(); - sumpt2_[i] = vcompare.pTSquaredSum(vertexes[i]); - // Now calculate my own average position by hand to cross check conversion process - // trks.clear(); // not yet implemented - while (!trks.empty()) - trks.erase(trks.begin()); - for (reco::track_iterator j = vertexes[i].tracks_begin(); j != vertexes[i].tracks_end(); ++j) - trks.push_back(*j); - trk2avg_[i] = pos.wtAverage(trks).value(); - } - - // Now let's send off our tracks to the Kalman fitter to see what great things happen... 
- if (nvtx2_ > 0) { - vector t_tks; - for (int i = 0; i < nvtx2_ && i < maxvtx_; i++) { - t_tks.clear(); - for (reco::track_iterator j = vertexes[i].tracks_begin(); j != vertexes[i].tracks_end(); ++j) { - t_tks.push_back(reco::TransientTrack(**j, field.product())); - } - KalmanVertexFitter kvf; - // TransientVertex tv = kvf.vertex(t_tks,GlobalPoint(vertexes[i].x(),vertexes[i].y(),vertexes[i].z())); - TransientVertex tv = kvf.vertex(t_tks); - if (verbose_ > 0) - std::cout << "Kalman Position: " << reco::Vertex::Point(tv.position()) << std::endl; - vxkal_[i] = tv.position().x(); - vykal_[i] = tv.position().y(); - vzkal_[i] = tv.position().z(); - } - } - - // Finally, fill the tree with the above values - t_->Fill(); -} - -void PixelVertexTest::endJob() { - if (t_) - t_->Print(); - if (f_) { - f_->Print(); - f_->Write(); - } -} - -DEFINE_FWK_MODULE(PixelVertexTest); diff --git a/RecoTracker/PixelVertexFinding/test/alpaka/VertexFinder_t.cc b/RecoTracker/PixelVertexFinding/test/alpaka/VertexFinder_t.cc new file mode 100644 index 0000000000000..c3a74676956f8 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/test/alpaka/VertexFinder_t.cc @@ -0,0 +1,33 @@ +#include +#include "HeterogeneousCore/AlpakaInterface/interface/devices.h" +#include "HeterogeneousCore/AlpakaInterface/interface/host.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +#include "HeterogeneousCore/AlpakaInterface/interface/config.h" + +#include "DataFormats/VertexSoA/interface/ZVertexHost.h" +#include "DataFormats/VertexSoA/interface/alpaka/ZVertexSoACollection.h" +#include "DataFormats/VertexSoA/interface/ZVertexDevice.h" + +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHostAlpaka.h" +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h" + +using namespace std; +using namespace ALPAKA_ACCELERATOR_NAMESPACE; + +namespace 
ALPAKA_ACCELERATOR_NAMESPACE { + + namespace vertexfinder_t { + void runKernels(Queue& queue); + } + +}; // namespace ALPAKA_ACCELERATOR_NAMESPACE + +int main() { + const auto host = cms::alpakatools::host(); + const auto device = cms::alpakatools::devices()[0]; + Queue queue(device); + + vertexfinder_t::runKernels(queue); + return 0; +} diff --git a/RecoTracker/PixelVertexFinding/test/alpaka/VertexFinder_t.dev.cc b/RecoTracker/PixelVertexFinding/test/alpaka/VertexFinder_t.dev.cc new file mode 100644 index 0000000000000..e92d586dc1833 --- /dev/null +++ b/RecoTracker/PixelVertexFinding/test/alpaka/VertexFinder_t.dev.cc @@ -0,0 +1,282 @@ +#include +#include +#include +#include +#include +#include +#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h" +#include "HeterogeneousCore/AlpakaInterface/interface/memory.h" +// TrackUtilities only included in order to compile SoALayout with Eigen columns +#include "DataFormats/TrackSoA/interface/alpaka/TrackUtilities.h" +#ifdef USE_DBSCAN +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksDBSCAN.h" +#define CLUSTERIZE ALPAKA_ACCELERATOR_NAMESPACE::vertexFinder::ClusterTracksDBSCAN +#elif USE_ITERATIVE +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksIterative.h" +#define CLUSTERIZE ALPAKA_ACCELERATOR_NAMESPACE::vertexFinder::ClusterTracksIterative +#else +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/clusterTracksByDensity.h" +#define CLUSTERIZE ALPAKA_ACCELERATOR_NAMESPACE::vertexFinder::ClusterTracksByDensityKernel +#endif + +#include "RecoTracker/PixelVertexFinding/interface/PixelVertexWorkSpaceLayout.h" +#include "RecoTracker/PixelVertexFinding/plugins/PixelVertexWorkSpaceSoAHostAlpaka.h" +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/PixelVertexWorkSpaceSoADeviceAlpaka.h" + +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/fitVertices.h" +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/sortByPt2.h" +#include 
"RecoTracker/PixelVertexFinding/plugins/alpaka/splitVertices.h" +#include "RecoTracker/PixelVertexFinding/plugins/alpaka/vertexFinder.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + using namespace cms::alpakatools; + + struct ClusterGenerator { + explicit ClusterGenerator(float nvert, float ntrack) + : rgen(-13., 13), errgen(0.005, 0.025), clusGen(nvert), trackGen(ntrack), gauss(0., 1.), ptGen(1.) {} + + void operator()(vertexFinder::PixelVertexWorkSpaceSoAHost& pwsh, ZVertexHost& vtxh) { + int nclus = clusGen(reng); + for (int zint = 0; zint < vtxh.view().metadata().size(); ++zint) { + vtxh.view().zv()[zint] = 3.5f * gauss(reng); + } + + int aux = 0; + for (int iv = 0; iv < nclus; ++iv) { + auto nt = trackGen(reng); + pwsh.view().itrk()[iv] = nt; + for (int it = 0; it < nt; ++it) { + auto err = errgen(reng); // reality is not flat.... + pwsh.view().zt()[aux] = vtxh.view().zv()[iv] + err * gauss(reng); + pwsh.view().ezt2()[aux] = err * err; + pwsh.view().iv()[aux] = iv; + pwsh.view().ptt2()[aux] = (iv == 5 ? 
1.f : 0.5f) + ptGen(reng); + pwsh.view().ptt2()[aux] *= pwsh.view().ptt2()[aux]; + ++aux; + } + } + pwsh.view().ntrks() = aux; + // add noise + auto nt = 2 * trackGen(reng); + for (int it = 0; it < nt; ++it) { + auto err = 0.03f; + pwsh.view().zt()[it] = rgen(reng); + pwsh.view().ezt2()[it] = err * err; + pwsh.view().iv()[it] = 9999; + pwsh.view().ptt2()[it] = 0.5f + ptGen(reng); + pwsh.view().ptt2()[it] *= pwsh.view().ptt2()[it]; + } + } + + std::mt19937 reng; + std::uniform_real_distribution rgen; + std::uniform_real_distribution errgen; + std::poisson_distribution clusGen; + std::poisson_distribution trackGen; + std::normal_distribution gauss; + std::exponential_distribution ptGen; + }; + + namespace vertexfinder_t { +#ifdef ONE_KERNEL + class VertexFinderOneKernel { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + vertexFinder::VtxSoAView pdata, + vertexFinder::WsSoAView pws, + int minT, // min number of neighbours to be "seed" + float eps, // max absolute distance to cluster + float errmax, // max error to be "seed" + float chi2max // max normalized distance to cluster, + ) const { + vertexFinder::clusterTracksByDensity(acc, pdata, pws, minT, eps, errmax, chi2max); + alpaka::syncBlockThreads(acc); + vertexFinder::fitVertices(acc, pdata, pws, 50.); + alpaka::syncBlockThreads(acc); + vertexFinder::splitVertices(acc, pdata, pws, 9.f); + alpaka::syncBlockThreads(acc); + vertexFinder::fitVertices(acc, pdata, pws, 5000.); + alpaka::syncBlockThreads(acc); + vertexFinder::sortByPt2(acc, pdata, pws); + alpaka::syncBlockThreads(acc); + } + }; +#endif + + class Kernel_print { + public: + template + ALPAKA_FN_ACC void operator()(const TAcc& acc, + vertexFinder::VtxSoAView pdata, + vertexFinder::WsSoAView pws) const { + printf("nt,nv %d %d,%d\n", pws.ntrks(), pdata.nvFinal(), pws.nvIntermediate()); + } + }; + + void runKernels(Queue& queue) { + vertexFinder::PixelVertexWorkSpaceSoADevice ws_d(zVertex::MAXTRACKS, queue); + 
vertexFinder::PixelVertexWorkSpaceSoAHost ws_h(zVertex::MAXTRACKS, queue); + ZVertexHost vertices_h(queue); + ZVertexSoACollection vertices_d(queue); + + float eps = 0.1f; + std::array par{{eps, 0.01f, 9.0f}}; + for (int nav = 30; nav < 80; nav += 20) { + ClusterGenerator gen(nav, 10); + + for (int i = 8; i < 20; ++i) { + auto kk = i / 4; // M param + + gen(ws_h, vertices_h); + auto workDiv1D = make_workdiv(1, 1); + alpaka::exec(queue, workDiv1D, vertexFinder::Init{}, vertices_d.view(), ws_d.view()); + // std::cout << "v,t size " << ws_h.view().zt()[0] << ' ' << vertices_h.view().zv()[0] << std::endl; + alpaka::memcpy(queue, ws_d.buffer(), ws_h.buffer()); + alpaka::wait(queue); + + std::cout << "M eps, pset " << kk << ' ' << eps << ' ' << (i % 4) << std::endl; + + if ((i % 4) == 0) + par = {{eps, 0.02f, 12.0f}}; + if ((i % 4) == 1) + par = {{eps, 0.02f, 9.0f}}; + if ((i % 4) == 2) + par = {{eps, 0.01f, 9.0f}}; + if ((i % 4) == 3) + par = {{0.7f * eps, 0.01f, 9.0f}}; + + alpaka::exec(queue, workDiv1D, Kernel_print{}, vertices_d.view(), ws_d.view()); + + auto workDivClusterizer = make_workdiv(1, 512 + 256); +#ifdef ONE_KERNEL + alpaka::exec(queue, + workDivClusterizer, + VertexFinderOneKernel{}, + vertices_d.view(), + ws_d.view(), + kk, + par[0], + par[1], + par[2]); +#else + alpaka::exec( + queue, workDivClusterizer, CLUSTERIZE{}, vertices_d.view(), ws_d.view(), kk, par[0], par[1], par[2]); +#endif + alpaka::wait(queue); + alpaka::exec(queue, workDiv1D, Kernel_print{}, vertices_d.view(), ws_d.view()); + alpaka::wait(queue); + + auto workDivFitter = make_workdiv(1, 1024 - 256); + + alpaka::exec( + queue, workDivFitter, vertexFinder::FitVerticesKernel{}, vertices_d.view(), ws_d.view(), 50.f); + + alpaka::memcpy(queue, vertices_h.buffer(), vertices_d.buffer()); + alpaka::wait(queue); + + if (vertices_h.view().nvFinal() == 0) { + std::cout << "NO VERTICES???" 
<< std::endl; + continue; + } + + for (auto j = 0U; j < vertices_h.view().nvFinal(); ++j) + if (vertices_h.view().ndof()[j] > 0) + vertices_h.view().chi2()[j] /= float(vertices_h.view().ndof()[j]); + { + auto mx = + std::minmax_element(vertices_h.view().chi2(), vertices_h.view().chi2() + vertices_h.view().nvFinal()); + std::cout << "after fit nv, min max chi2 " << vertices_h.view().nvFinal() << " " << *mx.first << ' ' + << *mx.second << std::endl; + } + + alpaka::exec( + queue, workDivFitter, vertexFinder::FitVerticesKernel{}, vertices_d.view(), ws_d.view(), 50.f); + alpaka::memcpy(queue, vertices_h.buffer(), vertices_d.buffer()); + alpaka::wait(queue); + + for (auto j = 0U; j < vertices_h.view().nvFinal(); ++j) + if (vertices_h.view().ndof()[j] > 0) + vertices_h.view().chi2()[j] /= float(vertices_h.view().ndof()[j]); + { + auto mx = + std::minmax_element(vertices_h.view().chi2(), vertices_h.view().chi2() + vertices_h.view().nvFinal()); + std::cout << "before splitting nv, min max chi2 " << vertices_h.view().nvFinal() << " " << *mx.first << ' ' + << *mx.second << std::endl; + } + + auto workDivSplitter = make_workdiv(1024, 64); + + // one vertex per block!!! + alpaka::exec( + queue, workDivSplitter, vertexFinder::SplitVerticesKernel{}, vertices_d.view(), ws_d.view(), 9.f); + alpaka::memcpy(queue, ws_h.buffer(), ws_d.buffer()); + alpaka::wait(queue); + std::cout << "after split " << ws_h.view().nvIntermediate() << std::endl; + + alpaka::exec( + queue, workDivFitter, vertexFinder::FitVerticesKernel{}, vertices_d.view(), ws_d.view(), 5000.f); + + auto workDivSorter = make_workdiv(1, 256); + alpaka::exec(queue, workDivSorter, vertexFinder::SortByPt2Kernel{}, vertices_d.view(), ws_d.view()); + alpaka::memcpy(queue, vertices_h.buffer(), vertices_d.buffer()); + alpaka::wait(queue); + + if (vertices_h.view().nvFinal() == 0) { + std::cout << "NO VERTICES???" 
<< std::endl; + continue; + } + + for (auto j = 0U; j < vertices_h.view().nvFinal(); ++j) + if (vertices_h.view().ndof()[j] > 0) + vertices_h.view().chi2()[j] /= float(vertices_h.view().ndof()[j]); + { + auto mx = + std::minmax_element(vertices_h.view().chi2(), vertices_h.view().chi2() + vertices_h.view().nvFinal()); + std::cout << "nv, min max chi2 " << vertices_h.view().nvFinal() << " " << *mx.first << ' ' << *mx.second + << std::endl; + } + + { + auto mx = std::minmax_element(vertices_h.view().wv(), vertices_h.view().wv() + vertices_h.view().nvFinal()); + std::cout << "min max error " << 1. / std::sqrt(*mx.first) << ' ' << 1. / std::sqrt(*mx.second) + << std::endl; + } + + { + auto mx = + std::minmax_element(vertices_h.view().ptv2(), vertices_h.view().ptv2() + vertices_h.view().nvFinal()); + std::cout << "min max ptv2 " << *mx.first << ' ' << *mx.second << std::endl; + std::cout << "min max ptv2 " << vertices_h.view().ptv2()[vertices_h.view().sortInd()[0]] << ' ' + << vertices_h.view().ptv2()[vertices_h.view().sortInd()[vertices_h.view().nvFinal() - 1]] + << " at " << vertices_h.view().sortInd()[0] << ' ' + << vertices_h.view().sortInd()[vertices_h.view().nvFinal() - 1] << std::endl; + } + + float dd[vertices_h.view().nvFinal()]; + for (auto kv = 0U; kv < vertices_h.view().nvFinal(); ++kv) { + auto zr = vertices_h.view().zv()[kv]; + auto md = 500.0f; + for (int zint = 0; zint < ws_h.view().metadata().size(); ++zint) { + auto d = std::abs(zr - ws_h.view().zt()[zint]); + md = std::min(d, md); + } + dd[kv] = md; + } + if (i == 6) { + for (auto d : dd) + std::cout << d << ' '; + std::cout << std::endl; + } + auto mx = std::minmax_element(dd, dd + vertices_h.view().nvFinal()); + float rms = 0; + for (auto d : dd) + rms += d * d; + rms = std::sqrt(rms) / (vertices_h.view().nvFinal() - 1); + std::cout << "min max rms " << *mx.first << ' ' << *mx.second << ' ' << rms << std::endl; + + } // loop on events + } // lopp on ave vert + } + } // namespace vertexfinder_t +} // 
namespace ALPAKA_ACCELERATOR_NAMESPACE diff --git a/RecoTracker/TkSeedGenerator/python/trackerClusterCheck_cfi.py b/RecoTracker/TkSeedGenerator/python/trackerClusterCheck_cfi.py index e8d07bc719229..33bb15935778a 100644 --- a/RecoTracker/TkSeedGenerator/python/trackerClusterCheck_cfi.py +++ b/RecoTracker/TkSeedGenerator/python/trackerClusterCheck_cfi.py @@ -35,3 +35,10 @@ MaxNumberOfStripClusters = 1000 ) +from Configuration.Eras.Modifier_run3_upc_cff import run3_upc +run3_upc.toModify(trackerClusterCheck, + doClusterCheck=True, + cut = "strip < 30000 && pixel < 10000", + MaxNumberOfPixelClusters = 10000, + MaxNumberOfStripClusters = 30000 + ) diff --git a/RecoVertex/BeamSpotProducer/plugins/BuildFile.xml b/RecoVertex/BeamSpotProducer/plugins/BuildFile.xml index dec839e2af6cc..318ef5848183d 100644 --- a/RecoVertex/BeamSpotProducer/plugins/BuildFile.xml +++ b/RecoVertex/BeamSpotProducer/plugins/BuildFile.xml @@ -12,33 +12,42 @@ + + + + + + + + + @@ -48,3 +57,11 @@ + + + + + + + + diff --git a/RecoVertex/BeamSpotProducer/plugins/alpaka/BeamSpotDeviceProducer.cc b/RecoVertex/BeamSpotProducer/plugins/alpaka/BeamSpotDeviceProducer.cc new file mode 100644 index 0000000000000..bd597164827fa --- /dev/null +++ b/RecoVertex/BeamSpotProducer/plugins/alpaka/BeamSpotDeviceProducer.cc @@ -0,0 +1,59 @@ +#include "DataFormats/BeamSpot/interface/BeamSpot.h" +#include "DataFormats/BeamSpot/interface/BeamSpotHost.h" +#include "DataFormats/BeamSpot/interface/BeamSpotPOD.h" +#include "DataFormats/BeamSpot/interface/alpaka/BeamSpotDevice.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/ParameterSetDescription.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/EDPutToken.h" +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/global/EDProducer.h" +#include 
"HeterogeneousCore/AlpakaInterface/interface/config.h" + +namespace ALPAKA_ACCELERATOR_NAMESPACE { + + class BeamSpotDeviceProducer : public global::EDProducer<> { + public: + BeamSpotDeviceProducer(edm::ParameterSet const& config) + : legacyToken_{consumes(config.getParameter("src"))}, deviceToken_{produces()} {} + + void produce(edm::StreamID, device::Event& event, device::EventSetup const& setup) const override { + reco::BeamSpot const& beamspot = event.get(legacyToken_); + + BeamSpotHost hostProduct{event.queue()}; + hostProduct->x = beamspot.x0(); + hostProduct->y = beamspot.y0(); + hostProduct->z = beamspot.z0(); + hostProduct->sigmaZ = beamspot.sigmaZ(); + hostProduct->beamWidthX = beamspot.BeamWidthX(); + hostProduct->beamWidthY = beamspot.BeamWidthY(); + hostProduct->dxdz = beamspot.dxdz(); + hostProduct->dydz = beamspot.dydz(); + hostProduct->emittanceX = beamspot.emittanceX(); + hostProduct->emittanceY = beamspot.emittanceY(); + hostProduct->betaStar = beamspot.betaStar(); + + if constexpr (std::is_same_v) { + event.emplace(deviceToken_, std::move(hostProduct)); + } else { + BeamSpotDevice deviceProduct{event.queue()}; + alpaka::memcpy(event.queue(), deviceProduct.buffer(), hostProduct.const_buffer()); + event.emplace(deviceToken_, std::move(deviceProduct)); + } + } + + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("src", edm::InputTag{}); + descriptions.addWithDefaultLabel(desc); + } + + private: + const edm::EDGetTokenT legacyToken_; + const device::EDPutToken deviceToken_; + }; + +} // namespace ALPAKA_ACCELERATOR_NAMESPACE + +#include "HeterogeneousCore/AlpakaCore/interface/alpaka/MakerMacros.h" +DEFINE_FWK_ALPAKA_MODULE(BeamSpotDeviceProducer); diff --git a/RecoVertex/BeamSpotProducer/python/BeamSpot_cff.py b/RecoVertex/BeamSpotProducer/python/BeamSpot_cff.py index 7cc651af22106..5c17275c17274 100644 --- a/RecoVertex/BeamSpotProducer/python/BeamSpot_cff.py +++ 
b/RecoVertex/BeamSpotProducer/python/BeamSpot_cff.py @@ -2,6 +2,7 @@ from RecoVertex.BeamSpotProducer.BeamSpot_cfi import * from RecoVertex.BeamSpotProducer.offlineBeamSpotToCUDA_cfi import offlineBeamSpotToCUDA +from RecoVertex.BeamSpotProducer.beamSpotDeviceProducer_cfi import beamSpotDeviceProducer as _beamSpotDeviceProducer offlineBeamSpotTask = cms.Task(offlineBeamSpot) @@ -9,3 +10,9 @@ _offlineBeamSpotTask_gpu = offlineBeamSpotTask.copy() _offlineBeamSpotTask_gpu.add(offlineBeamSpotToCUDA) gpu.toReplaceWith(offlineBeamSpotTask, _offlineBeamSpotTask_gpu) + +from Configuration.ProcessModifiers.alpaka_cff import alpaka +_offlineBeamSpotTask_alpaka = offlineBeamSpotTask.copy() +offlineBeamSpotDevice = _beamSpotDeviceProducer.clone(src = cms.InputTag('offlineBeamSpot')) +_offlineBeamSpotTask_alpaka.add(offlineBeamSpotDevice) +alpaka.toReplaceWith(offlineBeamSpotTask, _offlineBeamSpotTask_alpaka) diff --git a/RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.cc b/RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.cc deleted file mode 100644 index 8871326bd073b..0000000000000 --- a/RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.cc +++ /dev/null @@ -1,155 +0,0 @@ -#include -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "MagneticField/Engine/interface/MagneticField.h" -#include "DataFormats/BeamSpot/interface/BeamSpot.h" -#include "DataFormats/VertexReco/interface/VertexFwd.h" -#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" -#include "TrackingTools/Records/interface/TransientTrackRecord.h" -#include "SimDataFormats/TrackingAnalysis/interface/TrackingParticle.h" -#include "SimDataFormats/TrackingAnalysis/interface/TrackingVertex.h" -#include "SimDataFormats/TrackingAnalysis/interface/TrackingVertexContainer.h" -#include 
"SimDataFormats/Associations/interface/TrackToTrackingParticleAssociator.h" -#include "RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.h" -#include - -using namespace std; -using namespace reco; -using namespace edm; - - -namespace { - void printTSOS ( const TrajectoryStateOnSurface & tsos ) - { - cout << tsos.globalPosition() << " , " - << tsos.globalMomentum() << endl; - } - - void printVertex ( const TransientVertex & vtx ) - { - cout << " `- pos=(" << vtx.position().x() << ", " - << vtx.position().y() << ", " << vtx.position().z() - << ") chi2=" << vtx.totalChiSquared() - << " ndf=" << vtx.degreesOfFreedom() << " hr=" - << vtx.hasRefittedTracks() << endl; - if ( vtx.originalTracks().size() && vtx.hasRefittedTracks() ) - { - cout << " `- 1st trk: "; - reco::TransientTrack t = vtx.originalTracks()[0]; - TrajectoryStateOnSurface tsos = t.impactPointState(); - printTSOS ( tsos ); - if ( vtx.refittedTracks().size() ) - { - cout << " `- 1st refttd: "; - reco::TransientTrack t2 = vtx.refittedTracks()[0]; - printTSOS ( t2.impactPointState() ); - } - } - } - - void printVertices ( const vector < TransientVertex > & vtces ) - { - cout << "[CVRAnalysis] " << vtces.size() << " vertices." 
<< endl; - for ( vector< TransientVertex >::const_iterator i=vtces.begin(); - i!=vtces.end() ; ++i ) - { - printVertex ( *i ); - cout << endl; - } - } - - void discussBeamSpot ( const reco::BeamSpot & bs ) - { - cout << "[CVRAnalysis] beamspot at " << bs.position() << endl; - reco::BeamSpot::Covariance3DMatrix cov = bs.rotatedCovariance3D(); - cout << "[CVRAnalysis] cov=" << cov << endl; - } -} - -CVRAnalysis::CVRAnalysis(const edm::ParameterSet& iconfig) : - trackcoll_( iconfig.getParameter("trackcoll") ), - vertexcoll_( iconfig.getParameter("vertexcoll") ), - beamspot_( iconfig.getParameter("beamspot") ), - trackingtruth_ ( iconfig.getParameter< edm::InputTag >("truth") ), - associator_ ( iconfig.getParameter("associator") ), - histo_ ( VertexHisto ( "vertices.root", "tracks.root" ) ), - bhisto_ ( VertexHisto ( "vertices-b.root", "tracks-b.root" ) ) -{ - edm::ParameterSet vtxconfig = iconfig.getParameter("vertexreco"); - vrec_ = new ConfigurableVertexReconstructor ( vtxconfig ); - cout << "[CVRAnalysis] vtxconfig=" << vtxconfig << endl; -} - -CVRAnalysis::~CVRAnalysis() { - if ( vrec_ ) delete vrec_; -} - -void CVRAnalysis::discussPrimary( const edm::Event& iEvent ) const -{ - edm::Handle retColl; - iEvent.getByLabel( vertexcoll_, retColl); - if ( retColl->size() ) - { - const reco::Vertex & vtx = *(retColl->begin()); - cout << "[CVRAnalysis] persistent primary: " << vtx.x() << ", " << vtx.y() - << ", " << vtx.z() << endl; - } -} - -void CVRAnalysis::analyze( const edm::Event & iEvent, - const edm::EventSetup & iSetup ) -{ - int evt=iEvent.id().event(); - cout << "[CVRAnalysis] next event: " << evt << endl; - edm::ESHandle magneticField; - iSetup.get().get(magneticField); - edm::ESHandle builder; - iSetup.get().get("TransientTrackBuilder",builder ); - - edm::Handle< edm::View < reco::Track > > tks; - iEvent.getByLabel( trackcoll_, tks ); - discussPrimary( iEvent ); - - edm::Handle TVCollectionH; - iEvent.getByLabel( trackingtruth_, TVCollectionH); - - edm::Handle 
TPCollectionH; - iEvent.getByLabel( trackingtruth_, TPCollectionH); - - edm::Handle byHitsAssociator; - iEvent.getByLabel(associator_, byHitsAssociator); - - reco::RecoToSimCollection p = - byHitsAssociator->associateRecoToSim ( tks, TPCollectionH ); - - edm::Handle bs; - iEvent.getByLabel ( beamspot_, bs ); - discussBeamSpot ( *bs ); - - vector ttks; - ttks = builder->build(tks); - cout << "[CVRAnalysis] got " << ttks.size() << " tracks." << endl; - - cout << "[CVRAnalysis] fit w/o beamspot constraint" << endl; - vector < TransientVertex > vtces = vrec_->vertices ( ttks ); - printVertices ( vtces ); - - if ( vtces.size() && TVCollectionH->size() ) - { - histo_.analyse ( *(TVCollectionH->begin()), vtces[0], "Primaries" ); - histo_.saveTracks ( vtces[0], p, "VtxTk" ); - } - - cout << "[CVRAnalysis] fit w beamspot constraint" << endl; - vector < TransientVertex > bvtces = vrec_->vertices ( ttks, *bs ); - printVertices ( bvtces ); - if ( bvtces.size() && TVCollectionH->size() ) - { - bhisto_.analyse ( *(TVCollectionH->begin()), bvtces[0], "Primaries" ); - } -} - -//define this as a plug-in -DEFINE_FWK_MODULE(CVRAnalysis); diff --git a/RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.h b/RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.h deleted file mode 100644 index 8f068267d7722..0000000000000 --- a/RecoVertex/ConfigurableVertexReco/test/CVRAnalysis.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef RecoVertex_CVRAnalysis -#define RecoVertex_CVRAnalysis - -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "RecoVertex/ConfigurableVertexReco/interface/ConfigurableVertexReconstructor.h" -#include "RecoVertex/ConfigurableVertexReco/test/VertexHisto.h" - -class CVRAnalysis : public edm::EDAnalyzer { - /** - * Class that glues the combined btagging algorithm to the framework - */ - public: - explicit CVRAnalysis( const edm::ParameterSet & ); - ~CVRAnalysis(); - - virtual void analyze( const edm::Event &, const 
edm::EventSetup &); - - private: - void discussPrimary( const edm::Event & ) const; - - private: - ConfigurableVertexReconstructor * vrec_; - std::string trackcoll_; - std::string vertexcoll_; - std::string beamspot_; - edm::InputTag trackingtruth_; - std::string associator_; - VertexHisto histo_; - VertexHisto bhisto_; -}; - -#endif diff --git a/RecoVertex/Configuration/python/RecoVertex_EventContent_cff.py b/RecoVertex/Configuration/python/RecoVertex_EventContent_cff.py index 00327cd5ec7a3..fc0802a044dd1 100644 --- a/RecoVertex/Configuration/python/RecoVertex_EventContent_cff.py +++ b/RecoVertex/Configuration/python/RecoVertex_EventContent_cff.py @@ -17,9 +17,7 @@ 'keep *_offlinePrimaryVertices4DWithBS__*', 'keep *_trackTimeValueMapProducer_*_*' ] -_phase2_tktiming_layer_RecoVertexEventContent = [ 'keep *_offlinePrimaryVertices4DnoPID__*', - 'keep *_offlinePrimaryVertices4DnoPIDWithBS__*', - 'keep *_tofPID_*_*'] +_phase2_tktiming_layer_RecoVertexEventContent = [ 'keep *_tofPID_*_*'] phase2_timing.toModify( RecoVertexAOD, outputCommands = RecoVertexAOD.outputCommands + _phase2_tktiming_RecoVertexEventContent) phase2_timing_layer.toModify( RecoVertexAOD, diff --git a/RecoVertex/Configuration/python/RecoVertex_cff.py b/RecoVertex/Configuration/python/RecoVertex_cff.py index 2bbba3b2aab00..dd2b3f8927818 100644 --- a/RecoVertex/Configuration/python/RecoVertex_cff.py +++ b/RecoVertex/Configuration/python/RecoVertex_cff.py @@ -45,15 +45,11 @@ from RecoVertex.Configuration.RecoVertex_phase2_timing_cff import (tpClusterProducer , quickTrackAssociatorByHits , trackTimeValueMapProducer , - unsortedOfflinePrimaryVertices4DnoPID , - trackWithVertexRefSelectorBeforeSorting4DnoPID , - trackRefsForJetsBeforeSorting4DnoPID , - offlinePrimaryVertices4DnoPID , - offlinePrimaryVertices4DnoPIDWithBS, unsortedOfflinePrimaryVertices4DwithPID , offlinePrimaryVertices4DwithPID , offlinePrimaryVertices4DwithPIDWithBS, tofPID, + tofPID3D, tofPID4DnoPID, unsortedOfflinePrimaryVertices4D, 
trackWithVertexRefSelectorBeforeSorting4D, @@ -73,11 +69,7 @@ ) _phase2_tktiming_layer_vertexrecoTask = cms.Task( _phase2_tktiming_vertexrecoTask.copy() , - unsortedOfflinePrimaryVertices4DnoPID , - trackWithVertexRefSelectorBeforeSorting4DnoPID , - trackRefsForJetsBeforeSorting4DnoPID , - offlinePrimaryVertices4DnoPID , - offlinePrimaryVertices4DnoPIDWithBS, + tofPID3D, tofPID, tofPID4DnoPID, ) @@ -93,3 +85,6 @@ phase2_timing_layer.toModify(offlinePrimaryVertices4D, vertices = "unsortedOfflinePrimaryVertices4D", particles = "trackRefsForJetsBeforeSorting4D") phase2_timing_layer.toModify(offlinePrimaryVertices4DWithBS, vertices = "unsortedOfflinePrimaryVertices4D:WithBS", particles = "trackRefsForJetsBeforeSorting4D") +from Configuration.ProcessModifiers.vertex4DTrackSelMVA_cff import vertex4DTrackSelMVA +vertex4DTrackSelMVA.toModify(unsortedOfflinePrimaryVertices4D, useMVACut = True) +vertex4DTrackSelMVA.toModify(unsortedOfflinePrimaryVertices4DwithPID, useMVACut = True) diff --git a/RecoVertex/Configuration/python/RecoVertex_phase2_timing_cff.py b/RecoVertex/Configuration/python/RecoVertex_phase2_timing_cff.py index 1ca0af44b269e..37f196eabaa1b 100644 --- a/RecoVertex/Configuration/python/RecoVertex_phase2_timing_cff.py +++ b/RecoVertex/Configuration/python/RecoVertex_phase2_timing_cff.py @@ -1,12 +1,19 @@ import FWCore.ParameterSet.Config as cms from RecoVertex.Configuration.RecoVertex_cff import unsortedOfflinePrimaryVertices, trackWithVertexRefSelector, trackRefsForJets, sortedPrimaryVertices, offlinePrimaryVertices, offlinePrimaryVerticesWithBS,vertexrecoTask -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import DA2D_vectParameters - unsortedOfflinePrimaryVertices4D = unsortedOfflinePrimaryVertices.clone( - TkClusParameters = DA2D_vectParameters, + TkClusParameters = dict( + algorithm = "DA2D_vect", + TkDAClusParameters = dict( + Tmin = 4.0, + Tpurge = 4.0, + Tstop = 2.0 + ), + ), TrackTimesLabel = 
cms.InputTag("trackTimeValueMapProducer","generalTracksConfigurableFlatResolutionModel"), TrackTimeResosLabel = cms.InputTag("trackTimeValueMapProducer","generalTracksConfigurableFlatResolutionModelResolution"), + vertexCollections = {0: dict(vertexTimeParameters = cms.PSet( algorithm = cms.string('legacy4D'))), + 1: dict(vertexTimeParameters = cms.PSet( algorithm = cms.string('legacy4D')))} ) trackWithVertexRefSelectorBeforeSorting4D = trackWithVertexRefSelector.clone( vertexTag = "unsortedOfflinePrimaryVertices4D", @@ -27,28 +34,6 @@ vertices = "unsortedOfflinePrimaryVertices4D:WithBS" ) -unsortedOfflinePrimaryVertices4DnoPID = unsortedOfflinePrimaryVertices4D.clone( - TrackTimesLabel = "trackExtenderWithMTD:generalTrackt0", - TrackTimeResosLabel = "trackExtenderWithMTD:generalTracksigmat0" -) -trackWithVertexRefSelectorBeforeSorting4DnoPID = trackWithVertexRefSelector.clone( - vertexTag = "unsortedOfflinePrimaryVertices4DnoPID", - ptMax = 9e99, - ptErrorCut = 9e99 -) -trackRefsForJetsBeforeSorting4DnoPID = trackRefsForJets.clone( - src = "trackWithVertexRefSelectorBeforeSorting4DnoPID" -) -offlinePrimaryVertices4DnoPID = offlinePrimaryVertices4D.clone( - vertices = "unsortedOfflinePrimaryVertices4DnoPID", - particles = "trackRefsForJetsBeforeSorting4DnoPID", - trackTimeTag = "trackExtenderWithMTD:generalTrackt0", - trackTimeResoTag = "trackExtenderWithMTD:generalTracksigmat0" -) -offlinePrimaryVertices4DnoPIDWithBS=offlinePrimaryVertices4DnoPID.clone( - vertices = "unsortedOfflinePrimaryVertices4DnoPID:WithBS" -) - unsortedOfflinePrimaryVertices4DwithPID = unsortedOfflinePrimaryVertices4D.clone( TrackTimesLabel = "tofPID4DnoPID:t0safe", TrackTimeResosLabel = "tofPID4DnoPID:sigmat0safe" @@ -76,9 +61,15 @@ from SimTracker.TrackAssociation.trackTimeValueMapProducer_cfi import trackTimeValueMapProducer from RecoMTD.TimingIDTools.tofPIDProducer_cfi import tofPIDProducer -tofPID4DnoPID=tofPIDProducer.clone(vtxsSrc='unsortedOfflinePrimaryVertices4DnoPID') 
+tofPID4DnoPID=tofPIDProducer.clone(vtxsSrc='unsortedOfflinePrimaryVertices') tofPID=tofPIDProducer.clone() +tofPID3D=tofPIDProducer.clone(vtxsSrc='unsortedOfflinePrimaryVertices') from Configuration.Eras.Modifier_phase2_timing_layer_cff import phase2_timing_layer -phase2_timing_layer.toModify(tofPID, vtxsSrc='unsortedOfflinePrimaryVertices4D') +phase2_timing_layer.toModify(tofPID, vtxsSrc='unsortedOfflinePrimaryVertices4D', vertexReassignment=False) +phase2_timing_layer.toModify(tofPID3D, vertexReassignment=False) +phase2_timing_layer.toModify(unsortedOfflinePrimaryVertices, + vertexCollections = {0: dict(vertexTimeParameters = cms.PSet( algorithm = cms.string('fromTracksPID'))), + 1: dict(vertexTimeParameters = cms.PSet( algorithm = cms.string('fromTracksPID')))} +) diff --git a/RecoVertex/PrimaryVertexProducer/interface/AdaptiveChisquarePrimaryVertexFitter.h b/RecoVertex/PrimaryVertexProducer/interface/AdaptiveChisquarePrimaryVertexFitter.h new file mode 100644 index 0000000000000..612d6a8db6b89 --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/AdaptiveChisquarePrimaryVertexFitter.h @@ -0,0 +1,98 @@ +#ifndef RecoVertex_PrimaryVertexProducer_AdaptiveChisquarePrimaryVertexFitter_h +#define RecoVertex_PrimaryVertexProducer_AdaptiveChisquarePrimaryVertexFitter_h + +/**\class AdaptiveChisquarePrimaryVertexFitter + + Description: simultaneous chisquared fit of primary vertices + +*/ +#include + +#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" +#include "TrackingTools/TransientTrack/interface/TransientTrack.h" +#include "RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h" + +class AdaptiveChisquarePrimaryVertexFitter : public PrimaryVertexFitterBase { +public: + AdaptiveChisquarePrimaryVertexFitter(double chicutoff = 2.5, + double zcutoff = 1.0, + double mintrkweight = 0.4, + bool multivertexfit = false); + ~AdaptiveChisquarePrimaryVertexFitter() override = default; + + std::vector fit(const std::vector &, + const 
std::vector &, + const reco::BeamSpot &, + const bool) override; + + using Error3 = ROOT::Math::SMatrix; + +protected: + void verify() { // DEBUG only + unsigned int nt = trackinfo_.size(); + unsigned int nv = xv_.size(); + assert((yv_.size() == nv) && "yv size"); + assert((zv_.size() == nv) && "zv size"); + assert((tkfirstv_.size() == (nv + 1)) && "tkfirstv size"); + assert((tkmap_.size() == tkweight_.size()) && "tkmapsize <> tkweightssize"); + for (unsigned int k = 0; k < nv; k++) { + assert((tkfirstv_[k] < tkweight_.size()) && "tkfirst[k]"); + assert((tkfirstv_[k + 1] <= tkweight_.size()) && "tkfirst[k+1]"); + assert((tkfirstv_[k] <= tkfirstv_[k + 1]) && "tkfirst[k/k+1]"); + for (unsigned int j = tkfirstv_[k]; j < tkfirstv_[k + 1]; k++) { + assert((j < tkmap_.size()) && "illegal tkfirst entry"); + unsigned int i = tkmap_[j]; + assert((i < nt) && "illegal tkmap entry"); + assert((tkweight_[i] >= 0) && "negative tkweight or nan"); + assert((tkweight_[i] <= 1) && "tkweight > 1 or nan"); + } + } + }; + + struct TrackInfo { + double S11, S22, S12; // inverse of the covariance (sub-)matrix + Error3 C; // H^T S H + double g[3]; + double H1[3], H2[3]; + double b1, b2; + double zpca, dzError; + }; + + std::vector vertices(const std::vector &, + const std::vector &, + const reco::BeamSpot &, + const bool); + TransientVertex refit(const TransientVertex &, const reco::BeamSpot &, const bool); + double track_in_vertex_chsq(const TrackInfo &, const double, const double, const double); + void fill_trackinfo(const std::vector &, const reco::BeamSpot &); + void fill_weights(const reco::BeamSpot &, const double beta = 1.); + TransientVertex get_TransientVertex(const unsigned int, + const std::vector> &, + const std::vector &, + const float, + const reco::BeamSpot &); + Error3 get_inverse_beam_covariance(const reco::BeamSpot &); + double update(const reco::BeamSpot &, float beam_weight, const bool fill_covariances = false); + void make_vtx_trk_map(const double); + bool clean(); + 
void remove_vertex(unsigned int); + + // track information + std::vector trackinfo_; + + // vertex lists: + std::vector xv_; + std::vector yv_; + std::vector zv_; + std::vector covv_; + // track-vertex-mapping and weights after a coarse z-cut: + std::vector tkfirstv_; // parallel to the vertex list + std::vector tkmap_; // parallel to tkweight + std::vector tkweight_; // parallel to tkmap + // configuration + double chi_cutoff_; + double z_cutoff_; + double min_trackweight_; + double multivertexfit_; +}; +#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h b/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h deleted file mode 100644 index d9f7a485f2040..0000000000000 --- a/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef DAClusterizerInZ_h -#define DAClusterizerInZ_h - -/**\class DAClusterizerInZ - - Description: separates event tracks into clusters along the beam line - -*/ - -#include "RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h" -#include "TrackingTools/TransientTrack/interface/TransientTrack.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include -#include "DataFormats/Math/interface/Error.h" -#include "RecoVertex/VertexTools/interface/VertexDistanceXY.h" -#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" - -class DAClusterizerInZ : public TrackClusterizerInZ { -public: - struct track_t { - double z; // z-coordinate at point of closest approach to the beamline - double dz2; // square of the error of z(pca) - const reco::TransientTrack *tt; // a pointer to the Transient Track - double Z; // Z[i] for DA clustering - double pi; // track weight - }; - - struct vertex_t { - double z; // z coordinate - double pk; // vertex weight for "constrained" clustering - // --- temporary numbers, used during update - double ei; - double sw; - double swz; - double se; - // ---for Tc - double swE; - double Tc; - }; - - 
DAClusterizerInZ(const edm::ParameterSet &conf); - - std::vector > clusterize( - const std::vector &tracks) const override; - - std::vector vertices(const std::vector &tracks, const int verbosity = 0) const; - - std::vector fill(const std::vector &tracks) const; - - bool split(double beta, std::vector &tks, std::vector &y, double threshold) const; - - double update(double beta, std::vector &tks, std::vector &y) const; - - double update(double beta, std::vector &tks, std::vector &y, double &) const; - - void dump(const double beta, - const std::vector &y, - const std::vector &tks, - const int verbosity = 0) const; - bool merge(std::vector &, int) const; - bool merge(std::vector &, double &) const; - bool purge(std::vector &, std::vector &, double &, const double) const; - - void splitAll(std::vector &y) const; - - double beta0(const double betamax, std::vector &tks, std::vector &y) const; - - double Eik(const track_t &t, const vertex_t &k) const; - -private: - bool verbose_; - bool useTc_; - float vertexSize_; - int maxIterations_; - double coolingFactor_; - float betamax_; - float betastop_; - double dzCutOff_; - double d0CutOff_; -}; - -#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZT_vect.h b/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZT_vect.h index b1c61fa9e6f6e..67c3943f9d6d2 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZT_vect.h +++ b/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZT_vect.h @@ -11,6 +11,7 @@ #include "RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h" #include "TrackingTools/TransientTrack/interface/TransientTrack.h" +#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include @@ -264,7 +265,7 @@ class DAClusterizerInZT_vect final : public TrackClusterizerInZ { std::vector > clusterize( const 
std::vector &tracks) const override; - std::vector vertices(const std::vector &tracks) const; + std::vector vertices(const std::vector &tracks) const override; track_t fill(const std::vector &tracks) const; diff --git a/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ_vect.h b/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ_vect.h index 85fa682a30f85..092ab552e3402 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ_vect.h +++ b/RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ_vect.h @@ -175,8 +175,10 @@ class DAClusterizerInZ_vect final : public TrackClusterizerInZ { std::vector > clusterize( const std::vector &tracks) const override; - std::vector vertices(const std::vector &tracks) const; + std::vector vertices(const std::vector &tracks) const override; + std::vector vertices_no_blocks(const std::vector &tracks) const; std::vector vertices_in_blocks(const std::vector &tracks) const; + std::vector fill_vertices(double beta, double rho0, track_t &tracks, vertex_t &vertices) const; track_t fill(const std::vector &tracks) const; diff --git a/RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h b/RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h index d70d2e0291aa7..fa9a127397c2d 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h +++ b/RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h @@ -23,7 +23,7 @@ class GapClusterizerInZ : public TrackClusterizerInZ { float zSeparation() const; - std::vector vertices(const std::vector& tracks) const; + std::vector vertices(const std::vector& tracks) const override; ~GapClusterizerInZ() override{}; diff --git a/RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h b/RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h index 65964e09cfd88..7018aac859c6b 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h +++ 
b/RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h @@ -39,7 +39,6 @@ class HITrackFilterForPVFinding : public TrackFilterForPVFinding { } static void fillPSetDescription(edm::ParameterSetDescription& desc) { - TrackFilterForPVFinding::fillPSetDescription(desc); desc.add("numTracksThreshold", 0); // HI only desc.add("maxNumTracksThreshold", std::numeric_limits::max()); desc.add("minPtTight", 0.0); diff --git a/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h new file mode 100644 index 0000000000000..870654e2766df --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h @@ -0,0 +1,33 @@ +#ifndef PrimaryVertexFitterBase_h +#define PrimaryVertexFitterBase_h + +#include + +/**\class PrimaryVertexFitterBase + + Description: base class for primary vertex fitters + +*/ +namespace edm { + class ParameterSet; + class ParameterSetDescription; +} // namespace edm + +namespace reco { + class BeamSpot; + class TransientTrack; +} // namespace reco + +class TransientVertex; + +class PrimaryVertexFitterBase { +public: + PrimaryVertexFitterBase(const edm::ParameterSet &conf) {} + PrimaryVertexFitterBase() {} + virtual ~PrimaryVertexFitterBase() = default; + virtual std::vector fit(const std::vector &, + const std::vector &, + const reco::BeamSpot &, + const bool) = 0; +}; +#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducer.h b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducer.h index 8cd9bb51a4ded..fea7068469a59 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducer.h +++ b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducer.h @@ -28,7 +28,6 @@ #include "FWCore/ParameterSet/interface/ParameterSet.h" -//#include "RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducerAlgorithm.h" #include "TrackingTools/TransientTrack/interface/TransientTrack.h" 
#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" #include "TrackingTools/Records/interface/TransientTrackRecord.h" @@ -40,16 +39,23 @@ #include "RecoVertex/PrimaryVertexProducer/interface/TrackFilterForPVFinding.h" #include "RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h" #include "RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h" -#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h" -#include "RecoVertex/PrimaryVertexProducer/interface/WeightedMeanFitter.h" #include "RecoVertex/KalmanVertexFit/interface/KalmanVertexFitter.h" #include "RecoVertex/AdaptiveVertexFit/interface/AdaptiveVertexFitter.h" -//#include "RecoVertex/VertexTools/interface/VertexDistanceXY.h" +#include "RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h" +#include "RecoVertex/PrimaryVertexProducer/interface/SequentialPrimaryVertexFitterAdapter.h" +#include "RecoVertex/PrimaryVertexProducer/interface/AdaptiveChisquarePrimaryVertexFitter.h" +#include "RecoVertex/PrimaryVertexProducer/interface/WeightedMeanFitter.h" + #include "RecoVertex/VertexPrimitives/interface/VertexException.h" #include #include "RecoVertex/PrimaryVertexProducer/interface/VertexHigherPtSquared.h" #include "RecoVertex/VertexTools/interface/VertexCompatibleWithBeam.h" #include "DataFormats/Common/interface/ValueMap.h" +// vertex timing +#include "RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmBase.h" +#include "RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmFromTracksPID.h" +#include "RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmLegacy4D.h" + // // class declaration // @@ -75,11 +81,12 @@ class PrimaryVertexProducer : public edm::stream::EDProducer<> { // vtx fitting algorithms struct algo { - VertexFitter<5>* fitter; + PrimaryVertexFitterBase* pv_fitter; VertexCompatibleWithBeam* vertexSelector; std::string label; bool useBeamConstraint; double minNdof; + VertexTimeAlgorithmBase* 
pv_time_estimator; }; std::vector algorithms; @@ -94,7 +101,11 @@ class PrimaryVertexProducer : public edm::stream::EDProducer<> { edm::EDGetTokenT trkToken; edm::EDGetTokenT > trkTimesToken; edm::EDGetTokenT > trkTimeResosToken; + edm::EDGetTokenT > trackMTDTimeQualityToken; - bool f4D; - bool weightFit; + bool useTransientTrackTime_; + bool useMVASelection_; + edm::ValueMap trackMTDTimeQualities_; + edm::ValueMap trackTimes_; + double minTrackTimeQuality_; }; diff --git a/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducerAlgorithm.h b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducerAlgorithm.h index e41609be01220..f61844b09aac2 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducerAlgorithm.h +++ b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducerAlgorithm.h @@ -39,10 +39,8 @@ #include "RecoVertex/PrimaryVertexProducer/interface/TrackFilterForPVFinding.h" #include "RecoVertex/PrimaryVertexProducer/interface/HITrackFilterForPVFinding.h" #include "RecoVertex/PrimaryVertexProducer/interface/GapClusterizerInZ.h" -#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h" #include "RecoVertex/KalmanVertexFit/interface/KalmanVertexFitter.h" #include "RecoVertex/AdaptiveVertexFit/interface/AdaptiveVertexFitter.h" -//#include "RecoVertex/VertexTools/interface/VertexDistanceXY.h" #include "RecoVertex/VertexPrimitives/interface/VertexException.h" #include #include "RecoVertex/PrimaryVertexProducer/interface/VertexHigherPtSquared.h" diff --git a/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexTrackClusterizer.h b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexTrackClusterizer.h new file mode 100644 index 0000000000000..98bddb4bbfd3d --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexTrackClusterizer.h @@ -0,0 +1,28 @@ +#ifndef PrimaryVertexTrackClusterizer_h +#define PrimaryVertexTrackClusterizer_h + +/**\class PrimaryVertexTrackClusterizer + + 
Description: interface/base class for track clusterizers that separate event tracks into clusters along the beam line + extends TrackClusterizerInZ + +*/ + +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include +#include "TrackingTools/TransientTrack/interface/TransientTrack.h" +#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" +#include "RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h" + +class PrimaryVertexTrackClusterizer : public TrackClusterizerInZ { +public: + PrimaryVertexTrackClusterizer() = default; + PrimaryVertexTrackClusterizer(const edm::ParameterSet& conf){}; + virtual std::vector vertices(const std::vector& tracks) const = 0; + virtual std::vector > clusterize( + const std::vector& tracks) const = 0; + + virtual ~PrimaryVertexTrackClusterizer() = default; +}; + +#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/SequentialPrimaryVertexFitterAdapter.h b/RecoVertex/PrimaryVertexProducer/interface/SequentialPrimaryVertexFitterAdapter.h new file mode 100644 index 0000000000000..bd5f866e2f21f --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/SequentialPrimaryVertexFitterAdapter.h @@ -0,0 +1,46 @@ +#ifndef SequentialPrimaryVertexFitterAdapter_h +#define SequentialPrimaryVertexFitterAdapter_h + +/**\class SequentialPrimaryVertexFitterAdapter + + Description: Adapter class for Kalman and Adaptive vertex fitters + +*/ + +#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" +#include "TrackingTools/TransientTrack/interface/TransientTrack.h" +#include "RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h" +#include "RecoVertex/VertexPrimitives/interface/VertexFitter.h" + +class SequentialPrimaryVertexFitterAdapter : public PrimaryVertexFitterBase { +public: + SequentialPrimaryVertexFitterAdapter() : fitter(nullptr){}; + SequentialPrimaryVertexFitterAdapter(const VertexFitter<5>* vertex_fitter) : fitter(vertex_fitter){}; + ~SequentialPrimaryVertexFitterAdapter() 
override = default; + + std::vector fit(const std::vector& dummy, + const std::vector& clusters, + const reco::BeamSpot& beamspot, + const bool useBeamConstraint) override { + std::vector pvs; + for (auto& cluster : clusters) { + const std::vector& tracklist = cluster.originalTracks(); + TransientVertex v; + if (useBeamConstraint && (tracklist.size() > 1)) { + v = fitter->vertex(tracklist, beamspot); + } else if (!(useBeamConstraint) && (tracklist.size() > 1)) { + v = fitter->vertex(tracklist); + } // else: no fit ==> v.isValid()=False + + if (v.isValid()) { + pvs.push_back(v); + } + } + return pvs; + }; + +protected: + // configuration + const VertexFitter<5>* fitter; // Kalman or Adaptive +}; +#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h b/RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h index e087aa7215d93..2acf274befb81 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h +++ b/RecoVertex/PrimaryVertexProducer/interface/TrackClusterizerInZ.h @@ -10,16 +10,17 @@ #include "FWCore/ParameterSet/interface/ParameterSet.h" #include #include "TrackingTools/TransientTrack/interface/TransientTrack.h" +#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" class TrackClusterizerInZ { public: - TrackClusterizerInZ(){}; + TrackClusterizerInZ() = default; TrackClusterizerInZ(const edm::ParameterSet& conf){}; - + virtual std::vector vertices(const std::vector& tracks) const = 0; virtual std::vector > clusterize( const std::vector& tracks) const = 0; - virtual ~TrackClusterizerInZ(){}; + virtual ~TrackClusterizerInZ() = default; }; #endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmBase.h b/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmBase.h new file mode 100644 index 0000000000000..198faa631d0b9 --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmBase.h @@ -0,0 +1,56 @@ +#ifndef 
usercode_PrimaryVertexAnalyzer_VertexTimeAlgorithmBase_h +#define usercode_PrimaryVertexAnalyzer_VertexTimeAlgorithmBase_h +#include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" + +namespace edm { + class Event; + class EventSetup; + class ParameterSet; + class ParameterSetDescription; + class ConsumesCollector; +} // namespace edm + +class VertexTimeAlgorithmBase { +public: + VertexTimeAlgorithmBase(const edm::ParameterSet& conf, edm::ConsumesCollector& iC) {} + virtual ~VertexTimeAlgorithmBase() = default; + VertexTimeAlgorithmBase(const VertexTimeAlgorithmBase&) = delete; + VertexTimeAlgorithmBase& operator=(const VertexTimeAlgorithmBase&) = delete; + + static void fillPSetDescription(edm::ParameterSetDescription& iDesc) {} + + virtual void setEvent(edm::Event& iEvent, edm::EventSetup const& iSetup) = 0; + + /** + * estimate the vertex time and time uncertainty for transient vertex + * + * returns true when a valid time has been determined, otherwise return false + */ + virtual bool vertexTime(float& vtxTime, float& vtxTimeError, TransientVertex const& vtx) const = 0; + + /** + * replace the vertices in the input vector by new vertices with time coordinates + * determined by the vertexTime method + * this implementation does not alter the weights from the previous fit + * must be overridden to change weights, coordinates, tracklists or to add or remove vertices + */ + virtual void fill_vertex_times(std::vector& pvs) { + for (unsigned int i = 0; i < pvs.size(); i++) { + auto vtx = pvs[i]; + if (vtx.isValid()) { + auto vtxTime(0.f), vtxTimeError(-1.f); + if (vertexTime(vtxTime, vtxTimeError, vtx)) { + auto err = vtx.positionError().matrix4D(); + err(3, 3) = vtxTimeError * vtxTimeError; + auto trkWeightMap3d = vtx.weightMap(); + auto vtx_with_time = TransientVertex( + vtx.position(), vtxTime, err, vtx.originalTracks(), vtx.totalChiSquared(), vtx.degreesOfFreedom()); + vtx_with_time.weightMap(trkWeightMap3d); + pvs[i] = vtx_with_time; + } + } + } + } 
+}; + +#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmFromTracksPID.h b/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmFromTracksPID.h new file mode 100644 index 0000000000000..1de2e45e59e6a --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmFromTracksPID.h @@ -0,0 +1,51 @@ + +#ifndef usercode_PrimaryVertexAnalyzer_VertexTimeAlgorithmFromTracksPID_h +#define usercode_PrimaryVertexAnalyzer_VertexTimeAlgorithmFromTracksPID_h + +#include "VertexTimeAlgorithmBase.h" + +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "DataFormats/Common/interface/ValueMap.h" + +class VertexTimeAlgorithmFromTracksPID : public VertexTimeAlgorithmBase { +public: + VertexTimeAlgorithmFromTracksPID(const edm::ParameterSet& conf, edm::ConsumesCollector& iC); + ~VertexTimeAlgorithmFromTracksPID() override = default; + + static void fillPSetDescription(edm::ParameterSetDescription& iDesc); + + void setEvent(edm::Event& iEvent, edm::EventSetup const& iSetup) override; + + bool vertexTime(float& vtxTime, float& vtxTimeError, TransientVertex const& vtx) const override; + +protected: + struct TrackInfo { + double trkWeight; + double trkTimeError; + double trkTimeHyp[3]; + }; + + edm::EDGetTokenT> const trackMTDTimeToken_; + edm::EDGetTokenT> const trackMTDTimeErrorToken_; + edm::EDGetTokenT> const trackMTDTimeQualityToken_; + edm::EDGetTokenT> const trackMTDTofPiToken_; + edm::EDGetTokenT> const trackMTDTofKToken_; + edm::EDGetTokenT> const trackMTDTofPToken_; + + double const minTrackVtxWeight_; + double const minTrackTimeQuality_; + double const probPion_; + double const probKaon_; + double const probProton_; + double const Tstart_; + double const coolingFactor_; + + edm::ValueMap trackMTDTimes_; + edm::ValueMap trackMTDTimeErrors_; + edm::ValueMap trackMTDTimeQualities_; + edm::ValueMap trackMTDTofPi_; + edm::ValueMap trackMTDTofK_; + edm::ValueMap trackMTDTofP_; +}; + +#endif diff --git 
a/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmLegacy4D.h b/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmLegacy4D.h new file mode 100644 index 0000000000000..0d84a2730842d --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmLegacy4D.h @@ -0,0 +1,22 @@ +#ifndef usercode_PrimaryVertexAnalyzer_VertexTimeLegacy4D_h +#define usercode_PrimaryVertexAnalyzer_VertexTimeLegacy4D_h + +#include "VertexTimeAlgorithmBase.h" + +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "DataFormats/Common/interface/ValueMap.h" +#include "TrackingTools/TransientTrack/interface/TransientTrackBuilder.h" + +class VertexTimeAlgorithmLegacy4D : public VertexTimeAlgorithmBase { +public: + VertexTimeAlgorithmLegacy4D(const edm::ParameterSet& conf, edm::ConsumesCollector& iC); + ~VertexTimeAlgorithmLegacy4D() override = default; + + static void fillPSetDescription(edm::ParameterSetDescription& iDesc); + + void setEvent(edm::Event& iEvent, edm::EventSetup const& iSetup) override; + + bool vertexTime(float& vtxTime, float& vtxTimeError, TransientVertex const& vtx) const override; +}; + +#endif diff --git a/RecoVertex/PrimaryVertexProducer/interface/WeightedMeanFitter.h b/RecoVertex/PrimaryVertexProducer/interface/WeightedMeanFitter.h index 20e4ac03d1e70..881bf2014187c 100644 --- a/RecoVertex/PrimaryVertexProducer/interface/WeightedMeanFitter.h +++ b/RecoVertex/PrimaryVertexProducer/interface/WeightedMeanFitter.h @@ -6,6 +6,7 @@ #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "DataFormats/BeamSpot/interface/BeamSpot.h" #include "RecoVertex/VertexPrimitives/interface/TransientVertex.h" +#include "RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexFitterBase.h" namespace WeightedMeanFitter { @@ -160,7 +161,9 @@ namespace WeightedMeanFitter { dist += std::pow(p.first.z() - z, 2) / (std::pow(wz, 2) + err(2, 2)); chi2 += dist; } - TransientVertex v(GlobalPoint(x, y, z), err, iclus, chi2, (int)ndof_x); + 
float ndof = + ndof_x > 1 ? (2 * ndof_x - 3) : 0.00001; // ndof_x is actually the number of tracks with non-zero weight + TransientVertex v(GlobalPoint(x, y, z), err, iclus, chi2, ndof); return v; } @@ -455,4 +458,71 @@ namespace WeightedMeanFitter { }; // namespace WeightedMeanFitter +// adapter for the multiprimaryvertexfitter scheme +// this code was originally introduced as part of PrimaryVertexProducer.cc +// by Adriano Di Florio , Giorgio Pizzati et.al. in #39995, then moved here with minor modifications +class WeightedMeanPrimaryVertexEstimator : public PrimaryVertexFitterBase { +public: + WeightedMeanPrimaryVertexEstimator() = default; + ~WeightedMeanPrimaryVertexEstimator() override = default; + + std::vector fit(const std::vector& dummy, + const std::vector& clusters, + const reco::BeamSpot& beamSpot, + const bool useBeamConstraint) override { + std::vector pvs; + std::vector seed(1); + + for (auto& cluster : clusters) { + if (cluster.originalTracks().size() > 1) { + std::vector tracklist = cluster.originalTracks(); + TransientVertex::TransientTrackToFloatMap trkWeightMap; + std::vector> points; + if (useBeamConstraint && (tracklist.size() > 1)) { + for (const auto& itrack : tracklist) { + GlobalPoint p = itrack.stateAtBeamLine().trackStateAtPCA().position(); + GlobalPoint err(itrack.stateAtBeamLine().transverseImpactParameter().error(), + itrack.stateAtBeamLine().transverseImpactParameter().error(), + itrack.track().dzError()); + std::pair p2(p, err); + points.push_back(p2); + } + + TransientVertex v = WeightedMeanFitter::weightedMeanOutlierRejectionBeamSpot(points, tracklist, beamSpot); + if (!v.hasTrackWeight()) { + // if the fitter doesn't provide weights, fill dummy values + TransientVertex::TransientTrackToFloatMap trkWeightMap; + for (const auto& trk : v.originalTracks()) { + trkWeightMap[trk] = 1.; + } + v.weightMap(trkWeightMap); + } + if ((v.positionError().matrix())(2, 2) != (WeightedMeanFitter::startError * WeightedMeanFitter::startError)) + 
pvs.push_back(v); + } else if (!(useBeamConstraint) && (tracklist.size() > 1)) { + for (const auto& itrack : tracklist) { + GlobalPoint p = itrack.impactPointState().globalPosition(); + GlobalPoint err(itrack.track().dxyError(), itrack.track().dxyError(), itrack.track().dzError()); + std::pair p2(p, err); + points.push_back(p2); + } + + TransientVertex v = WeightedMeanFitter::weightedMeanOutlierRejection(points, tracklist); + if (!v.hasTrackWeight()) { + // if the fitter doesn't provide weights, fill dummy values + TransientVertex::TransientTrackToFloatMap trkWeightMap; + for (const auto& trk : v.originalTracks()) { + trkWeightMap[trk] = 1.; + } + v.weightMap(trkWeightMap); + } + if ((v.positionError().matrix())(2, 2) != (WeightedMeanFitter::startError * WeightedMeanFitter::startError)) + pvs.push_back(v); //FIX with constants + } + } + } + return pvs; + } +}; + #endif diff --git a/RecoVertex/PrimaryVertexProducer/plugins/PrimaryVertexProducer.cc b/RecoVertex/PrimaryVertexProducer/plugins/PrimaryVertexProducer.cc index 6282d76774da7..eb27d0ccf8167 100644 --- a/RecoVertex/PrimaryVertexProducer/plugins/PrimaryVertexProducer.cc +++ b/RecoVertex/PrimaryVertexProducer/plugins/PrimaryVertexProducer.cc @@ -1,4 +1,5 @@ #include "RecoVertex/PrimaryVertexProducer/interface/PrimaryVertexProducer.h" +#include "FWCore/Framework/interface/ConsumesCollector.h" #include "DataFormats/VertexReco/interface/VertexFwd.h" #include "DataFormats/TrackReco/interface/TrackFwd.h" #include "DataFormats/Common/interface/Handle.h" @@ -18,11 +19,11 @@ PrimaryVertexProducer::PrimaryVertexProducer(const edm::ParameterSet& conf) : theTTBToken(esConsumes(edm::ESInputTag("", "TransientTrackBuilder"))), theConfig(conf) { fVerbose = conf.getUntrackedParameter("verbose", false); + useMVASelection_ = conf.getParameter("useMVACut"); trkToken = consumes(conf.getParameter("TrackLabel")); bsToken = consumes(conf.getParameter("beamSpotLabel")); - f4D = false; - weightFit = false; + useTransientTrackTime_ = 
false; // select and configure the track selection std::string trackSelectionAlgorithm = @@ -41,53 +42,84 @@ PrimaryVertexProducer::PrimaryVertexProducer(const edm::ParameterSet& conf) if (clusteringAlgorithm == "gap") { theTrackClusterizer = new GapClusterizerInZ( conf.getParameter("TkClusParameters").getParameter("TkGapClusParameters")); - } else if (clusteringAlgorithm == "DA") { - theTrackClusterizer = new DAClusterizerInZ( - conf.getParameter("TkClusParameters").getParameter("TkDAClusParameters")); - } - // provide the vectorized version of the clusterizer, if supported by the build - else if (clusteringAlgorithm == "DA_vect") { + } else if (clusteringAlgorithm == "DA_vect") { theTrackClusterizer = new DAClusterizerInZ_vect( conf.getParameter("TkClusParameters").getParameter("TkDAClusParameters")); } else if (clusteringAlgorithm == "DA2D_vect") { theTrackClusterizer = new DAClusterizerInZT_vect( conf.getParameter("TkClusParameters").getParameter("TkDAClusParameters")); - f4D = true; - } - - else { + useTransientTrackTime_ = true; + } else { throw VertexException("PrimaryVertexProducer: unknown clustering algorithm: " + clusteringAlgorithm); } - if (f4D) { - trkTimesToken = consumes>(conf.getParameter("TrackTimesLabel")); - trkTimeResosToken = consumes>(conf.getParameter("TrackTimeResosLabel")); + if (useTransientTrackTime_) { + trkTimesToken = consumes >(conf.getParameter("TrackTimesLabel")); + trkTimeResosToken = consumes >(conf.getParameter("TrackTimeResosLabel")); + trackMTDTimeQualityToken = + consumes >(conf.getParameter("trackMTDTimeQualityVMapTag")); + minTrackTimeQuality_ = conf.getParameter("minTrackTimeQuality"); } // select and configure the vertex fitters std::vector vertexCollections = - conf.getParameter>("vertexCollections"); + conf.getParameter >("vertexCollections"); for (std::vector::const_iterator algoconf = vertexCollections.begin(); algoconf != vertexCollections.end(); algoconf++) { algo algorithm; + + algorithm.label = 
algoconf->getParameter("label"); + + // configure the fitter and selector std::string fitterAlgorithm = algoconf->getParameter("algorithm"); if (fitterAlgorithm == "KalmanVertexFitter") { - algorithm.fitter = new KalmanVertexFitter(); + algorithm.pv_fitter = new SequentialPrimaryVertexFitterAdapter(new KalmanVertexFitter()); } else if (fitterAlgorithm == "AdaptiveVertexFitter") { - algorithm.fitter = new AdaptiveVertexFitter(GeometricAnnealing(algoconf->getParameter("chi2cutoff"))); + auto fitter = new AdaptiveVertexFitter(GeometricAnnealing(algoconf->getParameter("chi2cutoff"))); + algorithm.pv_fitter = new SequentialPrimaryVertexFitterAdapter(fitter); + } else if (fitterAlgorithm.empty()) { + algorithm.pv_fitter = nullptr; + } else if (fitterAlgorithm == "AdaptiveChisquareVertexFitter") { + algorithm.pv_fitter = new AdaptiveChisquarePrimaryVertexFitter(algoconf->getParameter("chi2cutoff"), + algoconf->getParameter("zcutoff"), + algoconf->getParameter("mintrkweight"), + false); + } else if (fitterAlgorithm == "MultiPrimaryVertexFitter") { + algorithm.pv_fitter = new AdaptiveChisquarePrimaryVertexFitter(algoconf->getParameter("chi2cutoff"), + algoconf->getParameter("zcutoff"), + algoconf->getParameter("mintrkweight"), + true); } else if (fitterAlgorithm == "WeightedMeanFitter") { - algorithm.fitter = nullptr; - weightFit = true; + algorithm.pv_fitter = new WeightedMeanPrimaryVertexEstimator(); } else { throw VertexException("PrimaryVertexProducer: unknown algorithm: " + fitterAlgorithm); } - algorithm.label = algoconf->getParameter("label"); algorithm.minNdof = algoconf->getParameter("minNdof"); algorithm.useBeamConstraint = algoconf->getParameter("useBeamConstraint"); algorithm.vertexSelector = new VertexCompatibleWithBeam(VertexDistanceXY(), algoconf->getParameter("maxDistanceToBeam")); + + // configure separate vertex time reconstruction if applicable + // note that the vertex time could, in principle, also come from the clusterizer or the vertex fit + + const 
auto& pv_time_conf = algoconf->getParameter("vertexTimeParameters"); + const std::string vertexTimeAlgorithm = pv_time_conf.getParameter("algorithm"); + edm::ConsumesCollector&& collector = consumesCollector(); + + if (vertexTimeAlgorithm.empty()) { + algorithm.pv_time_estimator = nullptr; + } else if (vertexTimeAlgorithm == "legacy4D") { + useTransientTrackTime_ = true; + algorithm.pv_time_estimator = + new VertexTimeAlgorithmLegacy4D(pv_time_conf.getParameter("legacy4D"), collector); + } else if (vertexTimeAlgorithm == "fromTracksPID") { + algorithm.pv_time_estimator = new VertexTimeAlgorithmFromTracksPID( + pv_time_conf.getParameter("fromTracksPID"), collector); + } else { + edm::LogWarning("MisConfiguration") << "unknown vertexTimeParameters.algorithm" << vertexTimeAlgorithm; + } algorithms.push_back(algorithm); produces(algorithm.label); @@ -113,8 +145,10 @@ PrimaryVertexProducer::~PrimaryVertexProducer() { if (theTrackClusterizer) delete theTrackClusterizer; for (std::vector::const_iterator algorithm = algorithms.begin(); algorithm != algorithms.end(); algorithm++) { - if (algorithm->fitter) - delete algorithm->fitter; + if (algorithm->pv_fitter) + delete algorithm->pv_fitter; + if (algorithm->pv_time_estimator) + delete algorithm->pv_time_estimator; if (algorithm->vertexSelector) delete algorithm->vertexSelector; } @@ -135,8 +169,8 @@ void PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& i VertexState beamVertexState(beamSpot); if ((beamVertexState.error().cxx() <= 0.) || (beamVertexState.error().cyy() <= 0.) 
|| (beamVertexState.error().czz() <= 0.)) { - validBS = false; edm::LogError("UnusableBeamSpot") << "Beamspot with invalid errors " << beamVertexState.error().matrix(); + validBS = false; } //if this is a recovery iteration, check if we already have a valid PV @@ -160,155 +194,124 @@ void PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& i edm::Handle tks; iEvent.getByToken(trkToken, tks); + // mechanism to put the beamspot if the track collection is empty + if (!tks.isValid()) { + for (std::vector::const_iterator algorithm = algorithms.begin(); algorithm != algorithms.end(); algorithm++) { + auto result = std::make_unique(); + reco::VertexCollection& vColl = (*result); + + GlobalError bse(beamSpot.rotatedCovariance3D()); + if ((bse.cxx() <= 0.) || (bse.cyy() <= 0.) || (bse.czz() <= 0.)) { + AlgebraicSymMatrix33 we; + we(0, 0) = 10000; + we(1, 1) = 10000; + we(2, 2) = 10000; + vColl.push_back(reco::Vertex(beamSpot.position(), we, 0., 0., 0)); + if (fVerbose) { + std::cout << "RecoVertex/PrimaryVertexProducer: " + << "Beamspot with invalid errors " << bse.matrix() << std::endl; + std::cout << "Will put Vertex derived from dummy-fake BeamSpot into Event.\n"; + } + } else { + vColl.push_back(reco::Vertex(beamSpot.position(), beamSpot.rotatedCovariance3D(), 0., 0., 0)); + if (fVerbose) { + std::cout << "RecoVertex/PrimaryVertexProducer: " + << " will put Vertex derived from BeamSpot into Event.\n"; + } + } + iEvent.put(std::move(result), algorithm->label); + } + + return; // early return + } + + for (auto& algo : algorithms) { + if (algo.pv_time_estimator) { + algo.pv_time_estimator->setEvent(iEvent, iSetup); + } + } + // interface RECO tracks to vertex reconstruction const auto& theB = &iSetup.getData(theTTBToken); std::vector t_tks; - if (f4D) { - edm::Handle> trackTimesH; - edm::Handle> trackTimeResosH; - iEvent.getByToken(trkTimesToken, trackTimesH); - iEvent.getByToken(trkTimeResosToken, trackTimeResosH); - t_tks = (*theB).build(tks, 
beamSpot, *(trackTimesH.product()), *(trackTimeResosH.product())); + if (useTransientTrackTime_) { + auto const& trackTimeResos_ = iEvent.get(trkTimeResosToken); + auto trackTimes_ = iEvent.get(trkTimesToken); + + if (useMVASelection_) { + trackMTDTimeQualities_ = iEvent.get(trackMTDTimeQualityToken); + + for (unsigned int i = 0; i < (*tks).size(); i++) { + const reco::TrackRef ref(tks, i); + auto const trkTimeQuality = trackMTDTimeQualities_[ref]; + if (trkTimeQuality < minTrackTimeQuality_) { + trackTimes_[ref] = std::numeric_limits::max(); + } + } + t_tks = (*theB).build(tks, beamSpot, trackTimes_, trackTimeResos_); + } else { + t_tks = (*theB).build(tks, beamSpot, trackTimes_, trackTimeResos_); + } } else { t_tks = (*theB).build(tks, beamSpot); } - if (fVerbose) { - std::cout << "RecoVertex/PrimaryVertexProducer" - << "Found: " << t_tks.size() << " reconstructed tracks" - << "\n"; - } // select tracks std::vector&& seltks = theTrackFilter->select(t_tks); // clusterize tracks in Z - std::vector>&& clusters = theTrackClusterizer->clusterize(seltks); + std::vector&& clusters = theTrackClusterizer->vertices(seltks); if (fVerbose) { - std::cout << " clustering returned " << clusters.size() << " clusters from " << seltks.size() - << " selected tracks" << std::endl; + edm::LogPrint("PrimaryVertexProducer") + << "Clustering returned " << clusters.size() << " clusters from " << seltks.size() << " selected tracks"; } // vertex fits for (std::vector::const_iterator algorithm = algorithms.begin(); algorithm != algorithms.end(); algorithm++) { auto result = std::make_unique(); reco::VertexCollection& vColl = (*result); - std::vector pvs; - for (std::vector>::const_iterator iclus = clusters.begin(); - iclus != clusters.end(); - iclus++) { - double sumwt = 0.; - double sumwt2 = 0.; - double sumw = 0.; - double meantime = 0.; - double vartime = 0.; - if (f4D) { - for (const auto& tk : *iclus) { - const double time = tk.timeExt(); - const double err = tk.dtErrorExt(); - const 
double inverr = err > 0. ? 1.0 / err : 0.; - const double w = inverr * inverr; - sumwt += w * time; - sumwt2 += w * time * time; - sumw += w; - } - meantime = sumwt / sumw; - double sumsq = sumwt2 - sumwt * sumwt / sumw; - double chisq = iclus->size() > 1 ? sumsq / double(iclus->size() - 1) : sumsq / double(iclus->size()); - vartime = chisq / sumw; - } - TransientVertex v; - if (algorithm->fitter) { - if (algorithm->useBeamConstraint && validBS && (iclus->size() > 1)) { - v = algorithm->fitter->vertex(*iclus, beamSpot); - } else if (!(algorithm->useBeamConstraint) && (iclus->size() > 1)) { - v = algorithm->fitter->vertex(*iclus); - } // else: no fit ==> v.isValid()=False - } else if (weightFit) { - std::vector> points; - if (algorithm->useBeamConstraint && validBS && (iclus->size() > 1)) { - for (const auto& itrack : *iclus) { - GlobalPoint p = itrack.stateAtBeamLine().trackStateAtPCA().position(); - GlobalPoint err(itrack.stateAtBeamLine().transverseImpactParameter().error(), - itrack.stateAtBeamLine().transverseImpactParameter().error(), - itrack.track().dzError()); - std::pair p2(p, err); - points.push_back(p2); - } - - v = WeightedMeanFitter::weightedMeanOutlierRejectionBeamSpot(points, *iclus, beamSpot); - if ((v.positionError().matrix())(2, 2) != (WeightedMeanFitter::startError * WeightedMeanFitter::startError)) - pvs.push_back(v); - } else if (!(algorithm->useBeamConstraint) && (iclus->size() > 1)) { - for (const auto& itrack : *iclus) { - GlobalPoint p = itrack.impactPointState().globalPosition(); - GlobalPoint err(itrack.track().dxyError(), itrack.track().dxyError(), itrack.track().dzError()); - std::pair p2(p, err); - points.push_back(p2); - } - - v = WeightedMeanFitter::weightedMeanOutlierRejection(points, *iclus); - if ((v.positionError().matrix())(2, 2) != (WeightedMeanFitter::startError * WeightedMeanFitter::startError)) - pvs.push_back(v); //FIX with constants - } - } else - throw VertexException( - "PrimaryVertexProducer: Something went wrong. 
You are not using the weighted mean fit and no algorithm was " - "selected."); - - // 4D vertices: add timing information - if (f4D and v.isValid()) { - auto err = v.positionError().matrix4D(); - err(3, 3) = vartime; - auto trkWeightMap3d = v.weightMap(); // copy the 3d-fit weights - v = TransientVertex(v.position(), meantime, err, v.originalTracks(), v.totalChiSquared(), v.degreesOfFreedom()); - v.weightMap(trkWeightMap3d); - } + if (algorithm->pv_fitter == nullptr) { + pvs = clusters; + } else { + pvs = algorithm->pv_fitter->fit(seltks, clusters, beamSpot, algorithm->useBeamConstraint); + } + + if (algorithm->pv_time_estimator != nullptr) { + algorithm->pv_time_estimator->fill_vertex_times(pvs); + } - if (fVerbose) { - if (v.isValid()) { - std::cout << "x,y,z"; - if (f4D) - std::cout << ",t"; - std::cout << "=" << v.position().x() << " " << v.position().y() << " " << v.position().z(); - if (f4D) - std::cout << " " << v.time(); - std::cout << " cluster size = " << (*iclus).size() << std::endl; - } else { - std::cout << "Invalid fitted vertex, cluster size=" << (*iclus).size() << std::endl; + // sort vertices by pt**2 vertex + if (pvs.size() > 1) { + sort(pvs.begin(), pvs.end(), VertexHigherPtSquared()); + } + + // select and convert transient vertices to (reco) vertices + for (std::vector::const_iterator iv = pvs.begin(); iv != pvs.end(); iv++) { + if (iv->isValid() && (iv->degreesOfFreedom() >= algorithm->minNdof)) { + reco::Vertex v = *iv; + if (!validBS || ((*(algorithm->vertexSelector))(v, beamVertexState))) { + vColl.push_back(v); } } - - //for weightFit we have already pushed it above (no timing infomration anyway) - if (v.isValid() && not weightFit && (v.degreesOfFreedom() >= algorithm->minNdof) && - (!validBS || (*(algorithm->vertexSelector))(v, beamVertexState))) - pvs.push_back(v); - } // end of cluster loop + } if (fVerbose) { - std::cout << "PrimaryVertexProducerAlgorithm::vertices candidates =" << pvs.size() << std::endl; + 
edm::LogPrint("PrimaryVertexProducer") << "PrimaryVertexProducer \"" << algorithm->label << "\" contains " + << pvs.size() << " reco::Vertex candidates"; } - if (clusters.size() > 2 && clusters.size() > 2 * pvs.size()) - edm::LogWarning("PrimaryVertexProducer") - << "more than half of candidate vertices lost " << pvs.size() << ' ' << clusters.size(); - - if (pvs.empty() && seltks.size() > 5) + if (clusters.size() > 2 && clusters.size() > 2 * pvs.size()) { edm::LogWarning("PrimaryVertexProducer") - << "no vertex found with " << seltks.size() << " tracks and " << clusters.size() << " vertex-candidates"; - - // sort vertices by pt**2 vertex (aka signal vertex tagging) - if (pvs.size() > 1) { - sort(pvs.begin(), pvs.end(), VertexHigherPtSquared()); + << "More than 50% of candidate vertices lost (" << pvs.size() << " out of " << clusters.size() << ")"; } - // convert transient vertices returned by the theAlgo to (reco) vertices - for (std::vector::const_iterator iv = pvs.begin(); iv != pvs.end(); iv++) { - reco::Vertex v = *iv; - vColl.push_back(v); + if (pvs.empty() && seltks.size() > 5) { + edm::LogWarning("PrimaryVertexProducer") + << "No vertex found with " << seltks.size() << " tracks and " << clusters.size() << " vertex candidates"; } if (vColl.empty()) { @@ -319,16 +322,14 @@ void PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& i we(1, 1) = 10000; we(2, 2) = 10000; vColl.push_back(reco::Vertex(beamSpot.position(), we, 0., 0., 0)); - if (fVerbose) { - std::cout << "RecoVertex/PrimaryVertexProducer: " - << "Beamspot with invalid errors " << bse.matrix() << std::endl; - std::cout << "Will put Vertex derived from dummy-fake BeamSpot into Event.\n"; - } + edm::LogWarning("PrimaryVertexProducer") << "Zero recostructed vertices, will put reco::Vertex derived from " + "dummy/fake BeamSpot into Event, BeamSpot has invalid errors: " + << bse.matrix(); } else { vColl.push_back(reco::Vertex(beamSpot.position(), beamSpot.rotatedCovariance3D(), 0., 
0., 0)); if (fVerbose) { - std::cout << "RecoVertex/PrimaryVertexProducer: " - << " will put Vertex derived from BeamSpot into Event.\n"; + edm::LogWarning("PrimaryVertexProducer") + << "Zero recostructed vertices, will put reco::Vertex derived from BeamSpot into Event."; } } } @@ -336,15 +337,18 @@ void PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& i if (fVerbose) { int ivtx = 0; for (reco::VertexCollection::const_iterator v = vColl.begin(); v != vColl.end(); ++v) { - std::cout << "recvtx " << ivtx++ << "#trk " << std::setw(3) << v->tracksSize() << " chi2 " << std::setw(4) - << v->chi2() << " ndof " << std::setw(3) << v->ndof() << " x " << std::setw(6) << v->position().x() - << " dx " << std::setw(6) << v->xError() << " y " << std::setw(6) << v->position().y() << " dy " - << std::setw(6) << v->yError() << " z " << std::setw(6) << v->position().z() << " dz " << std::setw(6) - << v->zError(); - if (f4D) { - std::cout << " t " << std::setw(6) << v->t() << " dt " << std::setw(6) << v->tError(); + edm::LogPrint("PrimaryVertexProducer") + << "recvtx " << std::setw(3) << std::fixed << ivtx++ << " #trk " << std::setw(3) << v->tracksSize() + << " chi2 " << std::setw(5) << std::setprecision(1) << v->chi2() << " ndof " << std::setw(5) + << std::setprecision(1) << v->ndof() << " x " << std::setw(7) << std::setprecision(4) << v->position().x() + << " dx " << std::setw(6) << std::setprecision(4) << v->xError() << " y " << std::setw(7) + << std::setprecision(4) << v->position().y() << " dy " << std::setw(6) << std::setprecision(4) + << v->yError() << " z " << std::setw(8) << std::setprecision(4) << v->position().z() << " dz " + << std::setw(6) << std::setprecision(4) << v->zError(); + if (v->tError() > 0) { + edm::LogPrint("PrimaryVertexProducer") << " t " << std::setw(6) << std::setprecision(3) << v->t() << " dt " + << std::setw(6) << std::setprecision(3) << v->tError(); } - std::cout << std::endl; } } @@ -353,7 +357,19 @@ void 
PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& i } void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { - // offlinePrimaryVertices + edm::ParameterSetDescription psd_pv_time; + { + edm::ParameterSetDescription psd1; + VertexTimeAlgorithmFromTracksPID::fillPSetDescription(psd1); + psd_pv_time.add("fromTracksPID", psd1); + + edm::ParameterSetDescription psd2; + VertexTimeAlgorithmLegacy4D::fillPSetDescription(psd2); + psd_pv_time.add("legacy4D", psd2); + } + psd_pv_time.add("algorithm", ""); // default = none + + // vertex collections edm::ParameterSetDescription desc; { edm::ParameterSetDescription vpsd1; @@ -362,7 +378,12 @@ void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& des vpsd1.add("useBeamConstraint", false); vpsd1.add("label", ""); vpsd1.add("chi2cutoff", 2.5); + vpsd1.add("zcutoff", 1.0); + vpsd1.add("mintrkweight", 0.0); vpsd1.add("minNdof", 0.0); + vpsd1.add("vertexTimeParameters", psd_pv_time); + + // two default values : with- and without beam constraint std::vector temp1; temp1.reserve(2); { @@ -372,7 +393,12 @@ void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& des temp2.addParameter("useBeamConstraint", false); temp2.addParameter("label", ""); temp2.addParameter("chi2cutoff", 2.5); + temp2.addParameter("zcutoff", 1.0); + temp2.addParameter("mintrkweight", 0.); temp2.addParameter("minNdof", 0.0); + edm::ParameterSet temp_vertexTime; + temp_vertexTime.addParameter("algorithm", ""); + temp2.addParameter("vertexTimeParameters", temp_vertexTime); temp1.push_back(temp2); } { @@ -382,7 +408,12 @@ void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& des temp2.addParameter("useBeamConstraint", true); temp2.addParameter("label", "WithBS"); temp2.addParameter("chi2cutoff", 2.5); + temp2.addParameter("zcutoff", 1.0); + temp2.addParameter("mintrkweight", 0.); temp2.addParameter("minNdof", 2.0); + edm::ParameterSet 
temp_vertexTime; + temp_vertexTime.addParameter("algorithm", ""); + temp2.addParameter("vertexTimeParameters", temp_vertexTime); temp1.push_back(temp2); } desc.addVPSet("vertexCollections", vpsd1, temp1); @@ -391,15 +422,14 @@ void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& des { edm::ParameterSetDescription psd0; TrackFilterForPVFinding::fillPSetDescription(psd0); - psd0.add("numTracksThreshold", 0); // HI only - psd0.add("maxNumTracksThreshold", 10000000); // HI only - psd0.add("minPtTight", 0.0); // HI only + HITrackFilterForPVFinding::fillPSetDescription(psd0); // HI only desc.add("TkFilterParameters", psd0); } desc.add("beamSpotLabel", edm::InputTag("offlineBeamSpot")); desc.add("TrackLabel", edm::InputTag("generalTracks")); - desc.add("TrackTimeResosLabel", edm::InputTag("dummy_default")); // 4D only - desc.add("TrackTimesLabel", edm::InputTag("dummy_default")); // 4D only + desc.add("TrackTimeResosLabel", edm::InputTag("dummy_default")); // 4D only + desc.add("TrackTimesLabel", edm::InputTag("dummy_default")); // 4D only + desc.add("trackMTDTimeQualityVMapTag", edm::InputTag("mtdTrackQualityMVA:mtdQualMVA")); // 4D only { edm::ParameterSetDescription psd0; @@ -418,6 +448,8 @@ void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& des desc.add("isRecoveryIteration", false); desc.add("recoveryVtxCollection", {""}); + desc.add("useMVACut", false); + desc.add("minTrackTimeQuality", 0.8); descriptions.add("primaryVertexProducer", desc); } diff --git a/RecoVertex/PrimaryVertexProducer/python/OfflinePrimaryVertices_cfi.py b/RecoVertex/PrimaryVertexProducer/python/OfflinePrimaryVertices_cfi.py index ec9a05c3a4393..cff8dbaebaa1e 100644 --- a/RecoVertex/PrimaryVertexProducer/python/OfflinePrimaryVertices_cfi.py +++ b/RecoVertex/PrimaryVertexProducer/python/OfflinePrimaryVertices_cfi.py @@ -1,51 +1,45 @@ import FWCore.ParameterSet.Config as cms -from RecoVertex.PrimaryVertexProducer.TkClusParameters_cff import 
DA_vectParameters +from RecoVertex.PrimaryVertexProducer.primaryVertexProducer_cfi import primaryVertexProducer -offlinePrimaryVertices = cms.EDProducer( - "PrimaryVertexProducer", +offlinePrimaryVertices = primaryVertexProducer.clone() - verbose = cms.untracked.bool(False), - TrackLabel = cms.InputTag("generalTracks"), - beamSpotLabel = cms.InputTag("offlineBeamSpot"), - - TkFilterParameters = cms.PSet( - algorithm=cms.string('filter'), - maxNormalizedChi2 = cms.double(10.0), - minPixelLayersWithHits=cms.int32(2), - minSiliconLayersWithHits = cms.int32(5), - maxD0Significance = cms.double(4.0), - maxD0Error = cms.double(1.0), - maxDzError = cms.double(1.0), - minPt = cms.double(0.0), - maxEta = cms.double(2.4), - trackQuality = cms.string("any") - ), +DA_vectParameters = cms.PSet(primaryVertexProducer.TkClusParameters.clone()) - TkClusParameters = DA_vectParameters, +from Configuration.ProcessModifiers.vertexInBlocks_cff import vertexInBlocks +vertexInBlocks.toModify(offlinePrimaryVertices, + TkClusParameters = dict( + TkDAClusParameters = dict( + runInBlocks = True, + block_size = 128, + overlap_frac = 0.5 + ) + ) +) - vertexCollections = cms.VPSet( - [cms.PSet(label=cms.string(""), - algorithm=cms.string("AdaptiveVertexFitter"), - chi2cutoff = cms.double(2.5), - minNdof=cms.double(0.0), - useBeamConstraint = cms.bool(False), - maxDistanceToBeam = cms.double(1.0) - ), - cms.PSet(label=cms.string("WithBS"), - algorithm = cms.string('AdaptiveVertexFitter'), - chi2cutoff = cms.double(2.5), - minNdof=cms.double(2.0), - useBeamConstraint = cms.bool(True), - maxDistanceToBeam = cms.double(1.0), - ) - ] - ), - - isRecoveryIteration = cms.bool(False), - recoveryVtxCollection = cms.InputTag("") +from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker +(phase2_tracker & vertexInBlocks).toModify(offlinePrimaryVertices, + TkClusParameters = dict( + TkDAClusParameters = dict( + block_size = 512, + overlap_frac = 0.5) + ) +) - +from 
Configuration.Eras.Modifier_highBetaStar_2018_cff import highBetaStar_2018 +highBetaStar_2018.toModify(offlinePrimaryVertices, + TkClusParameters = dict( + TkDAClusParameters = dict( + Tmin = 4.0, + Tpurge = 1.0, + Tstop = 1.0, + vertexSize = 0.01, + d0CutOff = 4., + dzCutOff = 5., + zmerge = 2.e-2, + uniquetrkweight = 0.9 + ) + ) ) from Configuration.ProcessModifiers.weightedVertexing_cff import weightedVertexing @@ -97,11 +91,8 @@ maxNumTracksThreshold = cms.int32(1000), minPtTight = cms.double(1.0) ), - TkClusParameters = cms.PSet( - algorithm = cms.string("gap"), - TkGapClusParameters = cms.PSet( - zSeparation = cms.double(1.0) - ) + TkClusParameters = dict( + algorithm = "gap" ) ) @@ -121,4 +112,3 @@ 1: dict(chi2cutoff = 4.0, minNdof = -2.0), } ) - diff --git a/RecoVertex/PrimaryVertexProducer/python/TkClusParameters_cff.py b/RecoVertex/PrimaryVertexProducer/python/TkClusParameters_cff.py deleted file mode 100644 index a9fd50d4301dd..0000000000000 --- a/RecoVertex/PrimaryVertexProducer/python/TkClusParameters_cff.py +++ /dev/null @@ -1,77 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -DA_vectParameters = cms.PSet( - algorithm = cms.string("DA_vect"), - TkDAClusParameters = cms.PSet( - coolingFactor = cms.double(0.6), # moderate annealing speed - zrange = cms.double(4.), # consider only clusters within 4 sigma*sqrt(T) of a track - delta_highT = cms.double(1.e-2), # convergence requirement at high T - delta_lowT = cms.double(1.e-3), # convergence requirement at low T - convergence_mode = cms.int32(0), # 0 = two steps, 1 = dynamic with sqrt(T) - Tmin = cms.double(2.0), # end of vertex splitting - Tpurge = cms.double(2.0), # cleaning - Tstop = cms.double(0.5), # end of annealing - vertexSize = cms.double(0.006), # added in quadrature to track-z resolutions - d0CutOff = cms.double(3.), # downweight high IP tracks - dzCutOff = cms.double(3.), # outlier rejection after freeze-out (T= 0) && " negative chi**2"); +#endif + return chsq; +} + +void 
AdaptiveChisquarePrimaryVertexFitter::fill_trackinfo(const std::vector &tracks, + const reco::BeamSpot &beamSpot) { + /* fill track information used during fits into arrays, parallell to the list of input tracks */ + + trackinfo_.clear(); + trackinfo_.reserve(tracks.size()); + + for (auto &trk : tracks) { + TrackInfo ti; + // F1,F2 are the perigee parameters (3,4) + const auto tspca = trk.stateAtBeamLine().trackStateAtPCA(); // freeTrajectoryState + const auto tspca_pe = PerigeeConversions::ftsToPerigeeError(tspca); + const auto momentum = tspca.momentum(); + auto const cos_phi = momentum.x() / momentum.perp(); + auto const sin_phi = momentum.y() / momentum.perp(); + auto const tan_lambda = momentum.z() / momentum.perp(); + + // covariance matrix of (F1,F2) + double cov11 = tspca_pe.covarianceMatrix()(3, 3); + double cov22 = tspca_pe.covarianceMatrix()(4, 4); + double cov12 = tspca_pe.covarianceMatrix()(3, 4); + + // S = cov^{-1} + double DetV = cov11 * cov22 - cov12 * cov12; + if (fabs(DetV) < 1.e-16) { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") + << "Warning, det(V) almost vanishes : " << DetV << " !! This should not happen!" 
<< std::endl; + ti.S11 = 0; + ti.S22 = 0; + ti.S12 = 0; + } else { + ti.S11 = cov22 / DetV; + ti.S22 = cov11 / DetV; + ti.S12 = -cov12 / DetV; + } + ti.b1 = tspca.position().x() * sin_phi - tspca.position().y() * cos_phi; + ti.H1[0] = -sin_phi; + ti.H1[1] = cos_phi; + ti.H1[2] = 0; + ti.b2 = tspca.position().z() - (tspca.position().x() * cos_phi + tspca.position().y() * sin_phi) * tan_lambda; + ti.H2[0] = cos_phi * tan_lambda; + ti.H2[1] = sin_phi * tan_lambda; + ti.H2[2] = -1.; + + for (int k = 0; k < 3; k++) { + double SH1k = (ti.S11 * ti.H1[k] + ti.S12 * ti.H2[k]); + double SH2k = (ti.S12 * ti.H1[k] + ti.S22 * ti.H2[k]); + ti.g[k] = ti.b1 * SH1k + ti.b2 * SH2k; + for (int l = 0; l < 3; l++) { + ti.C(l, k) = ti.H1[l] * SH1k + ti.H2[l] * SH2k; + } + } + + ti.zpca = tspca.position().z(); + ti.dzError = trk.track().dzError(); + trackinfo_.push_back(ti); + } +} + +void AdaptiveChisquarePrimaryVertexFitter::make_vtx_trk_map(double zrange_scale) { + unsigned const int nv = xv_.size(); + unsigned const int nt = trackinfo_.size(); + +#ifdef PVTX_DEBUG + if (nv < 1) { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFit") << " empty vertex list with " << nt << " tracks" << std::endl; + return; + } +#endif + + // parallel lists for track to vertex mapping, tracks are not sorted + tkmap_.clear(); // index in trackinfo_ + tkweight_.clear(); // weight in vertex + tkfirstv_.clear(); // each vertex k owns a section of those list : tkfirstv_[k] .. 
tkfirstv_[k+1]-1 + + if (nv == 1) { + // always accept all tracks for a single vertex fit + tkfirstv_.push_back(0); + tkfirstv_.push_back(nt); + tkweight_.assign(nt, 0.); + tkmap_.reserve(nt); + for (unsigned int i = 0; i < nt; i++) { + tkmap_.emplace_back(i); + } + return; + } + + // n > 1 + tkmap_.reserve(nv * 100); + tkweight_.reserve(nv * 100); + for (unsigned int k = 0; k < nv; k++) { + tkfirstv_.emplace_back(tkmap_.size()); + for (unsigned int i = 0; i < nt; i++) { + auto &ti = trackinfo_[i]; + const double zrange = zrange_scale * ti.dzError; + if (std::abs(zv_[k] - ti.zpca) < z_cutoff_) { + const double dztrk = ti.b2 + xv_[k] * ti.H2[0] + yv_[k] * ti.H2[1] - zv_[k]; + if (std::abs(dztrk) < zrange) { + tkmap_.emplace_back(i); + tkweight_.emplace_back(0.); + } + } + } + } + tkfirstv_.emplace_back(tkmap_.size()); // extra entry, simplifies loops, every vertex has a "successor" now +} + +void AdaptiveChisquarePrimaryVertexFitter::fill_weights(const reco::BeamSpot &beamspot, double beta) { + // multi-vertex version + unsigned const int nt = trackinfo_.size(); + unsigned const int nv = xv_.size(); + const double beta_over_2 = 0.5 * beta; + const double argmax = beta_over_2 * chi_cutoff_ * chi_cutoff_ * 5; + const double Z_cutoff = vdt::fast_exp(-beta_over_2 * chi_cutoff_ * chi_cutoff_); + + std::vector Z_track(nt, Z_cutoff); + + // evaluate and cache track-vertex assignment chi**2 for all clusters and sum up Z + for (unsigned int k = 0; k < nv; k++) { + for (unsigned int j = tkfirstv_[k]; j < tkfirstv_[k + 1]; j++) { + const unsigned int i = tkmap_[j]; + double arg = beta_over_2 * track_in_vertex_chsq(trackinfo_[i], xv_[k], yv_[k], zv_[k]); + if (arg < argmax) { + const double e = vdt::fast_exp(-arg); + tkweight_[j] = e; // must later be normalized by the proper Z_track[i] + Z_track[i] += e; // sum up exponentials to normalize + } else { + tkweight_[j] = 0.; + } + } + } + + // now we have the partition function, Z_i and can evaluate assignment probabilities (aka 
weights) + for (unsigned int j = 0; j < tkmap_.size(); j++) { + const unsigned int i = tkmap_[j]; +#ifdef PVT_DEBUG + assert((i < nt) && "tkmap out of range"); + assert((tkmap_.size() == tkweight_.size()) && "map and list not aliged"); +#endif + tkweight_[j] /= Z_track[i]; + } +} + +bool AdaptiveChisquarePrimaryVertexFitter::clean() { + /* in multi-vertex fitting, nearby vertices can fall on top of each other, + even when the initial seeds don't, some kind of duplicate removal is required + the approach in this method is similar to the method applied in clustering: + at least two tracks with a weight above a threshold (trkweight_threshold) are required. + vertices that don't fulfill this are either insignficant or very close + to another vertex + */ + const double trkweight_threshold = 0.7; + unsigned int nv = xv_.size(); + if (nv < 2) + return false; + + // sum of weights per vertex + std::vector wsumhi(nv, 0); + for (unsigned int k = 0; k < nv; k++) { + for (unsigned int j = tkfirstv_[k]; j < tkfirstv_[k + 1]; j++) { + if (tkweight_[j] > trkweight_threshold) + wsumhi[k] += tkweight_[j]; + } + } + + double dzmin = 0; + unsigned int k_dzmin = 0; + for (unsigned int k = 0; k < nv - 1; k++) { + double dz = std::abs(zv_[k + 1] - zv_[k]); + if ((k == 0) || (dz < dzmin)) { + dzmin = dz; + k_dzmin = k; + } + } + + if ((std::abs(dzmin) < 0.0200) && (std::min(wsumhi[k_dzmin], wsumhi[k_dzmin + 1]) < 0.5)) { + if (wsumhi[k_dzmin] < wsumhi[k_dzmin + 1]) { + remove_vertex(k_dzmin); + } else { + remove_vertex(k_dzmin + 1); + } + } + + return true; +} + +void AdaptiveChisquarePrimaryVertexFitter::remove_vertex(unsigned int k) { + // remove a vertex or rather merge it with it's neighbour + // used for multi-vertex fits only + unsigned int nv = xv_.size(); + if (nv < 2) + return; + + // 1) remove the vertex from the vertex list + xv_.erase(xv_.begin() + k); + yv_.erase(yv_.begin() + k); + zv_.erase(zv_.begin() + k); + covv_.erase(covv_.begin() + k); + + // 2) adjust the track-map 
map + // 2a) remove the map entries that belong the deleted vertex + const unsigned int num_erased_map_entries = tkfirstv_[k + 1] - tkfirstv_[k]; + tkmap_.erase(tkmap_.begin() + tkfirstv_[k], tkmap_.begin() + tkfirstv_[k + 1]); + tkweight_.erase(tkweight_.begin() + tkfirstv_[k], tkweight_.begin() + tkfirstv_[k + 1]); + // 2b) adjust pointers for the following vertices, including the dummy entry behind the last (now [nv-1]) + for (unsigned int k1 = k + 1; k1 < nv + 1; k1++) { + tkfirstv_[k1] -= num_erased_map_entries; + } + // 2c) erase the pointer of the removed vertex + tkfirstv_.erase(tkfirstv_.begin() + k); +} + +double AdaptiveChisquarePrimaryVertexFitter::update(const reco::BeamSpot &beamspot, + const float beam_weight, + const bool fill_covariances) { + double rho_vtx = 0; + double delta_z = 0; + double delta_x = 0; + double delta_y = 0; + unsigned const int nt = trackinfo_.size(); + unsigned const int nv = xv_.size(); + if (fill_covariances) { + covv_.clear(); + } + + // initial value for S, 0 or inverse of the beamspot covariance matrix + Error3 S0; + double c_beam[3] = {0, 0, 0}; + if (beam_weight > 0) { + S0 = get_inverse_beam_covariance(beamspot); + for (unsigned int j = 0; j < 3; j++) { + c_beam[j] = -(S0(j, 0) * beamspot.x0() + S0(j, 1) * beamspot.y0() + S0(j, 2) * beamspot.z0()); + } + } + + for (unsigned int k = 0; k < nv; k++) { + rho_vtx = 0; + Error3 S(S0); + // sum track contributions + double c[3] = {c_beam[0], c_beam[1], c_beam[2]}; + for (unsigned int j = tkfirstv_[k]; j < tkfirstv_[k + 1]; j++) { + const unsigned int i = tkmap_[j]; + const auto w = tkweight_[j]; + rho_vtx += w; + S += w * trackinfo_[i].C; + for (unsigned int l = 0; l < 3; l++) { + c[l] += w * trackinfo_[i].g[l]; + } + } + +#ifdef PVTX_DEBUG + if ((fabs(S(1, 2) - S(2, 1)) > 1e-3) || (fabs(S(0, 2) - S(2, 0)) > 1e-3) || (fabs(S(0, 1) - S(1, 0)) > 1e-3) || + (S(0, 0) <= 0) || (S(0, 0) <= 0) || (S(0, 0) <= 0)) { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") << 
"update() bad S-matrix S=" << std::endl + << S << std::endl; + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") + << "n-vertex = " << nv << " n-track = " << nt << std::endl; + } +#endif + + const auto xold = xv_[k]; + const auto yold = yv_[k]; + const auto zold = zv_[k]; + + if (S.Invert()) { + xv_[k] = -(S(0, 0) * c[0] + S(0, 1) * c[1] + S(0, 2) * c[2]); + yv_[k] = -(S(1, 0) * c[0] + S(1, 1) * c[1] + S(1, 2) * c[2]); + zv_[k] = -(S(2, 0) * c[0] + S(2, 1) * c[1] + S(2, 2) * c[2]); + if (fill_covariances) { + covv_.emplace_back(S); + } + } else { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") << "update() Matrix inversion failed" << S << std::endl; + if (fill_covariances) { + Error3 covv_dummy; + covv_dummy(0, 0) = 100.; + covv_dummy(1, 1) = 100.; + covv_dummy(2, 2) = 100.; + covv_.emplace_back(covv_dummy); + } + } + + if ((nt > 1) && (rho_vtx > 1.0)) { + delta_x = std::max(delta_x, std::abs(xv_[k] - xold)); + delta_y = std::max(delta_y, std::abs(yv_[k] - yold)); + delta_z = std::max(delta_z, std::abs(zv_[k] - zold)); + } + + } // vertex loop + + return std::max(delta_z, std::max(delta_x, delta_y)); +} + +AdaptiveChisquarePrimaryVertexFitter::Error3 AdaptiveChisquarePrimaryVertexFitter::get_inverse_beam_covariance( + const reco::BeamSpot &beamspot) { + auto SBeam = beamspot.rotatedCovariance3D(); + if (SBeam.Invert()) { + return SBeam; + } else { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") + << "Warning, beam-spot covariance matrix inversion failed " << std::endl; + Error3 S0; + S0(0, 0) = 1. / pow(beamspot.BeamWidthX(), 2); + S0(1, 1) = 1. / pow(beamspot.BeamWidthY(), 2); + S0(2, 2) = 1. 
/ pow(beamspot.sigmaZ(), 2); + return S0; + } +} + +TransientVertex AdaptiveChisquarePrimaryVertexFitter::get_TransientVertex( + const unsigned int k, + const std::vector> &vertex_track_weights, + const std::vector &tracks, + const float beam_weight, + const reco::BeamSpot &beamspot) { + const GlobalPoint pos(xv_[k], yv_[k], zv_[k]); + const GlobalError posError( + covv_[k](0, 0), covv_[k](1, 0), covv_[k](1, 1), covv_[k](2, 0), covv_[k](2, 1), covv_[k](2, 2)); + float chi2 = 0.; + float vtx_ndof = -3.; + if (beam_weight > 0) { + // add beam-spot chi**2 and degrees of freedom + vtx_ndof = 3 * beam_weight; + const auto S = get_inverse_beam_covariance(beamspot); + const double dx = xv_[k] - beamspot.x0(); + const double dy = yv_[k] - beamspot.y0(); + const double dz = zv_[k] - beamspot.z0(); + chi2 = beam_weight * (S(0, 0) * dx * dx + S(1, 1) * dy * dy + 2 * S(0, 1) * dx * dy + S(2, 2) * dz * dz + + 2 * S(0, 2) * dx * dz + 2 * S(1, 2) * dy * dz); + } + + std::vector vertex_tracks; + TransientVertex::TransientTrackToFloatMap trkWeightMap; + for (const auto &tk : vertex_track_weights) { + const unsigned int i = tk.first; + const float track_weight = tk.second; + if (track_weight >= min_trackweight_) { + vertex_tracks.emplace_back(tracks[i]); + trkWeightMap[tracks[i]] = track_weight; + vtx_ndof += 2 * track_weight; + chi2 += track_weight * track_in_vertex_chsq(trackinfo_[i], xv_[k], yv_[k], zv_[k]); + } + } + + auto vtx = TransientVertex(pos, posError, vertex_tracks, chi2, vtx_ndof); + vtx.weightMap(trkWeightMap); + + return vtx; +} + +std::vector AdaptiveChisquarePrimaryVertexFitter::vertices( + const std::vector &tracks, + const std::vector &clusters, + const reco::BeamSpot &beamspot, + const bool useBeamConstraint) { + // simultaneous fit of all vertices in the input list + + const int max_iterations = 50; + + // initialize the vertices + const unsigned int nv = clusters.size(); + xv_.clear(); + xv_.reserve(nv); + yv_.clear(); + yv_.reserve(nv); + zv_.clear(); + 
zv_.reserve(nv); + tkfirstv_.clear(); + tkfirstv_.reserve(nv + 1); + covv_.clear(); + covv_.reserve(nv); + + // seeds + for (auto &clu : clusters) { + const double zclu = clu.position().z(); + xv_.emplace_back(beamspot.x(zclu)); + yv_.emplace_back(beamspot.y(zclu)); + zv_.emplace_back(zclu); + } + + fill_trackinfo(tracks, beamspot); + + make_vtx_trk_map(5.); // use tracks within 5 sigma windows (if that is less than z_cutoff_) + + float beam_weight = useBeamConstraint ? 1. : 0.; + + double delta = 0; + unsigned int nit = 0; + while ((nit == 0) || ((delta > 0.0001) && (nit < max_iterations))) { + fill_weights(beamspot); + delta = update(beamspot, beam_weight, false); + nit++; + } + if ((nit >= max_iterations) && (delta > 0.01)) { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") + << "iteration limit reached " << nit << " last delta = " << delta << std::endl + << " nv = " << nv << " nt = " << tracks.size() << std::endl; + } + + // may need to remove collapsed vertices + nit = 0; + while ((xv_.size() > 1) && (nit < max_iterations) && (clean())) { + fill_weights(beamspot); + update(beamspot, beam_weight, false); + nit++; + } + + // fill the covariance matrices + update(beamspot, beam_weight, true); + + // assign tracks to vertices + std::vector track_to_vertex(trackinfo_.size(), nv); + // for each track identify the vertex that wants it most + std::vector maxweight(trackinfo_.size(), -1.); + for (unsigned int k = 0; k < nv; k++) { + for (unsigned int j = tkfirstv_[k]; j < tkfirstv_[k + 1]; j++) { + const unsigned int i = tkmap_[j]; + if (tkweight_[j] > maxweight[i]) { + maxweight[i] = tkweight_[j]; + track_to_vertex[i] = k; + } + } + } + + // fill the fit result into transient vertices + std::vector pvs; + for (unsigned int k = 0; k < xv_.size(); k++) { + std::vector> vertex_tracks_weights; + for (unsigned int j = tkfirstv_[k]; j < tkfirstv_[k + 1]; j++) { + unsigned int i = tkmap_[j]; + if (track_to_vertex[i] == k) { + 
vertex_tracks_weights.emplace_back(tkmap_[j], tkweight_[j]); + } + } + pvs.emplace_back(get_TransientVertex(k, vertex_tracks_weights, tracks, beam_weight, beamspot)); + } + + return pvs; +} + +TransientVertex AdaptiveChisquarePrimaryVertexFitter::refit(const TransientVertex &cluster, + const reco::BeamSpot &beamspot, + const bool useBeamConstraint) { + // fit a single vertex using all tracks in the tracklist + const unsigned int nt = cluster.originalTracks().size(); + const int max_iterations = 50; + + // initialize, vectors with size=1 here to avoid code duplication from the multivertex case in update() + const double zclu = cluster.position().z(); + xv_ = {beamspot.x(zclu)}; + yv_ = {beamspot.y(zclu)}; + zv_ = {zclu}; + tkfirstv_ = {0, nt}; + covv_.clear(); + + fill_trackinfo(cluster.originalTracks(), beamspot); + tkweight_.assign(nt, 0.); + tkmap_.clear(); + tkmap_.reserve(nt); + for (unsigned int i = 0; i < nt; i++) { + tkmap_.emplace_back(i); // trivial map for single vertex fits + } + + float beam_weight = useBeamConstraint ? 1. 
: 0.; + + double delta = 0; + unsigned int nit = 0; + while ((nit == 0) || ((delta > 0.0001) && (nit < max_iterations))) { + fill_weights(beamspot); + delta = update(beamspot, beam_weight, false); + nit++; + } + + if ((nit >= max_iterations) && (delta > 0.1)) { + edm::LogWarning("AdaptiveChisquarePrimaryVertexFitter") + << "single vertex fit, iteration limit reached " << nit << " last delta = " << delta << std::endl + << " nt = " << cluster.originalTracks().size() << std::endl; + } + + // fill the covariance matrices + update(beamspot, beam_weight, true); + + // put the result into a transient vertex + std::vector> vertex_track_weights; + for (unsigned int i = 0; i < nt; i++) { + vertex_track_weights.emplace_back(i, tkweight_[i]); + } + + return get_TransientVertex(0, vertex_track_weights, cluster.originalTracks(), beam_weight, beamspot); +} + +// +std::vector AdaptiveChisquarePrimaryVertexFitter::fit(const std::vector &tracks, + const std::vector &clusters, + const reco::BeamSpot &beamspot, + const bool useBeamConstraint) { + if (multivertexfit_) { + return vertices(tracks, clusters, beamspot, useBeamConstraint); + + } else { + // fit the clusters one-by-one using the tracklist of the clusters (ignores the "tracks" argument) + std::vector pvs; + pvs.reserve(clusters.size()); + for (auto &cluster : clusters) { + if (cluster.originalTracks().size() > (useBeamConstraint ? 
0 : 1)) { + auto pv = refit(cluster, beamspot, useBeamConstraint); + if (pv.isValid()) { + pvs.emplace_back(pv); + } + } + } + return pvs; + } +} diff --git a/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ.cc b/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ.cc deleted file mode 100644 index 956a3eab1eb48..0000000000000 --- a/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ.cc +++ /dev/null @@ -1,713 +0,0 @@ -#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h" -#include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "DataFormats/GeometryCommonDetAlgo/interface/Measurement1D.h" -#include "RecoVertex/VertexPrimitives/interface/VertexException.h" - -using namespace std; - -namespace { - - bool recTrackLessZ1(const DAClusterizerInZ::track_t& tk1, const DAClusterizerInZ::track_t& tk2) { - return tk1.z < tk2.z; - } -} // namespace - -vector DAClusterizerInZ::fill(const vector& tracks) const { - // prepare track data for clustering - vector tks; - for (vector::const_iterator it = tracks.begin(); it != tracks.end(); it++) { - track_t t; - t.z = ((*it).stateAtBeamLine().trackStateAtPCA()).position().z(); - double tantheta = tan(((*it).stateAtBeamLine().trackStateAtPCA()).momentum().theta()); - double phi = ((*it).stateAtBeamLine().trackStateAtPCA()).momentum().phi(); - // get the beam-spot - reco::BeamSpot beamspot = (it->stateAtBeamLine()).beamSpot(); - t.dz2 = pow((*it).track().dzError(), 2) // track errror - + (pow(beamspot.BeamWidthX() * cos(phi), 2) + pow(beamspot.BeamWidthY() * sin(phi), 2)) / - pow(tantheta, 2) // beam-width induced - + pow(vertexSize_, 2); // intrinsic vertex size, safer for outliers and short lived decays - if (d0CutOff_ > 0) { - Measurement1D IP = (*it).stateAtBeamLine().transverseImpactParameter(); // error constains beamspot - t.pi = 1. / (1. 
+ exp(pow(IP.value() / IP.error(), 2) - pow(d0CutOff_, 2))); // reduce weight for high ip tracks - } else { - t.pi = 1.; - } - t.tt = &(*it); - t.Z = 1.; - tks.push_back(t); - } - return tks; -} - -double DAClusterizerInZ::Eik(const track_t& t, const vertex_t& k) const { return pow(t.z - k.z, 2) / t.dz2; } - -double DAClusterizerInZ::update(double beta, vector& tks, vector& y) const { - //update weights and vertex positions - // mass constrained annealing without noise - // returns the squared sum of changes of vertex positions - - unsigned int nt = tks.size(); - - //initialize sums - double sumpi = 0; - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->se = 0; - k->sw = 0; - k->swz = 0; - k->swE = 0; - k->Tc = 0; - } - - // loop over tracks - for (unsigned int i = 0; i < nt; i++) { - // update pik and Zi - double Zi = 0.; - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->ei = exp(-beta * Eik(tks[i], *k)); // cache exponential for one track at a time - Zi += k->pk * k->ei; - } - tks[i].Z = Zi; - - // normalization for pk - if (tks[i].Z > 0) { - sumpi += tks[i].pi; - // accumulate weighted z and weights for vertex update - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->se += tks[i].pi * k->ei / Zi; - double w = k->pk * tks[i].pi * k->ei / Zi / tks[i].dz2; - k->sw += w; - k->swz += w * tks[i].z; - k->swE += w * Eik(tks[i], *k); - } - } else { - sumpi += tks[i].pi; - } - - } // end of track loop - - // now update z and pk - double delta = 0; - for (vector::iterator k = y.begin(); k != y.end(); k++) { - if (k->sw > 0) { - double znew = k->swz / k->sw; - delta += pow(k->z - znew, 2); - k->z = znew; - k->Tc = 2 * k->swE / k->sw; - } else { - edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl; - if (verbose_) { - cout << " a cluster melted away ? 
pk=" << k->pk << " sumw=" << k->sw << endl; - } - k->Tc = -1; - } - - k->pk = k->pk * k->se / sumpi; - } - - // return how much the prototypes moved - return delta; -} - -double DAClusterizerInZ::update(double beta, vector& tks, vector& y, double& rho0) const { - // MVF style, no more vertex weights, update tracks weights and vertex positions, with noise - // returns the squared sum of changes of vertex positions - - unsigned int nt = tks.size(); - - //initialize sums - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->se = 0; - k->sw = 0; - k->swz = 0; - k->swE = 0; - k->Tc = 0; - } - - // loop over tracks - for (unsigned int i = 0; i < nt; i++) { - // update pik and Zi - double Zi = rho0 * exp(-beta * dzCutOff_ * dzCutOff_); // cut-off - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->ei = exp(-beta * Eik(tks[i], *k)); // cache exponential for one track at a time - Zi += k->pk * k->ei; - } - tks[i].Z = Zi; - - // normalization - if (tks[i].Z > 0) { - // accumulate weighted z and weights for vertex update - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->se += tks[i].pi * k->ei / Zi; - double w = k->pk * tks[i].pi * k->ei / Zi / tks[i].dz2; - k->sw += w; - k->swz += w * tks[i].z; - k->swE += w * Eik(tks[i], *k); - } - } - - } // end of track loop - - // now update z - double delta = 0; - for (vector::iterator k = y.begin(); k != y.end(); k++) { - if (k->sw > 0) { - double znew = k->swz / k->sw; - delta += pow(k->z - znew, 2); - k->z = znew; - k->Tc = 2 * k->swE / k->sw; - } else { - edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl; - if (verbose_) { - cout << " a cluster melted away ? 
pk=" << k->pk << " sumw=" << k->sw << endl; - } - k->Tc = 0; - } - } - - // return how much the prototypes moved - return delta; -} - -bool DAClusterizerInZ::merge(vector& y, int nt) const { - // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise - - if (y.size() < 2) - return false; - - for (vector::iterator k = y.begin(); (k + 1) != y.end(); k++) { - if (fabs((k + 1)->z - k->z) < 1.e-3) { // with fabs if only called after freeze-out (splitAll() at highter T) - double rho = k->pk + (k + 1)->pk; - if (rho > 0) { - k->z = (k->pk * k->z + (k + 1)->z * (k + 1)->pk) / rho; - } else { - k->z = 0.5 * (k->z + (k + 1)->z); - } - k->pk = rho; - - y.erase(k + 1); - return true; - } - } - - return false; -} - -bool DAClusterizerInZ::merge(vector& y, double& beta) const { - // merge clusters that collapsed or never separated, - // only merge if the estimated critical temperature of the merged vertex is below the current temperature - // return true if vertices were merged, false otherwise - if (y.size() < 2) - return false; - - for (vector::iterator k = y.begin(); (k + 1) != y.end(); k++) { - if (fabs((k + 1)->z - k->z) < 2.e-3) { - double rho = k->pk + (k + 1)->pk; - double swE = k->swE + (k + 1)->swE - k->pk * (k + 1)->pk / rho * pow((k + 1)->z - k->z, 2); - double Tc = 2 * swE / (k->sw + (k + 1)->sw); - - if (Tc * beta < 1) { - if (rho > 0) { - k->z = (k->pk * k->z + (k + 1)->z * (k + 1)->pk) / rho; - } else { - k->z = 0.5 * (k->z + (k + 1)->z); - } - k->pk = rho; - k->sw += (k + 1)->sw; - k->swE = swE; - k->Tc = Tc; - y.erase(k + 1); - return true; - } - } - } - - return false; -} - -bool DAClusterizerInZ::purge(vector& y, vector& tks, double& rho0, const double beta) const { - // eliminate clusters with only one significant/unique track - if (y.size() < 2) - return false; - - unsigned int nt = tks.size(); - double sumpmin = nt; - vector::iterator k0 = y.end(); - for (vector::iterator k = y.begin(); k != y.end(); k++) { - 
int nUnique = 0; - double sump = 0; - double pmax = k->pk / (k->pk + rho0 * exp(-beta * dzCutOff_ * dzCutOff_)); - for (unsigned int i = 0; i < nt; i++) { - if (tks[i].Z > 0) { - double p = k->pk * exp(-beta * Eik(tks[i], *k)) / tks[i].Z; - sump += p; - if ((p > 0.9 * pmax) && (tks[i].pi > 0)) { - nUnique++; - } - } - } - - if ((nUnique < 2) && (sump < sumpmin)) { - sumpmin = sump; - k0 = k; - } - } - - if (k0 != y.end()) { - if (verbose_) { - cout << "eliminating prototype at " << k0->z << " with sump=" << sumpmin << endl; - } - //rho0+=k0->pk; - y.erase(k0); - return true; - } else { - return false; - } -} - -double DAClusterizerInZ::beta0(double betamax, vector& tks, vector& y) const { - double T0 = 0; // max Tc for beta=0 - // estimate critical temperature from beta=0 (T=inf) - unsigned int nt = tks.size(); - - for (vector::iterator k = y.begin(); k != y.end(); k++) { - // vertex fit at T=inf - double sumwz = 0; - double sumw = 0; - for (unsigned int i = 0; i < nt; i++) { - double w = tks[i].pi / tks[i].dz2; - sumwz += w * tks[i].z; - sumw += w; - } - k->z = sumwz / sumw; - - // estimate Tcrit, eventually do this in the same loop - double a = 0, b = 0; - for (unsigned int i = 0; i < nt; i++) { - double dx = tks[i].z - (k->z); - double w = tks[i].pi / tks[i].dz2; - a += w * pow(dx, 2) / tks[i].dz2; - b += w; - } - double Tc = 2. * a / b; // the critical temperature of this vertex - if (Tc > T0) - T0 = Tc; - } // vertex loop (normally there should be only one vertex at beta=0) - - if (T0 > 1. 
/ betamax) { - return betamax / pow(coolingFactor_, int(log(T0 * betamax) / log(coolingFactor_)) - 1); - } else { - // ensure at least one annealing step - return betamax / coolingFactor_; - } -} - -bool DAClusterizerInZ::split(double beta, vector& tks, vector& y, double threshold) const { - // split only critical vertices (Tc >~ T=1/beta <==> beta*Tc>~1) - // an update must have been made just before doing this (same beta, no merging) - // returns true if at least one cluster was split - - double epsilon = 1e-3; // split all single vertices by 10 um - bool split = false; - - // avoid left-right biases by splitting highest Tc first - - std::vector > critical; - for (unsigned int ik = 0; ik < y.size(); ik++) { - if (beta * y[ik].Tc > 1.) { - critical.push_back(make_pair(y[ik].Tc, ik)); - } - } - stable_sort(critical.begin(), critical.end(), std::greater >()); - - for (unsigned int ic = 0; ic < critical.size(); ic++) { - unsigned int ik = critical[ic].second; - // estimate subcluster positions and weight - double p1 = 0, z1 = 0, w1 = 0; - double p2 = 0, z2 = 0, w2 = 0; - //double sumpi=0; - for (unsigned int i = 0; i < tks.size(); i++) { - if (tks[i].Z > 0) { - //sumpi+=tks[i].pi; - double p = y[ik].pk * exp(-beta * Eik(tks[i], y[ik])) / tks[i].Z * tks[i].pi; - double w = p / tks[i].dz2; - if (tks[i].z < y[ik].z) { - p1 += p; - z1 += w * tks[i].z; - w1 += w; - } else { - p2 += p; - z2 += w * tks[i].z; - w2 += w; - } - } - } - if (w1 > 0) { - z1 = z1 / w1; - } else { - z1 = y[ik].z - epsilon; - } - if (w2 > 0) { - z2 = z2 / w2; - } else { - z2 = y[ik].z + epsilon; - } - - // reduce split size if there is not enough room - if ((ik > 0) && (y[ik - 1].z >= z1)) { - z1 = 0.5 * (y[ik].z + y[ik - 1].z); - } - if ((ik + 1 < y.size()) && (y[ik + 1].z <= z2)) { - z2 = 0.5 * (y[ik].z + y[ik + 1].z); - } - - // split if the new subclusters are significantly separated - if ((z2 - z1) > epsilon) { - split = true; - vertex_t vnew; - vnew.pk = p1 * y[ik].pk / (p1 + p2); - y[ik].pk = 
p2 * y[ik].pk / (p1 + p2); - vnew.z = z1; - y[ik].z = z2; - y.insert(y.begin() + ik, vnew); - - // adjust remaining pointers - for (unsigned int jc = ic; jc < critical.size(); jc++) { - if (critical[jc].second > ik) { - critical[jc].second++; - } - } - } - } - - // stable_sort(y.begin(), y.end(), clusterLessZ); - return split; -} - -void DAClusterizerInZ::splitAll(vector& y) const { - double epsilon = 1e-3; // split all single vertices by 10 um - double zsep = 2 * epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed) - vector y1; - - for (vector::iterator k = y.begin(); k != y.end(); k++) { - if (((k == y.begin()) || (k - 1)->z < k->z - zsep) && (((k + 1) == y.end()) || (k + 1)->z > k->z + zsep)) { - // isolated prototype, split - vertex_t vnew; - vnew.z = k->z - epsilon; - (*k).z = k->z + epsilon; - vnew.pk = 0.5 * (*k).pk; - (*k).pk = 0.5 * (*k).pk; - y1.push_back(vnew); - y1.push_back(*k); - - } else if (y1.empty() || (y1.back().z < k->z - zsep)) { - y1.push_back(*k); - } else { - y1.back().z -= epsilon; - k->z += epsilon; - y1.push_back(*k); - } - } // vertex loop - - y = y1; -} - -DAClusterizerInZ::DAClusterizerInZ(const edm::ParameterSet& conf) { - // some defaults to avoid uninitialized variables - verbose_ = conf.getUntrackedParameter("verbose", false); - useTc_ = true; - betamax_ = 0.1; - betastop_ = 1.0; - coolingFactor_ = 0.8; - maxIterations_ = 100; - vertexSize_ = 0.05; // 0.5 mm - dzCutOff_ = 4.0; // Adaptive Fitter uses 3.0 but that appears to be a bit tight here sometimes - - // configure - - double Tmin = conf.getParameter("Tmin"); - vertexSize_ = conf.getParameter("vertexSize"); - coolingFactor_ = conf.getParameter("coolingFactor"); - d0CutOff_ = conf.getParameter("d0CutOff"); - dzCutOff_ = conf.getParameter("dzCutOff"); - maxIterations_ = 100; - if (Tmin == 0) { - cout << "DAClusterizerInZ: invalid Tmin" << Tmin << " reset do default " << 1. / betamax_ << endl; - } else { - betamax_ = 1. 
/ Tmin; - } - - // for testing, negative cooling factor: revert to old splitting scheme - if (coolingFactor_ < 0) { - coolingFactor_ = -coolingFactor_; - useTc_ = false; - } -} - -void DAClusterizerInZ::dump(const double beta, - const vector& y, - const vector& tks0, - int verbosity) const { - // copy and sort for nicer printout - vector tks; - for (vector::const_iterator t = tks0.begin(); t != tks0.end(); t++) { - tks.push_back(*t); - } - stable_sort(tks.begin(), tks.end(), recTrackLessZ1); - - cout << "-----DAClusterizerInZ::dump ----" << endl; - cout << "beta=" << beta << " betamax= " << betamax_ << endl; - cout << " z= "; - cout.precision(4); - for (vector::const_iterator k = y.begin(); k != y.end(); k++) { - cout << setw(8) << fixed << k->z; - } - cout << endl << "T=" << setw(15) << 1. / beta << " Tc= "; - for (vector::const_iterator k = y.begin(); k != y.end(); k++) { - cout << setw(8) << fixed << k->Tc; - } - - cout << endl << " pk="; - for (vector::const_iterator k = y.begin(); k != y.end(); k++) { - cout << setw(8) << setprecision(3) << fixed << k->pk; - } - cout << endl; - - if (verbosity > 0) { - double E = 0, F = 0; - cout << endl; - cout << "---- z +/- dz ip +/-dip pt phi eta weights ----" << endl; - cout.precision(4); - for (unsigned int i = 0; i < tks.size(); i++) { - if (tks[i].Z > 0) { - F -= log(tks[i].Z) / beta; - } - double tz = tks[i].z; - cout << setw(3) << i << ")" << setw(8) << fixed << setprecision(4) << tz << " +/-" << setw(6) << sqrt(tks[i].dz2); - - if (tks[i].tt->track().quality(reco::TrackBase::highPurity)) { - cout << " *"; - } else { - cout << " "; - } - if (tks[i].tt->track().hitPattern().hasValidHitInPixelLayer(PixelSubdetector::SubDetector::PixelBarrel, 1)) { - cout << "+"; - } else { - cout << "-"; - } - cout << setw(1) - << tks[i] - .tt->track() - .hitPattern() - .pixelBarrelLayersWithMeasurement(); // see DataFormats/TrackReco/interface/HitPattern.h - cout << setw(1) << 
tks[i].tt->track().hitPattern().pixelEndcapLayersWithMeasurement(); - cout << setw(1) << hex - << tks[i].tt->track().hitPattern().trackerLayersWithMeasurement() - - tks[i].tt->track().hitPattern().pixelLayersWithMeasurement() - << dec; - cout << "=" << setw(1) << hex - << tks[i].tt->track().hitPattern().numberOfLostHits(reco::HitPattern::MISSING_OUTER_HITS) << dec; - - Measurement1D IP = tks[i].tt->stateAtBeamLine().transverseImpactParameter(); - cout << setw(8) << IP.value() << "+/-" << setw(6) << IP.error(); - cout << " " << setw(6) << setprecision(2) << tks[i].tt->track().pt() * tks[i].tt->track().charge(); - cout << " " << setw(5) << setprecision(2) << tks[i].tt->track().phi() << " " << setw(5) << setprecision(2) - << tks[i].tt->track().eta(); - - for (vector::const_iterator k = y.begin(); k != y.end(); k++) { - if ((tks[i].pi > 0) && (tks[i].Z > 0)) { - //double p=pik(beta,tks[i],*k); - double p = k->pk * exp(-beta * Eik(tks[i], *k)) / tks[i].Z; - if (p > 0.0001) { - cout << setw(8) << setprecision(3) << p; - } else { - cout << " . 
"; - } - E += p * Eik(tks[i], *k); - } else { - cout << " "; - } - } - cout << endl; - } - cout << endl << "T=" << 1 / beta << " E=" << E << " n=" << y.size() << " F= " << F << endl << "----------" << endl; - } -} - -vector DAClusterizerInZ::vertices(const vector& tracks, - const int verbosity) const { - vector tks = fill(tracks); - unsigned int nt = tracks.size(); - double rho0 = 0.0; // start with no outlier rejection - - vector clusters; - if (tks.empty()) - return clusters; - - vector y; // the vertex prototypes - - // initialize:single vertex at infinite temperature - vertex_t vstart; - vstart.z = 0.; - vstart.pk = 1.; - y.push_back(vstart); - int niter = 0; // number of iterations - - // estimate first critical temperature - double beta = beta0(betamax_, tks, y); - niter = 0; - while ((update(beta, tks, y) > 1.e-6) && (niter++ < maxIterations_)) { - } - - // annealing loop, stop when T1/Tmin) - while (beta < betamax_) { - if (useTc_) { - update(beta, tks, y); - while (merge(y, beta)) { - update(beta, tks, y); - } - split(beta, tks, y, 1.); - beta = beta / coolingFactor_; - } else { - beta = beta / coolingFactor_; - splitAll(y); - } - - // make sure we are not too far from equilibrium before cooling further - niter = 0; - while ((update(beta, tks, y) > 1.e-6) && (niter++ < maxIterations_)) { - } - } - - if (useTc_) { - // last round of splitting, make sure no critical clusters are left - update(beta, tks, y); - while (merge(y, beta)) { - update(beta, tks, y); - } - unsigned int ntry = 0; - while (split(beta, tks, y, 1.) && (ntry++ < 10)) { - niter = 0; - while ((update(beta, tks, y) > 1.e-6) && (niter++ < maxIterations_)) { - } - merge(y, beta); - update(beta, tks, y); - } - } else { - // merge collapsed clusters - while (merge(y, beta)) { - update(beta, tks, y); - } - if (verbose_) { - cout << "dump after 1st merging " << endl; - dump(beta, y, tks, 2); - } - } - - // switch on outlier rejection - rho0 = 1. 
/ nt; - for (vector::iterator k = y.begin(); k != y.end(); k++) { - k->pk = 1.; - } // democratic - niter = 0; - while ((update(beta, tks, y, rho0) > 1.e-8) && (niter++ < maxIterations_)) { - } - if (verbose_) { - cout << "rho0=" << rho0 << " niter=" << niter << endl; - dump(beta, y, tks, 2); - } - - // merge again (some cluster split by outliers collapse here) - while (merge(y, tks.size())) { - } - if (verbose_) { - cout << "dump after 2nd merging " << endl; - dump(beta, y, tks, 2); - } - - // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices - while (beta <= betastop_) { - while (purge(y, tks, rho0, beta)) { - niter = 0; - while ((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < maxIterations_)) { - } - } - beta /= coolingFactor_; - niter = 0; - while ((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < maxIterations_)) { - } - } - - // // new, one last round of cleaning at T=Tstop - // while(purge(y,tks,rho0, beta)){ - // niter=0; while((update(beta, tks,y,rho0) > 1.e-6) && (niter++ < maxIterations_)){ } - // } - - if (verbose_) { - cout << "Final result, rho0=" << rho0 << endl; - dump(beta, y, tks, 2); - } - - // select significant tracks and use a TransientVertex as a container - GlobalError dummyError; - - // ensure correct normalization of probabilities, should make double assginment reasonably impossible - for (unsigned int i = 0; i < nt; i++) { - tks[i].Z = rho0 * exp(-beta * dzCutOff_ * dzCutOff_); - for (vector::iterator k = y.begin(); k != y.end(); k++) { - tks[i].Z += k->pk * exp(-beta * Eik(tks[i], *k)); - } - } - - for (vector::iterator k = y.begin(); k != y.end(); k++) { - GlobalPoint pos(0, 0, k->z); - vector vertexTracks; - for (unsigned int i = 0; i < nt; i++) { - if (tks[i].Z > 0) { - double p = k->pk * exp(-beta * Eik(tks[i], *k)) / tks[i].Z; - if ((tks[i].pi > 0) && (p > 0.5)) { - vertexTracks.push_back(*(tks[i].tt)); - tks[i].Z = 0; - } // setting Z=0 excludes double assignment - } - } - 
TransientVertex v(pos, dummyError, vertexTracks, 5); - clusters.push_back(v); - } - - return clusters; -} - -vector > DAClusterizerInZ::clusterize(const vector& tracks) const { - if (verbose_) { - cout << "###################################################" << endl; - cout << "# DAClusterizerInZ::clusterize nt=" << tracks.size() << endl; - cout << "###################################################" << endl; - } - - vector > clusters; - vector pv = vertices(tracks); - - if (verbose_) { - cout << "# DAClusterizerInZ::clusterize pv.size=" << pv.size() << endl; - } - if (pv.empty()) { - return clusters; - } - - // fill into clusters and merge - vector aCluster = pv.begin()->originalTracks(); - - for (vector::iterator k = pv.begin() + 1; k != pv.end(); k++) { - if (fabs(k->position().z() - (k - 1)->position().z()) > (2 * vertexSize_)) { - // close a cluster - clusters.push_back(aCluster); - aCluster.clear(); - } - for (unsigned int i = 0; i < k->originalTracks().size(); i++) { - aCluster.push_back(k->originalTracks().at(i)); - } - } - clusters.push_back(aCluster); - - return clusters; -} diff --git a/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZT_vect.cc b/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZT_vect.cc index 7e96ab91f9c09..61ef41b672e80 100644 --- a/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZT_vect.cc +++ b/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZT_vect.cc @@ -238,7 +238,7 @@ DAClusterizerInZT_vect::track_t DAClusterizerInZT_vect::fill(const vector 0.3) || (std::abs(t_t) > t0Max_)) { + if ((tk.dtErrorExt() > TransientTrackBuilder::defaultInvalidTrackTimeReso) || (std::abs(t_t) > t0Max_)) { t_dt2 = 0; // tracks with no time measurement } else { t_dt2 = 1. 
/ t_dt2; @@ -1560,7 +1560,7 @@ void DAClusterizerInZT_vect::dump(const double beta, const vertex_t& y, const tr void DAClusterizerInZT_vect::fillPSetDescription(edm::ParameterSetDescription& desc) { DAClusterizerInZ_vect::fillPSetDescription(desc); - desc.add("tmerge", 0.01); // 4D only + desc.add("tmerge", 0.1); // 4D only desc.add("dtCutOff", 4.); // 4D only desc.add("t0Max", 1.0); // 4D only desc.add("vertexSizeTime", 0.008); // 4D only diff --git a/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ_vect.cc b/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ_vect.cc index 00e86781117eb..3477a1a33416c 100644 --- a/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ_vect.cc +++ b/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ_vect.cc @@ -48,7 +48,7 @@ DAClusterizerInZ_vect::DAClusterizerInZ_vect(const edm::ParameterSet& conf) { overlap_frac_ = conf.getParameter("overlap_frac"); #ifdef DEBUG - std::cout << "DAClusterizerinZ_vect: mintrkweight = " << mintrkweight << std::endl; + std::cout << "DAClusterizerinZ_vect: mintrkweight = " << mintrkweight_ << std::endl; std::cout << "DAClusterizerinZ_vect: uniquetrkweight = " << uniquetrkweight_ << std::endl; std::cout << "DAClusterizerInZ_vect: uniquetrkminp = " << uniquetrkminp_ << std::endl; std::cout << "DAClusterizerinZ_vect: zmerge = " << zmerge_ << std::endl; @@ -63,6 +63,10 @@ DAClusterizerInZ_vect::DAClusterizerInZ_vect(const edm::ParameterSet& conf) { std::cout << "DAClusterizerinZ_vect: convergence mode = " << convergence_mode_ << std::endl; std::cout << "DAClusterizerinZ_vect: delta_highT = " << delta_highT_ << std::endl; std::cout << "DAClusterizerinZ_vect: delta_lowT = " << delta_lowT_ << std::endl; + + std::cout << "DAClusterizerinZ_vect: run in blocks = " << runInBlocks_ << std::endl; + std::cout << "DAClusterizerinZ_vect: block_size = " << block_size_ << std::endl; + std::cout << "DAClusterizerinZ_vect: overlap_fraction = " << overlap_frac_ << std::endl; std::cout << "DAClusterizerinZ_vect: 
DEBUGLEVEL " << DEBUGLEVEL << std::endl; #endif @@ -212,7 +216,6 @@ DAClusterizerInZ_vect::track_t DAClusterizerInZ_vect::fill(const vector 1) { std::cout << "eliminating prototype at " << std::setw(10) << std::setprecision(4) << y.zvtx[k0] - << " with sump=" << sumpmin << " rho*nt =" << y.rho[k0] * nt << endl; + << " with sump=" << sumpmin << " rho*nt =" << y.rho[k0] * nt << " pnUnique=" << pnUnique[k0] << endl; } #endif @@ -763,11 +766,10 @@ bool DAClusterizerInZ_vect::split(const double beta, track_t& tks, vertex_t& y, return split; } -vector DAClusterizerInZ_vect::vertices(const vector& tracks) const { +vector DAClusterizerInZ_vect::vertices_no_blocks(const vector& tracks) const { track_t&& tks = fill(tracks); tks.extractRaw(); - unsigned int nt = tks.getSize(); double rho0 = 0.0; // start with no outlier rejection vector clusters; @@ -807,7 +809,7 @@ vector DAClusterizerInZ_vect::vertices(const vector 0) { - std::cout << "DAClusterizerInZ_vect::vertices :" + std::cout << "DAClusterizerInZ_vect::vertices_no_blocks :" << "last round of splitting" << std::endl; } #endif @@ -835,7 +837,7 @@ vector DAClusterizerInZ_vect::vertices(const vector 0) { - std::cout << "DAClusterizerInZ_vect::vertices :" + std::cout << "DAClusterizerInZ_vect::vertices_no_blocks :" << "turning on outlier rejection at T=" << 1 / beta << std::endl; } #endif @@ -853,7 +855,7 @@ vector DAClusterizerInZ_vect::vertices(const vector 0) { - std::cout << "DAClusterizerInZ_vect::vertices :" + std::cout << "DAClusterizerInZ_vect::vertices_no_blocks :" << "merging with outlier rejection at T=" << 1 / beta << std::endl; } if (DEBUGLEVEL > 2) @@ -869,7 +871,7 @@ vector DAClusterizerInZ_vect::vertices(const vector 0) { - std::cout << "DAClusterizerInZ_vect::vertices :" + std::cout << "DAClusterizerInZ_vect::vertices_no_blocks :" << "after merging with outlier rejection at T=" << 1 / beta << std::endl; } if (DEBUGLEVEL > 2) @@ -898,7 +900,7 @@ vector DAClusterizerInZ_vect::vertices(const vector 0) { - 
std::cout << "DAClusterizerInZ_vect::vertices :" + std::cout << "DAClusterizerInZ_vect::vertices_no_blocks :" << "last cooling T=" << 1 / beta << std::endl; } #endif @@ -912,66 +914,15 @@ vector DAClusterizerInZ_vect::vertices(const vector 0) { - std::cout << "DAClusterizerInZ_vect::vertices :" + std::cout << "DAClusterizerInZ_vect::vertices_no_blocks :" << "stop cooling at T=" << 1 / beta << std::endl; } if (DEBUGLEVEL > 2) dump(beta, y, tks, 2, rho0); #endif - // select significant tracks and use a TransientVertex as a container - - set_vtx_range(beta, tks, y); - const unsigned int nv = y.getSize(); - for (unsigned int k = 0; k < nv; k++) { - if (edm::isNotFinite(y.rho[k]) || edm::isNotFinite(y.zvtx[k])) { - y.rho[k] = 0; - y.zvtx[k] = 0; - } - } - - const auto z_sum_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_); - std::vector> vtx_track_indices(nv); - for (unsigned int i = 0; i < nt; i++) { - const auto kmin = tks.kmin[i]; - const auto kmax = tks.kmax[i]; - for (auto k = kmin; k < kmax; k++) { - y.exp_arg[k] = -beta * Eik(tks.zpca[i], y.zvtx[k], tks.dz2[i]); - } - - local_exp_list_range(y.exp_arg, y.exp, kmin, kmax); - - tks.sum_Z[i] = z_sum_init; - for (auto k = kmin; k < kmax; k++) { - tks.sum_Z[i] += y.rho[k] * y.exp[k]; - } - const double invZ = tks.sum_Z[i] > 1e-100 ? 1. 
/ tks.sum_Z[i] : 0.0; - - for (auto k = kmin; k < kmax; k++) { - double p = y.rho[k] * y.exp[k] * invZ; - if (p > mintrkweight_) { - // assign track i -> vertex k (hard, mintrkweight should be >= 0.5 here - vtx_track_indices[k].push_back(i); - break; - } - } - - } // track loop - - GlobalError dummyError(0.01, 0, 0.01, 0., 0., 0.01); - for (unsigned int k = 0; k < nv; k++) { - if (!vtx_track_indices[k].empty()) { - GlobalPoint pos(0, 0, y.zvtx[k]); - vector vertexTracks; - for (auto i : vtx_track_indices[k]) { - vertexTracks.push_back(*(tks.tt[i])); - } - TransientVertex v(pos, dummyError, vertexTracks, 0); - clusters.push_back(v); - } - } - - return clusters; + // assign tracks and fill into transient vertices + return fill_vertices(beta, rho0, tks, y); } vector DAClusterizerInZ_vect::vertices_in_blocks(const vector& tracks) const { @@ -1241,8 +1192,9 @@ vector DAClusterizerInZ_vect::vertices_in_blocks(const vector DAClusterizerInZ_vect::vertices_in_blocks(const vector clusters; + if (nv == 0) { + return clusters; + } GlobalError dummyError(0.01, 0, 0.01, 0., 0., 0.01); + vector vertexTracks; + for (unsigned int k = 0; k < nv; k++) { if (!vtx_track_indices[k].empty()) { - GlobalPoint pos(0, 0, vertices_tot[k].first); - vector vertexTracks; for (auto i : vtx_track_indices[k]) { vertexTracks.push_back(*(tracks_tot.tt[i])); #ifdef DEBUG - std::cout << y.zvtx[k] << "," << (*tks.tt[i]).stateAtBeamLine().trackStateAtPCA().position().z() << std::endl; + std::cout << vertices_tot[k].first << "," + << (*(tracks_tot.tt[i])).stateAtBeamLine().trackStateAtPCA().position().z() << std::endl; #endif } - TransientVertex v(pos, dummyError, vertexTracks, 0); + } + + // implement what clusterize() did before : merge left-to-right if distance < 2 * vertexSize_ + if ((k + 1 == nv) || (abs(vertices_tot[k + 1].first - vertices_tot[k].first) > (2 * vertexSize_))) { + // close a cluster + if (vertexTracks.size() > 1) { + GlobalPoint pos(0, 0, vertices_tot[k].first); // only usable with 
subsequent fit + TransientVertex v(pos, dummyError, vertexTracks, 0); + clusters.push_back(v); + } + vertexTracks.clear(); + } + } + + return clusters; +} // end of vertices_in_blocks + +vector DAClusterizerInZ_vect::fill_vertices(double beta, double rho0, track_t& tks, vertex_t& y) const { + // select significant tracks and use a TransientVertex as a container + + set_vtx_range(beta, tks, y); + const unsigned int nv = y.getSize(); + for (unsigned int k = 0; k < nv; k++) { + if (edm::isNotFinite(y.rho[k]) || edm::isNotFinite(y.zvtx[k])) { + y.rho[k] = 0; + y.zvtx[k] = 0; + } + } + + // ensure consistent assignment probabillities and make a hard assignment + const unsigned int nt = tks.getSize(); + const auto z_sum_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_); + std::vector> vtx_track_indices(nv); + std::vector> vtx_track_weights(nv); + for (unsigned int i = 0; i < nt; i++) { + const auto kmin = tks.kmin[i]; + const auto kmax = tks.kmax[i]; + for (auto k = kmin; k < kmax; k++) { + y.exp_arg[k] = -beta * Eik(tks.zpca[i], y.zvtx[k], tks.dz2[i]); + } + + local_exp_list_range(y.exp_arg, y.exp, kmin, kmax); + + tks.sum_Z[i] = z_sum_init; + for (auto k = kmin; k < kmax; k++) { + tks.sum_Z[i] += y.rho[k] * y.exp[k]; + } + const double invZ = tks.sum_Z[i] > 1e-100 ? 1. 
/ tks.sum_Z[i] : 0.0; + + double pmax = -1; + unsigned int k_pmax = 0; + for (auto k = kmin; k < kmax; k++) { + double p = y.rho[k] * y.exp[k] * invZ; + if (p > pmax) { + pmax = p; + k_pmax = k; + } + } + + if (pmax > mintrkweight_) { + // assign to the cluster with the highest assignment weight, if it is at least mintrkweight_ + vtx_track_indices[k_pmax].push_back(i); + vtx_track_weights[k_pmax].push_back(pmax); + } + } + + // fill transient vertices + // the position is normally not used, probably not optimal when Tstop <> 2, anyway + vector clusters; + for (unsigned int k = 0; k < nv; k++) { + double sump = 0; + double sumw = 0; + double sumwp = 0, sumwz = 0; + if (!vtx_track_indices[k].empty()) { + vector vertexTracks; + TransientVertex::TransientTrackToFloatMap trkWeightMap; + unsigned int j = 0; + for (auto i : vtx_track_indices[k]) { + auto p = vtx_track_weights[k][j]; + vertexTracks.push_back(*(tks.tt[i])); + trkWeightMap[vertexTracks[j]] = p; + auto w = p * tks.dz2[i]; + sump += p; + sumw += w; + sumwp += w * p; + sumwz += w * tks.zpca[i]; + j++; + } + float zerror_squared = 1.; // + if ((sumw > 0) && (sumwp > 0)) { + zerror_squared = sumwp / (sumw * sumw); + y.zvtx[k] = sumwz / sumw; + } + + reco::BeamSpot bs = vertexTracks[0].stateAtBeamLine().beamSpot(); + GlobalPoint pos(bs.x(y.zvtx[k]), bs.y(y.zvtx[k]), y.zvtx[k]); + const float xerror_squared = pow(bs.BeamWidthX(), 2); + const float yerror_squared = pow(bs.BeamWidthY(), 2); + GlobalError err(xerror_squared, 0, yerror_squared, 0., 0., zerror_squared); + TransientVertex v(pos, err, vertexTracks, 0, 2 * sump - 3.); + v.weightMap(trkWeightMap); clusters.push_back(v); } } @@ -1272,15 +1328,17 @@ vector DAClusterizerInZ_vect::vertices_in_blocks(const vector> DAClusterizerInZ_vect::clusterize( - const vector& tracks) const { - vector> clusters; - - vector pv; +vector DAClusterizerInZ_vect::vertices(const vector& tracks) const { if (runInBlocks_ and (block_size_ < tracks.size())) //doesn't bother if low 
number of tracks - pv = vertices_in_blocks(tracks); + return vertices_in_blocks(tracks); else - pv = vertices(tracks); + return vertices_no_blocks(tracks); +} + +vector> DAClusterizerInZ_vect::clusterize( // OBSOLETE + const vector& tracks) const { + vector> clusters; + vector&& pv = vertices(tracks); #ifdef DEBUG if (DEBUGLEVEL > 0) { @@ -1491,6 +1549,6 @@ void DAClusterizerInZ_vect::fillPSetDescription(edm::ParameterSetDescription& de desc.add("uniquetrkminp", 0.0); desc.add("zrange", 4.0); desc.add("runInBlocks", false); - desc.add("block_size", 512); - desc.add("overlap_frac", 0.5); + desc.add("block_size", 10000); + desc.add("overlap_frac", 0.0); } diff --git a/RecoVertex/PrimaryVertexProducer/src/GapClusterizerInZ.cc b/RecoVertex/PrimaryVertexProducer/src/GapClusterizerInZ.cc index 9a3d6c0b33ed9..495718ea25dee 100644 --- a/RecoVertex/PrimaryVertexProducer/src/GapClusterizerInZ.cc +++ b/RecoVertex/PrimaryVertexProducer/src/GapClusterizerInZ.cc @@ -61,6 +61,20 @@ vector > GapClusterizerInZ::clusterize(const vector return clusters; } +vector GapClusterizerInZ::vertices(const vector& tracks) const { + /* repackage track clusters, compatibility with newer clusterizers */ + std::vector primary_vertices; + auto trackClusters = clusterize(tracks); + + GlobalError dummyError(0.01, 0, 0.01, 0., 0., 0.01); + for (auto& vertexTracks : trackClusters) { + GlobalPoint position(0, 0, 0); // dummy + primary_vertices.push_back(TransientVertex(position, dummyError, vertexTracks, 0)); + } + + return primary_vertices; +} + void GapClusterizerInZ::fillPSetDescription(edm::ParameterSetDescription& desc) { desc.add("zSeparation", 1.0); desc.addUntracked("verbose", false); diff --git a/RecoVertex/PrimaryVertexProducer/src/PrimaryVertexProducerAlgorithm.cc b/RecoVertex/PrimaryVertexProducer/src/PrimaryVertexProducerAlgorithm.cc index 70d502aae3de4..74ae7c36154ce 100644 --- a/RecoVertex/PrimaryVertexProducer/src/PrimaryVertexProducerAlgorithm.cc +++ 
b/RecoVertex/PrimaryVertexProducer/src/PrimaryVertexProducerAlgorithm.cc @@ -39,9 +39,6 @@ PrimaryVertexProducerAlgorithm::PrimaryVertexProducerAlgorithm(const edm::Parame if (clusteringAlgorithm == "gap") { theTrackClusterizer = new GapClusterizerInZ( conf.getParameter("TkClusParameters").getParameter("TkGapClusParameters")); - } else if (clusteringAlgorithm == "DA") { - theTrackClusterizer = new DAClusterizerInZ( - conf.getParameter("TkClusParameters").getParameter("TkDAClusParameters")); } // provide the vectorized version of the clusterizer, if supported by the build else if (clusteringAlgorithm == "DA_vect") { diff --git a/RecoVertex/PrimaryVertexProducer/src/VertexTimeAlgorithmFromTracksPID.cc b/RecoVertex/PrimaryVertexProducer/src/VertexTimeAlgorithmFromTracksPID.cc new file mode 100644 index 0000000000000..db1fd074b4621 --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/src/VertexTimeAlgorithmFromTracksPID.cc @@ -0,0 +1,193 @@ +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ValidatedPluginMacros.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "DataFormats/VertexReco/interface/Vertex.h" +#include "vdt/vdtMath.h" + +#include "RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmFromTracksPID.h" + +#ifdef PVTX_DEBUG +#define LOG edm::LogPrint("VertexTimeAlgorithmFromTracksPID") +#else +#define LOG LogDebug("VertexTimeAlgorithmFromTracksPID") +#endif + +VertexTimeAlgorithmFromTracksPID::VertexTimeAlgorithmFromTracksPID(edm::ParameterSet const& iConfig, + edm::ConsumesCollector& iCC) + : VertexTimeAlgorithmBase(iConfig, iCC), + trackMTDTimeToken_(iCC.consumes(iConfig.getParameter("trackMTDTimeVMapTag"))), + trackMTDTimeErrorToken_(iCC.consumes(iConfig.getParameter("trackMTDTimeErrorVMapTag"))), + 
trackMTDTimeQualityToken_(iCC.consumes(iConfig.getParameter("trackMTDTimeQualityVMapTag"))), + trackMTDTofPiToken_(iCC.consumes(iConfig.getParameter("trackMTDTofPiVMapTag"))), + trackMTDTofKToken_(iCC.consumes(iConfig.getParameter("trackMTDTofKVMapTag"))), + trackMTDTofPToken_(iCC.consumes(iConfig.getParameter("trackMTDTofPVMapTag"))), + minTrackVtxWeight_(iConfig.getParameter("minTrackVtxWeight")), + minTrackTimeQuality_(iConfig.getParameter("minTrackTimeQuality")), + probPion_(iConfig.getParameter("probPion")), + probKaon_(iConfig.getParameter("probKaon")), + probProton_(iConfig.getParameter("probProton")), + Tstart_(iConfig.getParameter("Tstart")), + coolingFactor_(iConfig.getParameter("coolingFactor")) {} + +void VertexTimeAlgorithmFromTracksPID::fillPSetDescription(edm::ParameterSetDescription& iDesc) { + VertexTimeAlgorithmBase::fillPSetDescription(iDesc); + + iDesc.add("trackMTDTimeVMapTag", edm::InputTag("trackExtenderWithMTD:generalTracktmtd")) + ->setComment("Input ValueMap for track time at MTD"); + iDesc.add("trackMTDTimeErrorVMapTag", edm::InputTag("trackExtenderWithMTD:generalTracksigmatmtd")) + ->setComment("Input ValueMap for track time uncertainty at MTD"); + iDesc.add("trackMTDTimeQualityVMapTag", edm::InputTag("mtdTrackQualityMVA:mtdQualMVA")) + ->setComment("Input ValueMap for track MVA quality value"); + iDesc.add("trackMTDTofPiVMapTag", edm::InputTag("trackExtenderWithMTD:generalTrackTofPi")) + ->setComment("Input ValueMap for track tof as pion"); + iDesc.add("trackMTDTofKVMapTag", edm::InputTag("trackExtenderWithMTD:generalTrackTofK")) + ->setComment("Input ValueMap for track tof as kaon"); + iDesc.add("trackMTDTofPVMapTag", edm::InputTag("trackExtenderWithMTD:generalTrackTofP")) + ->setComment("Input ValueMap for track tof as proton"); + + iDesc.add("minTrackVtxWeight", 0.5)->setComment("Minimum track weight"); + iDesc.add("minTrackTimeQuality", 0.8)->setComment("Minimum MVA Quality selection on tracks"); + + iDesc.add("probPion", 
0.7)->setComment("A priori probability pions"); + iDesc.add("probKaon", 0.2)->setComment("A priori probability kaons"); + iDesc.add("probProton", 0.1)->setComment("A priori probability protons"); + + iDesc.add("Tstart", 256.)->setComment("DA initial temperature T"); + iDesc.add("coolingFactor", 0.5)->setComment("DA cooling factor"); +} + +void VertexTimeAlgorithmFromTracksPID::setEvent(edm::Event& iEvent, edm::EventSetup const&) { + // additional collections required for vertex-time calculation + trackMTDTimes_ = iEvent.get(trackMTDTimeToken_); + trackMTDTimeErrors_ = iEvent.get(trackMTDTimeErrorToken_); + trackMTDTimeQualities_ = iEvent.get(trackMTDTimeQualityToken_); + trackMTDTofPi_ = iEvent.get(trackMTDTofPiToken_); + trackMTDTofK_ = iEvent.get(trackMTDTofKToken_); + trackMTDTofP_ = iEvent.get(trackMTDTofPToken_); +} + +bool VertexTimeAlgorithmFromTracksPID::vertexTime(float& vtxTime, + float& vtxTimeError, + const TransientVertex& vtx) const { + if (vtx.originalTracks().empty()) { + return false; + } + + auto const vtxTime_init = vtxTime; + auto const vtxTimeError_init = vtxTimeError; + const int max_iterations = 100; + + double tsum = 0; + double wsum = 0; + double w2sum = 0; + + double const a[3] = {probPion_, probKaon_, probProton_}; + + std::vector v_trackInfo; + v_trackInfo.reserve(vtx.originalTracks().size()); + + // initial guess + for (const auto& trk : vtx.originalTracks()) { + auto const trkWeight = vtx.trackWeight(trk); + if (trkWeight > minTrackVtxWeight_) { + auto const trkTimeQuality = trackMTDTimeQualities_[trk.trackBaseRef()]; + + if (trkTimeQuality >= minTrackTimeQuality_) { + auto const trkTime = trackMTDTimes_[trk.trackBaseRef()]; + auto const trkTimeError = trackMTDTimeErrors_[trk.trackBaseRef()]; + + v_trackInfo.emplace_back(); + auto& trkInfo = v_trackInfo.back(); + + trkInfo.trkWeight = trkWeight; + trkInfo.trkTimeError = trkTimeError; + + trkInfo.trkTimeHyp[0] = trkTime - trackMTDTofPi_[trk.trackBaseRef()]; + trkInfo.trkTimeHyp[1] = 
trkTime - trackMTDTofK_[trk.trackBaseRef()]; + trkInfo.trkTimeHyp[2] = trkTime - trackMTDTofP_[trk.trackBaseRef()]; + + auto const wgt = trkWeight / (trkTimeError * trkTimeError); + wsum += wgt; + + for (uint j = 0; j < 3; ++j) { + tsum += wgt * trkInfo.trkTimeHyp[j] * a[j]; + } + LOG << "vertexTimeFromTracks: track" + << " pt=" << trk.track().pt() << " eta=" << trk.track().eta() << " phi=" << trk.track().phi() + << " vtxWeight=" << trkWeight << " time=" << trkTime << " timeError=" << trkTimeError + << " timeQuality=" << trkTimeQuality << " timeHyp[pion]=" << trkInfo.trkTimeHyp[0] + << " timeHyp[kaon]=" << trkInfo.trkTimeHyp[1] << " timeHyp[proton]=" << trkInfo.trkTimeHyp[2]; + } + } + } + if (wsum > 0) { + auto t0 = tsum / wsum; + auto beta = 1. / Tstart_; + int nit = 0; + while ((nit++) < max_iterations) { + tsum = 0; + wsum = 0; + w2sum = 0; + + for (auto const& trkInfo : v_trackInfo) { + double dt = trkInfo.trkTimeError; + double e[3] = {0, 0, 0}; + const double cut_off = 4.5; + double Z = vdt::fast_exp( + -beta * cut_off); // outlier rejection term Z_0 = exp(-beta * cut_off) = exp(-beta * 0.5 * 3 * 3) + for (unsigned int j = 0; j < 3; j++) { + auto const tpull = (trkInfo.trkTimeHyp[j] - t0) / dt; + e[j] = vdt::fast_exp(-0.5 * beta * tpull * tpull); + Z += a[j] * e[j]; + } + + double wsum_trk = 0; + for (uint j = 0; j < 3; j++) { + double wt = a[j] * e[j] / Z; + double w = wt * trkInfo.trkWeight / (dt * dt); + wsum_trk += w; + tsum += w * trkInfo.trkTimeHyp[j]; + } + + wsum += wsum_trk; + w2sum += wsum_trk * wsum_trk * (dt * dt) / trkInfo.trkWeight; + } + + if (wsum < 1e-10) { + LOG << "vertexTimeFromTracks: failed while iterating"; + return false; + } + + vtxTime = tsum / wsum; + + LOG << "vertexTimeFromTracks: iteration=" << nit << ", T= " << 1 / beta << ", t=" << vtxTime + << ", t-t0=" << vtxTime - t0; + + if ((std::abs(vtxTime - t0) < 1e-4 / std::sqrt(beta)) and beta >= 1.) 
{ + vtxTimeError = std::sqrt(w2sum) / wsum; + + LOG << "vertexTimeFromTracks: tfit = " << vtxTime << " +/- " << vtxTimeError << " trec = " << vtx.time() + << ", iteration=" << nit; + + return true; + } + + if ((std::abs(vtxTime - t0) < 1e-3) and beta < 1.) { + beta = std::min(1., beta / coolingFactor_); + } + + t0 = vtxTime; + } + + LOG << "vertexTimeFromTracks: failed to converge"; + } else { + LOG << "vertexTimeFromTracks: has no track timing info"; + } + + vtxTime = vtxTime_init; + vtxTimeError = vtxTimeError_init; + + return false; +} diff --git a/RecoVertex/PrimaryVertexProducer/src/VertexTimeAlgorithmLegacy4D.cc b/RecoVertex/PrimaryVertexProducer/src/VertexTimeAlgorithmLegacy4D.cc new file mode 100644 index 0000000000000..b6dd746fecf77 --- /dev/null +++ b/RecoVertex/PrimaryVertexProducer/src/VertexTimeAlgorithmLegacy4D.cc @@ -0,0 +1,58 @@ +#include "FWCore/Framework/interface/ConsumesCollector.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "RecoVertex/PrimaryVertexProducer/interface/VertexTimeAlgorithmLegacy4D.h" + +#ifdef PVTX_DEBUG +#define LOG edm::LogPrint("VertexTimeAlgorithmLegacy4D") +#else +#define LOG LogDebug("VertexTimeAlgorithmLegacy4D") +#endif + +VertexTimeAlgorithmLegacy4D::VertexTimeAlgorithmLegacy4D(edm::ParameterSet const& iConfig, edm::ConsumesCollector& iCC) + : VertexTimeAlgorithmBase(iConfig, iCC) {} + +void VertexTimeAlgorithmLegacy4D::fillPSetDescription(edm::ParameterSetDescription& iDesc) { + VertexTimeAlgorithmBase::fillPSetDescription(iDesc); +} + +void VertexTimeAlgorithmLegacy4D::setEvent(edm::Event& iEvent, edm::EventSetup const&){}; + +bool VertexTimeAlgorithmLegacy4D::vertexTime(float& vtxTime, float& vtxTimeError, const TransientVertex& vtx) const { + const auto num_track = vtx.originalTracks().size(); + if (num_track == 0) { + return false; + } + + double sumwt = 0.; + double sumwt2 = 
0.; + double sumw = 0.; + double vartime = 0.; + + for (const auto& trk : vtx.originalTracks()) { + const double time = trk.timeExt(); + const double err = trk.dtErrorExt(); + if ((time == 0) && (err > TransientTrackBuilder::defaultInvalidTrackTimeReso)) + continue; // tracks with no time information, as implemented in TransientTrackBuilder.cc l.17 + const double inverr = err > 0. ? 1.0 / err : 0.; + const double w = inverr * inverr; + sumwt += w * time; + sumwt2 += w * time * time; + sumw += w; + } + + if (sumw > 0) { + double sumsq = sumwt2 - sumwt * sumwt / sumw; + double chisq = num_track > 1 ? sumsq / double(num_track - 1) : sumsq / double(num_track); + vartime = chisq / sumw; + + vtxTime = sumwt / sumw; + vtxTimeError = sqrt(vartime); + return true; + } + + vtxTime = 0; + vtxTimeError = 1.; + return false; +} diff --git a/SimCalorimetry/Configuration/python/SimCalorimetry_EventContent_cff.py b/SimCalorimetry/Configuration/python/SimCalorimetry_EventContent_cff.py index f87c5913acd89..9b91b16cdbb3d 100644 --- a/SimCalorimetry/Configuration/python/SimCalorimetry_EventContent_cff.py +++ b/SimCalorimetry/Configuration/python/SimCalorimetry_EventContent_cff.py @@ -7,6 +7,7 @@ 'keep *_simEcalPreshowerDigis_*_*', 'keep *_simEcalTriggerPrimitiveDigis_*_*', 'keep *_simEcalEBTriggerPrimitiveDigis_*_*', + 'keep *_simEcalEBTriggerPrimitivePhase2Digis_*_*', 'keep *_simHcalDigis_*_*', 'keep ZDCDataFramesSorted_simHcalUnsuppressedDigis_*_*', 'drop ZDCDataFramesSorted_mix_simHcalUnsuppressedDigis*_*', diff --git a/SimCalorimetry/Configuration/python/ecalDigiSequence_cff.py b/SimCalorimetry/Configuration/python/ecalDigiSequence_cff.py index 50275b8cbbb70..947fe094fbf11 100644 --- a/SimCalorimetry/Configuration/python/ecalDigiSequence_cff.py +++ b/SimCalorimetry/Configuration/python/ecalDigiSequence_cff.py @@ -21,6 +21,8 @@ _phase2_ecalDigiTask = ecalDigiTask.copy() _phase2_ecalDigiTask.add(simEcalEBTriggerPrimitiveDigis) + + from Configuration.Eras.Modifier_phase2_common_cff 
import phase2_common phase2_common.toReplaceWith(ecalDigiTask,_phase2_ecalDigiTask) @@ -28,9 +30,19 @@ _phase2_ecalDigiTask_devel = cms.Task() phase2_ecal_devel.toReplaceWith(ecalDigiTask,_phase2_ecalDigiTask_devel) -#phase 2 ecal + +from Configuration.Eras.Modifier_phase2_ecalTP_devel_cff import phase2_ecalTP_devel +from SimCalorimetry.EcalEBTrigPrimProducers.ecalEBTriggerPrimitivePhase2Digis_cfi import * +_phase2_ecalDigiTask_devel2 = cms.Task(simEcalEBTriggerPrimitivePhase2Digis) +phase2_ecalTP_devel.toReplaceWith(ecalDigiTask,_phase2_ecalDigiTask_devel2) + +#phase 2 ecal def _modifyEcalForPh2( process ): process.load("SimCalorimetry.EcalSimProducers.esEcalLiteDTUPedestalsProducer_cfi") process.load("SimCalorimetry.EcalSimProducers.esCATIAGainProducer_cfi") - modifyDigi_Phase2EcalPed = phase2_ecal_devel.makeProcessModifier(_modifyEcalForPh2) + + +def _modifyEcalTPForPh2( process ): + process.load("SimCalorimetry.EcalEBTrigPrimProducers.ecalEBTriggerPrimitivePhase2ESProducer_cfi") +modifyDigi_Phase2EcalTP = phase2_ecalTP_devel.makeProcessModifier(_modifyEcalTPForPh2) diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2AmplitudeReconstructor.h b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2AmplitudeReconstructor.h new file mode 100644 index 0000000000000..a7098e6c62c76 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2AmplitudeReconstructor.h @@ -0,0 +1,38 @@ +#ifndef SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2AmplitudeReconstructor_h +#define SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2AmplitudeReconstructor_h + +#include +#include + +class EcalEBPhase2TPGAmplWeightIdMap; +class EcalTPGWeightGroup; + +/** \class EcalPhase2AmplitudeReconstructor +\author L. Lutton, N. Marinelli - Univ. 
of Notre Dame + Description: forPhase II + It uses the new Phase2 digis based on the new EB electronics + and measures the amplitude on xTals basis +*/ + +class EcalEBPhase2AmplitudeReconstructor { +private: + static const int maxSamplesUsed_ = 12; + bool debug_; + int inputsAlreadyIn_; + int buffer_[maxSamplesUsed_]; + int weights_[maxSamplesUsed_]; + int shift_; + int setInput(int input); + void process(); + int processedOutput_; + +public: + EcalEBPhase2AmplitudeReconstructor(bool debug); + virtual ~EcalEBPhase2AmplitudeReconstructor(); + virtual void process(std::vector &addout, std::vector &output); + void setParameters(uint32_t raw, + const EcalEBPhase2TPGAmplWeightIdMap *ecaltpgWeightMap, + const EcalTPGWeightGroup *ecaltpgWeightGroup); +}; + +#endif diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2Linearizer.h b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2Linearizer.h new file mode 100644 index 0000000000000..6b86db0dcfb5b --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2Linearizer.h @@ -0,0 +1,58 @@ +#ifndef SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2Linearizer_h +#define SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2Linearizer_h + +#include "DataFormats/EcalDigi/interface/EcalLiteDTUSample.h" +#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" +#include "CondFormats/EcalObjects/interface/EcalLiteDTUPedestals.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" + +#include "CondFormats/EcalObjects/interface/EcalTPGCrystalStatus.h" + +#include + +/** \class EcalEBPhase2Linearizer +\author L. Lutton, N. Marinelli - Univ. 
of Notre Dame + Description: forPhase II + Performs the linearization of signal from Catia+LiteDTU +*/ + +class EcalEBPhase2Linearizer { +private: + bool debug_; + int uncorrectedSample_; + int gainID_; + uint base_; + uint mult_; + uint shift_; + int strip_; + bool init_; + float gainDivideByTen_ = 0.1; + std::vector coeffs_; + uint coeff_; + //I2C Stuff. Would eventually get from outside linearizer (e.g., database) + //Would also be different for each crystal + uint I2CSub_; + + const EcalLiteDTUPedestals *peds_; + const EcalEBPhase2TPGLinearizationConstant *linConsts_; + const EcalTPGCrystalStatusCode *badXStatus_; + + std::vector vectorbadXStatus_; + + int setInput(const EcalLiteDTUSample &RawSam); + + int doOutput(); + +public: + EcalEBPhase2Linearizer(bool debug); + virtual ~EcalEBPhase2Linearizer(); + + void process(const EBDigiCollectionPh2::Digi &df, std::vector &output_percry); + void setParameters(EBDetId id, + const EcalLiteDTUPedestalsMap *peds, + const EcalEBPhase2TPGLinearizationConstMap *ecaltplin, + const EcalTPGCrystalStatus *ecaltpBadX); +}; + +#endif diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2SpikeTagger.h b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2SpikeTagger.h new file mode 100644 index 0000000000000..1df64a51eee46 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2SpikeTagger.h @@ -0,0 +1,36 @@ +#ifndef SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2SpikeTagger_h +#define SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2SpikeTagger_h + +#include "DataFormats/EcalDigi/interface/EcalLiteDTUSample.h" +#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" +#include "CondFormats/EcalObjects/interface/EcalLiteDTUPedestals.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" + +#include "CondFormats/EcalObjects/interface/EcalTPGCrystalStatus.h" + +#include + +/** \class 
EcalEBPhase2SpikeTagger + Tags spikes on a channel basis +*/ + +class EcalEBPhase2SpikeTagger { +private: + bool debug_; + const EcalLiteDTUPedestals *peds_; + const EcalEBPhase2TPGLinearizationConstant *linConsts_; + const EcalTPGCrystalStatusCode *badXStatus_; + +public: + EcalEBPhase2SpikeTagger(bool debug); + virtual ~EcalEBPhase2SpikeTagger(); + + bool process(const std::vector &linInput); + void setParameters(EBDetId id, + const EcalLiteDTUPedestalsMap *peds, + const EcalEBPhase2TPGLinearizationConstMap *ecaltplin, + const EcalTPGCrystalStatus *ecaltpBadX); +}; + +#endif diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TPFormatter.h b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TPFormatter.h new file mode 100644 index 0000000000000..9a73aa394f491 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TPFormatter.h @@ -0,0 +1,28 @@ +#ifndef SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2TPFormatter_h +#define SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2TPFormatter_h + +#include "DataFormats/EcalDigi/interface/EcalEBTriggerPrimitiveSample.h" + +#include +#include + +/* + \class EcalEBPhase2TPFormatter + +*/ + +class EcalEBPhase2TPFormatter { +private: + bool debug_; + std::vector inputAmp_; + std::vector inputTime_; + +public: + EcalEBPhase2TPFormatter(bool debug); + virtual ~EcalEBPhase2TPFormatter(); + virtual void process(std::vector& ampl, + std::vector& time, + std::vector& outampl, + std::vector& outtime); +}; +#endif diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TimeReconstructor.h b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TimeReconstructor.h new file mode 100644 index 0000000000000..0daa2a9676103 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TimeReconstructor.h @@ -0,0 +1,115 @@ +#ifndef SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2TimeReconstructor_h +#define 
SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2TimeReconstructor_h + +#include +#include + +class EcalEBPhase2TPGTimeWeightIdMap; +class EcalTPGWeightGroup; + +/** \class EcalEBPhase2TimeReconstructor +\author L. Lutton, N. Marinelli - Univ. of Notre Dame + Description: forPhase II + Measures the timing of a xTal signal +*/ + +class EcalEBPhase2TimeReconstructor { +private: + static const int maxSamplesUsed_ = 12; + bool debug_; + int inputsAlreadyIn_; + int buffer_[maxSamplesUsed_]; + int weights_[maxSamplesUsed_]; + uint64_t ampIn_[maxSamplesUsed_]; + int shift_; + bool extraShift_[2] = {false, false}; + int setInput(int input); + void process(); + int processedOutput_; + + // The array invAmpPar is pre-calulated, at least for now since it has shown to be stable. We might decide at a later stage + // to make the calculation dynamic in CMSSW. + // Here some explnation of what this LUT is. + + //The sum of the digis multiplied by the time weight coefficients gives dT*A, where A is the amplitude. + //So to get dT the amplitude, calculated in EcalEBPhase2AmplitudeReconstructor, must be divided off of the result here. + //However, when this is implemented into the BCP hardware (of which we are emulating the behaviour), + //division is not a trivial operation, often requiring relatively large + //amounts of time and resources to complete. + //To optimize this operation, we instead use an approximation of that division via a lookup table (LUT). + //Think about the division as a multiplication of 1/A, then the LUT is filled with all the possible values of 1/A, + //precalculated, in the range that A takes and A itself is used to index the LUT. + //The element received is then multiplied by the dT*A result in order to get a measurement of the timing. + + //Another limitation of hardware is that we do not use floating point numbers. 
+ //So, instead, each element of the LUT is bit shifted to the left by some amount sufficient to make every element + //of the LUT large enough such that converting it to an integer doesn't lead to a loss in performance + //(this bit shift is undone after the multiplication by a corresponding bit shift to the right). + //Another method of approximation used here is that not every element in A's range is included in the LUT. + //Instead, every 8th element is used, since the difference between, e.g., dividing by 1000 and 1008 is generally + //small whereas it can be a significant save in time and resources in hardware to use a smaller LUT. + //Note that this requires the indexing amplitude to be bit shifted by 3 to the right to compensate for the smaller size of the LUT. + //Finally, dT will be in units of ns, but to convert it to ps each element of the LUT is multiplied by 1000. + + //The pre-calculation of the LUT is given by: + //invAmpAr_ = [1000*invMult] #Since 1/0 is undefined, the array starts with 1/1. The rest of the elements will be filled in the following loop: + //for i in range(8,4096,8): #loop is set to do every 8th number, so 1/8, 1/16, 1/24, etc. 4096 is the expected range of A + //invAmpAr_.append(round((ns_to_ps_conv/i)*invMult)) + //Where ns_to_ps_conv = 1000 #this is to convert the resulting dT from units of ns to units of ps + //invMult = 2**18 #Acts as a shift by 18 bits that is done to the fractions to make them integers instead of floats. 
+ + uint64_t invAmpAr_[512] = { + 262144000, 32768000, 16384000, 10922667, 8192000, 6553600, 5461333, 4681143, 4096000, 3640889, 3276800, 2978909, + 2730667, 2520615, 2340571, 2184533, 2048000, 1927529, 1820444, 1724632, 1638400, 1560381, 1489455, 1424696, + 1365333, 1310720, 1260308, 1213630, 1170286, 1129931, 1092267, 1057032, 1024000, 992970, 963765, 936229, + 910222, 885622, 862316, 840205, 819200, 799220, 780190, 762047, 744727, 728178, 712348, 697191, + 682667, 668735, 655360, 642510, 630154, 618264, 606815, 595782, 585143, 574877, 564966, 555390, + 546133, 537180, 528516, 520127, 512000, 504123, 496485, 489075, 481882, 474899, 468114, 461521, + 455111, 448877, 442811, 436907, 431158, 425558, 420103, 414785, 409600, 404543, 399610, 394795, + 390095, 385506, 381023, 376644, 372364, 368180, 364089, 360088, 356174, 352344, 348596, 344926, + 341333, 337814, 334367, 330990, 327680, 324436, 321255, 318136, 315077, 312076, 309132, 306243, + 303407, 300624, 297891, 295207, 292571, 289982, 287439, 284939, 282483, 280068, 277695, 275361, + 273067, 270810, 268590, 266407, 264258, 262144, 260063, 258016, 256000, 254016, 252062, 250137, + 248242, 246376, 244537, 242726, 240941, 239182, 237449, 235741, 234057, 232397, 230761, 229147, + 227556, 225986, 224438, 222912, 221405, 219919, 218453, 217007, 215579, 214170, 212779, 211406, + 210051, 208713, 207392, 206088, 204800, 203528, 202272, 201031, 199805, 198594, 197398, 196216, + 195048, 193893, 192753, 191626, 190512, 189410, 188322, 187246, 186182, 185130, 184090, 183061, + 182044, 181039, 180044, 179060, 178087, 177124, 176172, 175230, 174298, 173376, 172463, 171560, + 170667, 169782, 168907, 168041, 167184, 166335, 165495, 164663, 163840, 163025, 162218, 161419, + 160627, 159844, 159068, 158300, 157538, 156785, 156038, 155299, 154566, 153840, 153121, 152409, + 151704, 151005, 150312, 149626, 148945, 148271, 147604, 146942, 146286, 145636, 144991, 144352, + 143719, 143092, 142470, 141853, 141241, 140635, 140034, 139438, 
138847, 138262, 137681, 137105, + 136533, 135967, 135405, 134848, 134295, 133747, 133203, 132664, 132129, 131598, 131072, 130550, + 130032, 129518, 129008, 128502, 128000, 127502, 127008, 126517, 126031, 125548, 125069, 124593, + 124121, 123653, 123188, 122727, 122269, 121814, 121363, 120915, 120471, 120029, 119591, 119156, + 118725, 118296, 117871, 117448, 117029, 116612, 116199, 115788, 115380, 114975, 114573, 114174, + 113778, 113384, 112993, 112605, 112219, 111836, 111456, 111078, 110703, 110330, 109960, 109592, + 109227, 108864, 108503, 108145, 107789, 107436, 107085, 106736, 106390, 106045, 105703, 105363, + 105026, 104690, 104357, 104025, 103696, 103369, 103044, 102721, 102400, 102081, 101764, 101449, + 101136, 100825, 100515, 100208, 99902, 99599, 99297, 98997, 98699, 98402, 98108, 97815, + 97524, 97234, 96947, 96661, 96376, 96094, 95813, 95534, 95256, 94980, 94705, 94432, + 94161, 93891, 93623, 93356, 93091, 92827, 92565, 92304, 92045, 91787, 91531, 91276, + 91022, 90770, 90519, 90270, 90022, 89775, 89530, 89286, 89043, 88802, 88562, 88323, + 88086, 87850, 87615, 87381, 87149, 86918, 86688, 86459, 86232, 86005, 85780, 85556, + 85333, 85112, 84891, 84672, 84454, 84237, 84021, 83806, 83592, 83379, 83168, 82957, + 82747, 82539, 82332, 82125, 81920, 81716, 81512, 81310, 81109, 80909, 80709, 80511, + 80314, 80117, 79922, 79727, 79534, 79341, 79150, 78959, 78769, 78580, 78392, 78205, + 78019, 77834, 77649, 77466, 77283, 77101, 76920, 76740, 76561, 76382, 76205, 76028, + 75852, 75677, 75502, 75329, 75156, 74984, 74813, 74642, 74473, 74304, 74136, 73968, + 73802, 73636, 73471, 73306, 73143, 72980, 72818, 72656, 72496, 72336, 72176, 72018, + 71860, 71702, 71546, 71390, 71235, 71080, 70926, 70773, 70621, 70469, 70318, 70167, + 70017, 69868, 69719, 69571, 69424, 69277, 69131, 68985, 68840, 68696, 68552, 68409, + 68267, 68125, 67983, 67843, 67702, 67563, 67424, 67285, 67148, 67010, 66873, 66737, + 66602, 66467, 66332, 66198, 66065, 65932, 65799, 65667, 65536, 65405, 
65275, 65145, + 65016, 64887, 64759, 64631, 64504, 64377, 64251, 64125}; + +public: + EcalEBPhase2TimeReconstructor(bool debug); + virtual ~EcalEBPhase2TimeReconstructor(); + virtual void process(std::vector &addout, std::vector &RecoOutput, std::vector &output); + void setParameters(uint32_t raw, + const EcalEBPhase2TPGTimeWeightIdMap *ecaltpgTimeWeightMap, + const EcalTPGWeightGroup *ecaltpgWeightGroup); +}; + +#endif diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TrigPrimAlgo.h b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TrigPrimAlgo.h new file mode 100644 index 0000000000000..a0dad15d02e26 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TrigPrimAlgo.h @@ -0,0 +1,217 @@ +#ifndef SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2TrigPrimAlgo_h +#define SimCalorimetry_EcalEBTrigPrimAlgos_EcalEBPhase2TrigPrimAlgo_h +/** \class EcalEBPhase2TrigPrimAlgo +\author L. Lutton, N. Marinelli - Univ. of Notre Dame + Description: forPhase II + It uses the new Phase2 digis based onthe new EB electronics + This is the main algo which plugs in all the subcomponents for the + amplitude and time measurement and the spike flagging +*/ + +#include +#include +#include + +#include "Geometry/EcalMapping/interface/EcalElectronicsMapping.h" +#include "Geometry/CaloTopology/interface/EcalTrigTowerConstituentsMap.h" +#include "DataFormats/EcalDetId/interface/EcalTriggerElectronicsId.h" +#include "DataFormats/Common/interface/SortedCollection.h" +#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" + +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "Geometry/Records/interface/CaloGeometryRecord.h" +#include "Geometry/CaloGeometry/interface/CaloSubdetectorGeometry.h" +#include "Geometry/CaloGeometry/interface/CaloCellGeometry.h" +#include 
"DataFormats/EcalDigi/interface/EcalDataFrame_Ph2.h" +#include "CondFormats/EcalObjects/interface/EcalLiteDTUPedestals.h" + +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2Linearizer.h" +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2AmplitudeReconstructor.h" +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TimeReconstructor.h" +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2SpikeTagger.h" +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TPFormatter.h" + +#include +#include + +class EcalTrigTowerDetId; +class ETPCoherenceTest; +class EcalEBPhase2TriggerPrimitiveSample; +class CaloSubdetectorGeometry; +class EBDataFrame_Ph2; + +class EcalEBPhase2TrigPrimAlgo { +public: + explicit EcalEBPhase2TrigPrimAlgo(const EcalTrigTowerConstituentsMap *eTTmap, + const CaloGeometry *theGeometry, + int binofmax, + bool debug); + + virtual ~EcalEBPhase2TrigPrimAlgo(); + + void run(const EBDigiCollectionPh2 *col, EcalEBPhase2TrigPrimDigiCollection &result); + + void setPointers(const EcalLiteDTUPedestalsMap *ecaltpPed, + const EcalEBPhase2TPGLinearizationConstMap *ecaltpLin, + const EcalTPGCrystalStatus *ecaltpgBadX, + const EcalEBPhase2TPGAmplWeightIdMap *ecaltpgAmplWeightMap, + const EcalEBPhase2TPGTimeWeightIdMap *ecaltpgTimeWeightMap, + const EcalTPGWeightGroup *ecaltpgWeightGroup) { + ecaltpPed_ = ecaltpPed; + ecaltpgBadX_ = ecaltpgBadX; + ecaltpLin_ = ecaltpLin; + ecaltpgAmplWeightMap_ = ecaltpgAmplWeightMap; + ecaltpgTimeWeightMap_ = ecaltpgTimeWeightMap; + ecaltpgWeightGroup_ = ecaltpgWeightGroup; + } + +private: + //old void init(const edm::EventSetup & setup); + void init(); + template + void initStructures(std::vector > > > &towMap); + template + void clean(std::vector > > > &towerMap); + + void fillMap(EBDigiCollectionPh2 const *col, + std::vector > > > &towerMap); + + int findStripNr(const EBDetId &id); + + int getIndex(const EBDigiCollectionPh2 *, EcalTrigTowerDetId &id) { 
return id.hashedIndex(); } + // mind that eta is continuous between barrel+endcap + // int getIndex(const EEDigiCollectionPh2 *, EcalTrigTowerDetId& id) { + // int ind=(id.ietaAbs()-18)*72 + id.iphi(); + // if (id.zside()<0) ind+=792; + // return ind; + // } + + const EcalTrigTowerConstituentsMap *eTTmap_ = nullptr; + const CaloGeometry *theGeometry_ = nullptr; + + int binOfMaximum_; + int maxNrSamples_; + bool debug_; + + int nrTowers_; // nr of towers found by fillmap method + static const unsigned int maxNrTowers_; + static const unsigned int nrSamples_; + + // data structures kept during the whole run + std::vector > striptp_; + std::vector > > > towerMapEB_; + std::vector > hitTowers_; + std::vector towtp_; + std::vector towtp2_; + + enum { nbMaxStrips_ = 5 }; + enum { nbMaxXtals_ = 5 }; + + const EcalElectronicsMapping *theMapping_; + + EcalEBPhase2Linearizer *linearizer_; + + EcalEBPhase2AmplitudeReconstructor *amplitude_reconstructor_; + EcalEBPhase2TimeReconstructor *time_reconstructor_; + EcalEBPhase2SpikeTagger *spike_tagger_; + EcalEBPhase2TPFormatter *tpFormatter_; + + // + + const EcalLiteDTUPedestalsMap *ecaltpPed_; + const EcalTPGCrystalStatus *ecaltpgBadX_; + + const EcalTPGWeightGroup *ecaltpgWeightGroup_; + const EcalEBPhase2TPGLinearizationConstMap *ecaltpLin_; + const EcalEBPhase2TPGAmplWeightIdMap *ecaltpgAmplWeightMap_; + const EcalEBPhase2TPGTimeWeightIdMap *ecaltpgTimeWeightMap_; + + EcalEBPhase2Linearizer *getLinearizer() const { return linearizer_; } + std::vector lin_out_; + // + EcalEBPhase2AmplitudeReconstructor *getAmplitudeFinder() const { return amplitude_reconstructor_; } + std::vector filt_out_; + std::vector time_out_; + std::vector amp_out_; + std::vector outEt_; + std::vector outTime_; + + EcalEBPhase2TimeReconstructor *getTimeFinder() const { return time_reconstructor_; } + EcalEBPhase2SpikeTagger *getSpikeTagger() const { return spike_tagger_; } + EcalEBPhase2TPFormatter *getTPFormatter() const { return tpFormatter_; } + + // 
+}; + +template +void EcalEBPhase2TrigPrimAlgo::clean(std::vector > > > &towMap) { + // clean internal data structures + for (unsigned int i = 0; i < maxNrTowers_; ++i) + for (int j = 0; j < nbMaxStrips_; ++j) + (towMap[i])[j].first = 0; + return; +} + +inline void EcalEBPhase2TrigPrimAlgo::fillMap( + EBDigiCollectionPh2 const *col, std::vector > > > &towerMap) + +{ + // implementation for Barrel + if (col) { + nrTowers_ = 0; + for (unsigned int i = 0; i < col->size(); ++i) { + EBDigiCollectionPh2::Digi samples((*col)[i]); + EcalTrigTowerDetId coarser = (*eTTmap_).towerOf(samples.id()); + int index = getIndex(col, coarser); + EBDetId id = samples.id(); + int stripnr = findStripNr(id); + + int filled = 0; + for (unsigned int ij = 0; ij < towerMap[index].size(); ++ij) + filled += towerMap[index][ij].first; + if (!filled) { + hitTowers_[nrTowers_++] = std::pair(index, coarser); + } + + //FIXME: temporary protection + int ncryst = towerMap[index][stripnr - 1].first; + if (ncryst >= nbMaxXtals_) { + continue; + } + ((towerMap[index])[stripnr - 1].second)[ncryst] = samples; + (towerMap[index])[stripnr - 1].first++; + } + + if (debug_) + LogDebug("") << "fillMap" + << "[EcalEBPhase2TrigPrimAlgo] (found " << col->size() << " frames in " << towerMap.size() + << " towers) " << std::endl; + } else { + if (debug_) + LogDebug("EcalEBPhase2TrigPrimAlgo") << "FillMap - FillMap Collection size=0 !!!!" 
<< std::endl; + ; + } +} + +template +void EcalEBPhase2TrigPrimAlgo::initStructures(std::vector > > > &towMap) { + //initialise internal data structures + + std::vector vec0(nbMaxXtals_); + std::vector > > vec1(nbMaxStrips_); + for (int i = 0; i < nbMaxStrips_; ++i) + vec1[i] = std::pair >(0, vec0); + towMap.resize(maxNrTowers_); + for (unsigned int i = 0; i < maxNrTowers_; ++i) + towMap[i] = vec1; + + std::vector vecint(maxNrSamples_); + striptp_.resize(nbMaxStrips_); + for (int i = 0; i < nbMaxStrips_; ++i) + striptp_[i] = vecint; +} + +#endif diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2AmplitudeReconstructor.cc b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2AmplitudeReconstructor.cc new file mode 100644 index 0000000000000..2ac137f06f712 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2AmplitudeReconstructor.cc @@ -0,0 +1,140 @@ +#include +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalTPGWeightGroup.h" +#include "DataFormats/EcalDigi/interface/EcalConstants.h" +#include "CondFormats/EcalObjects/interface/EcalTPGGroups.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +#include + +EcalEBPhase2AmplitudeReconstructor::EcalEBPhase2AmplitudeReconstructor(bool debug) + : debug_(debug), inputsAlreadyIn_(0), shift_(13) {} + +EcalEBPhase2AmplitudeReconstructor::~EcalEBPhase2AmplitudeReconstructor() {} + +int EcalEBPhase2AmplitudeReconstructor::setInput(int input) { + if (input > 0X3FFF) { + edm::LogError("EcalEBPhase2AmplitudeReconstructor") << "ERROR IN INPUT OF AMPLITUDE FILTER" << std::endl; + return -1; + } + + if (inputsAlreadyIn_ < maxSamplesUsed_) { + if (debug_) + LogDebug("") << " EcalEBPhase2AmplitudeReconstructor::setInput inputsAlreadyIn_<5 input " << input << std::endl; + + buffer_[inputsAlreadyIn_] = input; + inputsAlreadyIn_++; + } else { + for (int i = 0; i < (maxSamplesUsed_ - 1); i++) { + buffer_[i] = buffer_[i + 
1]; + if (debug_) + LogDebug("") << " EcalEBPhase2AmplitudeReconstructor::setInput inputsAlreadyIn buffer " << buffer_[i] + << std::endl; + } + buffer_[maxSamplesUsed_ - 1] = input; + } + return 1; +} + +void EcalEBPhase2AmplitudeReconstructor::process(std::vector &linout, std::vector &output) { + inputsAlreadyIn_ = 0; + for (unsigned int i = 0; i < maxSamplesUsed_; i++) { + buffer_[i] = 0; + } + + for (unsigned int i = 0; i < linout.size(); i++) { + setInput(linout[i]); + if (debug_) { + for (unsigned int j = 0; j < maxSamplesUsed_; j++) { + LogDebug("") << " buffer_ " << buffer_[j]; + } + LogDebug("") << " " << std::endl; + } + + if (i == (maxSamplesUsed_ - 1)) { + process(); + output[0] = processedOutput_; + } else if (i == (ecalPh2::sampleSize - 1)) { + process(); + output[1] = processedOutput_; + } + } + return; +} + +void EcalEBPhase2AmplitudeReconstructor::process() { + processedOutput_ = 0; + if (inputsAlreadyIn_ < maxSamplesUsed_) + return; + int64_t tmpIntOutput = 0; + for (int i = 0; i < maxSamplesUsed_; i++) { + tmpIntOutput += (weights_[i] * buffer_[i]); + if (debug_) + LogDebug("") << " AmplitudeFilter buffer " << buffer_[i] << " weight " << weights_[i] << std::endl; + } + if (tmpIntOutput < 0) + tmpIntOutput = 0; + tmpIntOutput = tmpIntOutput >> shift_; + if (debug_) + LogDebug("") << " AmplitudeFilter tmpIntOutput " << tmpIntOutput << " shift_ " << shift_ << std::endl; + if (tmpIntOutput > 0X1FFF) + tmpIntOutput = 0X1FFF; + uint output = tmpIntOutput; // should be 13 bit uint at this point + processedOutput_ = output; + if (debug_) + LogDebug("") << " AmplitudeFilter processedOutput_ " << processedOutput_ << std::endl; +} + +void EcalEBPhase2AmplitudeReconstructor::setParameters(uint32_t raw, + const EcalEBPhase2TPGAmplWeightIdMap *ecaltpgWeightMap, + const EcalTPGWeightGroup *ecaltpgWeightGroup) { + uint32_t params_[maxSamplesUsed_]; + const EcalTPGGroups::EcalTPGGroupsMap &groupmap = ecaltpgWeightGroup->getMap(); + if (debug_) + LogDebug("") << " 
EcalEBPhase2AmplitudeReconstructor::setParameters groupmap size " << groupmap.size() + << " channel ID " << raw << std::endl; + EcalTPGGroups::EcalTPGGroupsMapItr it = groupmap.find(raw); + if (it != groupmap.end()) { + uint32_t weightid = (*it).second; + + const EcalEBPhase2TPGAmplWeightIdMap::EcalEBPhase2TPGAmplWeightMap &weightmap = ecaltpgWeightMap->getMap(); + EcalEBPhase2TPGAmplWeightIdMap::EcalEBPhase2TPGAmplWeightMapItr itw = weightmap.find(weightid); + + (*itw).second.getValues(params_[0], + params_[1], + params_[2], + params_[3], + params_[4], + params_[5], + params_[6], + params_[7], + params_[8], + params_[9], + params_[10], + params_[11]); + + if (debug_) + LogDebug("") << " EcalEBPhase2AmplitudeReconstructor::setParameters weights after the map " << params_[0] << " " + << params_[1] << " " << params_[2] << " " << params_[3] << " " << params_[4] << " " << params_[5] + << " " << params_[6] << " " << params_[7] << " " << params_[8] << " " << params_[9] << " " + << params_[10] << " " << params_[11] << std::endl; + + // we have to transform negative coded in 13 bits into negative coded in 32 bits + // maybe this should go into the getValue method?? + + for (int i = 0; i < maxSamplesUsed_; ++i) { + weights_[i] = (params_[i] & 0x1000) ? 
(int)(params_[i] | 0xfffff000) : (int)(params_[i]); + } + + if (debug_) { + for (int i = 0; i < maxSamplesUsed_; ++i) { + LogDebug("") << " EcalEBPhase2AmplitudeReconstructor::setParameters weights after the cooking " << weights_[i] + << std::endl; + } + LogDebug("") << std::endl; + } + + } else + edm::LogWarning("EcalTPG") + << " EcalEBPhase2AmplitudeReconstructor::setParameters could not find EcalTPGGroupsMap entry for " << raw; +} diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2Linearizer.cc b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2Linearizer.cc new file mode 100644 index 0000000000000..2802081a81ffa --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2Linearizer.cc @@ -0,0 +1,167 @@ +#include + +//#include + +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +EcalEBPhase2Linearizer::EcalEBPhase2Linearizer(bool debug) + : debug_(debug), init_(false), peds_(nullptr), badXStatus_(nullptr) {} + +EcalEBPhase2Linearizer::~EcalEBPhase2Linearizer() { + if (init_) { + for (int i = 0; i < (int)vectorbadXStatus_.size(); i++) { + delete vectorbadXStatus_[i]; + } + } +} + +void EcalEBPhase2Linearizer::setParameters(EBDetId detId, + const EcalLiteDTUPedestalsMap *ecaltpPed, + const EcalEBPhase2TPGLinearizationConstMap *ecaltpLin, + const EcalTPGCrystalStatus *ecaltpBadX) + +{ + EcalLiteDTUPedestalsMap::const_iterator itped = ecaltpPed->getMap().find(detId); + if (itped != ecaltpPed->end()) + peds_ = &(*itped); + else + edm::LogError("EcalEBPhase2Linearizer") << " could not find EcalLiteDTUPedestal entry for " << detId << std::endl; + + const EcalEBPhase2TPGLinearizationConstMap &linMap = ecaltpLin->getMap(); + EcalEBPhase2TPGLinearizationConstMapIterator it = linMap.find(detId.rawId()); + if (it != linMap.end()) { + linConsts_ = &(*it); + } else + edm::LogError("EcalEBPhase2Linearizer") + << " could not find EcalEBPhase2TPGLinearizationConstMap entry for " << detId.rawId() << std::endl; + + const EcalTPGCrystalStatusMap 
&badXMap = ecaltpBadX->getMap(); + EcalTPGCrystalStatusMapIterator itbadX = badXMap.find(detId.rawId()); + if (itbadX != badXMap.end()) { + badXStatus_ = &(*itbadX); + } else { + edm::LogWarning("EcalTPG") << " could not find EcalTPGCrystalStatusMap entry for " << detId.rawId(); + badXStatus_ = new EcalTPGCrystalStatusCode(); + vectorbadXStatus_.push_back(&(*badXStatus_)); + init_ = true; + } +} + +int EcalEBPhase2Linearizer::doOutput() { + int tmpIntOut; + if (uncorrectedSample_) { + tmpIntOut = (uncorrectedSample_ - base_ + I2CSub_); //Substract base. Add I2C + } else { + tmpIntOut = 0; + } + if (tmpIntOut < 0) { + tmpIntOut = 0; + } + uint output = tmpIntOut; + output = (output * mult_) >> shift_; + // protect against saturation + // ........... + + return output; +} + +int EcalEBPhase2Linearizer::setInput(const EcalLiteDTUSample &RawSam) + +{ + uncorrectedSample_ = RawSam.adc(); //uncorrectedSample_ + gainID_ = RawSam.gainId(); + + base_ = peds_->mean(gainID_); + + if (gainID_ == 0) { + mult_ = linConsts_->mult_x10; + shift_ = linConsts_->shift_x10; + I2CSub_ = linConsts_->i2cSub_x10; + } else { + mult_ = linConsts_->mult_x1; + shift_ = linConsts_->shift_x1; + I2CSub_ = linConsts_->i2cSub_x1; + } + + return 1; +} + +void EcalEBPhase2Linearizer::process(const EBDigiCollectionPh2::Digi &df, std::vector &output_percry) { + //We know a tower numbering is: // S1 S2 S3 S4 S5 + + // 4 5 14 15 24 + // 3 6 13 16 23 + // 2 7 12 17 22 + // 1 8 11 18 21 + // 0 9 10 19 20 + + for (int i = 0; i < df.size(); i++) { + EcalLiteDTUSample thisSample = df[i]; + setInput(thisSample); + output_percry[i] = doOutput(); + } + + if (debug_) { + LogDebug("EcalEBPhase2Linearizer") << " mult " + << " "; + for (int i = 0; i < df.size(); i++) { + EcalLiteDTUSample thisSample = df[i]; + setInput(thisSample); + LogDebug("") << mult_ << " "; + } + LogDebug("") << " " << std::endl; + + LogDebug("") << " gainID " + << " "; + for (int i = 0; i < df.size(); i++) { + EcalLiteDTUSample thisSample = 
df[i]; + setInput(thisSample); + LogDebug("") << gainID_ << " "; + } + LogDebug("") << " " << std::endl; + + LogDebug("") << " Ped " + << " "; + for (int i = 0; i < df.size(); i++) { + EcalLiteDTUSample thisSample = df[i]; + setInput(thisSample); + LogDebug("") << base_ << " "; + } + LogDebug("") << " " << std::endl; + + LogDebug("") << " i2c " + << " "; + for (int i = 0; i < df.size(); i++) { + EcalLiteDTUSample thisSample = df[i]; + setInput(thisSample); + LogDebug("") << I2CSub_ << " "; + } + LogDebug("") << " " << std::endl; + + LogDebug("") << " shift " + << " "; + for (int i = 0; i < df.size(); i++) { + EcalLiteDTUSample thisSample = df[i]; + setInput(thisSample); + LogDebug("") << shift_ << " "; + } + LogDebug("") << " " << std::endl; + + LogDebug("") << " lin out " + << " "; + for (int i = 0; i < df.size(); i++) { + LogDebug("") << output_percry[i] << " "; + } + + LogDebug("") << " " << std::endl; + + LogDebug("") << " EcalEBPhase2Linearizer::process(const .. Final output " << std::endl; + LogDebug("") << " output_percry " + << " "; + for (int i = 0; i < df.size(); i++) { + LogDebug("") << output_percry[i] << " "; + } + LogDebug("") << " " << std::endl; + } + return; +} diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2SpikeTagger.cc b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2SpikeTagger.cc new file mode 100644 index 0000000000000..d430c093bfe52 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2SpikeTagger.cc @@ -0,0 +1,53 @@ +#include + +#include "FWCore/MessageLogger/interface/MessageLogger.h" + +EcalEBPhase2SpikeTagger::EcalEBPhase2SpikeTagger(bool debug) : debug_(debug) {} + +EcalEBPhase2SpikeTagger::~EcalEBPhase2SpikeTagger() {} + +void EcalEBPhase2SpikeTagger::setParameters(EBDetId detId, + const EcalLiteDTUPedestalsMap* ecaltpPed, + const EcalEBPhase2TPGLinearizationConstMap* ecaltpLin, + const EcalTPGCrystalStatus* ecaltpBadX) + +{ + if (debug_) + LogDebug("") << " EcalEBPhase2SpikeTagger::setParameters " 
<< std::endl; + + EcalLiteDTUPedestalsMap::const_iterator itped = ecaltpPed->getMap().find(detId); + if (itped != ecaltpPed->end()) { + peds_ = &(*itped); + } else { + edm::LogError("EcalEBPhase2SpikeTagger::setParameters") + << " could not find EcalLiteDTUPedestal entry for " << detId; + throw cms::Exception("EcalEBPhase2SpikeTagger::setParameters could not find pedestals"); + } + + const EcalEBPhase2TPGLinearizationConstMap& linMap = ecaltpLin->getMap(); + EcalEBPhase2TPGLinearizationConstMapIterator it = linMap.find(detId.rawId()); + if (it != linMap.end()) { + linConsts_ = &(*it); + } else { + edm::LogError("EcalEBPhase2SpikeTagger::setParameters") + << " could not find EcalEBPhase2TPGLinearizationConstMap entry for " << detId.rawId(); + throw cms::Exception("EcalEBPhase2SpikeTagger::setParameters could not find pedestals"); + } +} + +bool EcalEBPhase2SpikeTagger::process(const std::vector& linInput) { + bool isASpike; + isASpike = false; + + if (debug_) { + LogDebug("") << "EcalEBPhase2SpikeTagger::process linearized digis " << std::endl; + for (unsigned int i = 0; i < linInput.size(); i++) { + LogDebug("") << " " << std::dec << linInput[i]; + } + LogDebug("") << std::endl; + } + + // dummy for now. 
It needs the algorythm to be implememted/plugged in here + + return isASpike; +} diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TPFormatter.cc b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TPFormatter.cc new file mode 100644 index 0000000000000..610809ae58f34 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TPFormatter.cc @@ -0,0 +1,38 @@ +#include +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include + +EcalEBPhase2TPFormatter::EcalEBPhase2TPFormatter(bool debug) : debug_(debug) {} + +EcalEBPhase2TPFormatter::~EcalEBPhase2TPFormatter() {} + +void EcalEBPhase2TPFormatter::process(std::vector &, + std::vector &time, + std::vector &outEt, + std::vector &outTime) { + unsigned int size = amp.size(); + outEt.resize(size); + outTime.resize(size); + + for (unsigned int i = 0; i < size; ++i) { + outEt[i] = amp[i]; + outTime[i] = time[i]; + } + + for (unsigned int i = 0; i < size; ++i) { + // this is the energy compression to 12 bits to go in the DF. To be done as last thing before building the TP + //Bit shift by 1 to go from 13 bits to 12 + outEt[i] = outEt[i] >> 1; + if (outEt[i] > 0xFFF) + outEt[i] = 0xFFF; + } + + for (unsigned int i = 0; i < size; ++i) { + // this is the time compression to 5 bits to go in the DF. 
+ outTime[i] = outTime[i] >> 6; + if (outTime[i] > 0xf) + outTime[i] = 0xf; + else if (outTime[i] < -0x10) + outTime[i] = -0x10; + } +} diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TimeReconstructor.cc b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TimeReconstructor.cc new file mode 100644 index 0000000000000..6349a2ec60950 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TimeReconstructor.cc @@ -0,0 +1,174 @@ +#include +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalTPGWeightGroup.h" +#include "DataFormats/EcalDigi/interface/EcalConstants.h" +#include "CondFormats/EcalObjects/interface/EcalTPGGroups.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include + +EcalEBPhase2TimeReconstructor::EcalEBPhase2TimeReconstructor(bool debug) + : debug_(debug), inputsAlreadyIn_(0), shift_(maxSamplesUsed_) {} + +EcalEBPhase2TimeReconstructor::~EcalEBPhase2TimeReconstructor() {} + +int EcalEBPhase2TimeReconstructor::setInput(int input) { + if (input > 0X7FFF) { + edm::LogError("EcalEBPhase2TimeReconstructor::setInput") << "ERROR IN INPUT OF TIME FILTER" << std::endl; + return -1; + } + if (inputsAlreadyIn_ < maxSamplesUsed_) { + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::setInput inputsAlreadyIn_<5 input " << input << std::endl; + + buffer_[inputsAlreadyIn_] = input; + inputsAlreadyIn_++; + } else { + for (int i = 0; i < (maxSamplesUsed_ - 1); i++) { + buffer_[i] = buffer_[i + 1]; + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::setInput inputsAlreadyIn buffer " << buffer_[i] << std::endl; + } + buffer_[maxSamplesUsed_ - 1] = input; + inputsAlreadyIn_++; + } + return 1; +} + +void EcalEBPhase2TimeReconstructor::process(std::vector &addout, + std::vector &RecoOutput, + std::vector &output) { + inputsAlreadyIn_ = 0; + for (unsigned int i = 0; i < maxSamplesUsed_; i++) { + buffer_[i] = 0; + } + + //Taking 
in the results of the amplitude reconstruction + //Bit shifting them for use as index of invAmpAr_ lookup table + // move input amplitude (13 bits) to 9 bits to use as array index + + ampIn_[0] = ampRecoOutput[0] >> 4; + ampIn_[1] = ampRecoOutput[1] >> 4; + + for (unsigned int i = 0; i < addout.size(); i++) { + setInput(addout[i]); + + if (debug_) { + LogDebug("") << " EcalEBPhase2TimeReconstructor::process(std::vector buffer_ " << std::endl; + + for (unsigned int j = 0; j < maxSamplesUsed_; j++) { + LogDebug("") << " buffer_ " << buffer_[j]; + } + LogDebug("") << " " << std::endl; + } + + if (i == (maxSamplesUsed_ - 1)) { + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::process(std::vector) i = 11 " << std::endl; + process(); + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::process(std::vector) after process() " + << processedOutput_ << std::endl; + output[0] = processedOutput_; + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::process(std::vector) after setting the output " + << output[0] << std::endl; + } else if (i == (ecalPh2::sampleSize - 1)) { + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::process(std::vector) i = 15 " << std::endl; + process(); + output[1] = processedOutput_; + } + } + + return; +} + +void EcalEBPhase2TimeReconstructor::process() { + //UB FIXME: 5 + processedOutput_ = 0; + if (inputsAlreadyIn_ < 12) + return; + int64_t output = 0; + for (int i = 0; i < 12; i++) { + output += (weights_[i] * buffer_[i]); + if (debug_) + LogDebug("") << " TimeFilter buffer " << buffer_[i] << " weight " << weights_[i] << " output " << output + << std::endl; + } + output = output >> shift_; + if (debug_) + LogDebug("") << " TimeFilter local output " << output << std::endl; + //Dividing output by the result of the amplitude reconstruction via an approximation using the invAmpAr lookup table + int ampInd = 0; + if (debug_) + LogDebug("") << " inputsAlreadyIn_ " << inputsAlreadyIn_ << std::endl; + if 
(inputsAlreadyIn_ > 12) { + ampInd = 1; + } + + if (debug_) + LogDebug("") << " Begininning Final TimeFilter Calculation" << std::endl; + + int64_t tmpOutput = output * invAmpAr_[ampIn_[ampInd]]; + if (debug_) + LogDebug("") << " output*tmpInvAmpAr " << tmpOutput << std::endl; + + output = tmpOutput >> 20; + if (debug_) + LogDebug("") << " output after bit shift " << output << std::endl; + + if (output < -1024) + output = -1023; + else if (output > 1024) + output = 1023; + if (debug_) + LogDebug("") << " output after if/else " << output << std::endl; + processedOutput_ = output; + + if (debug_) + LogDebug("") << " TimeFilter final output " << processedOutput_ << std::endl; +} + +void EcalEBPhase2TimeReconstructor::setParameters(uint32_t raw, + const EcalEBPhase2TPGTimeWeightIdMap *ecaltpgWeightMap, + const EcalTPGWeightGroup *ecaltpgWeightGroup) { + uint32_t params_[12]; + const EcalTPGGroups::EcalTPGGroupsMap &groupmap = ecaltpgWeightGroup->getMap(); + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::setParameters groupmap size " << groupmap.size() << std::endl; + EcalTPGGroups::EcalTPGGroupsMapItr it = groupmap.find(raw); + if (it != groupmap.end()) { + uint32_t weightid = (*it).second; + const EcalEBPhase2TPGTimeWeightIdMap::EcalEBPhase2TPGTimeWeightMap &weightmap = ecaltpgWeightMap->getMap(); + EcalEBPhase2TPGTimeWeightIdMap::EcalEBPhase2TPGTimeWeightMapItr itw = weightmap.find(weightid); + + (*itw).second.getValues(params_[0], + params_[1], + params_[2], + params_[3], + params_[4], + params_[5], + params_[6], + params_[7], + params_[8], + params_[9], + params_[10], + params_[11]); + + if (debug_) + LogDebug("") << " EcalEBPhase2TimeReconstructor::setParameters time weights after the map " << params_[0] << " " + << params_[1] << " " << params_[2] << " " << params_[3] << " " << params_[4] << " " << params_[5] + << " " << params_[6] << " " << params_[7] << " " << params_[8] << " " << params_[9] << " " + << params_[10] << " " << params_[11] << 
std::endl; + + // we have to transform negative coded in 16 bits into negative coded in 32 bits + // maybe this should go into the getValue method?? + + for (int i = 0; i < 12; ++i) { + weights_[i] = (params_[i] & 0x8000) ? (int)(params_[i] | 0xffff8000) : (int)(params_[i]); + } + + } else + edm::LogWarning("EcalTPG") + << " EcalEBPhase2TimeReconstructor::setParameters could not find EcalTPGGroupsMap entry for " << raw; +} diff --git a/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TrigPrimAlgo.cc b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TrigPrimAlgo.cc new file mode 100644 index 0000000000000..7aa5558cdd08f --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimAlgos/src/EcalEBPhase2TrigPrimAlgo.cc @@ -0,0 +1,294 @@ +#include +#include +#include +#include + +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "Geometry/EcalMapping/interface/EcalElectronicsMapping.h" +#include "Geometry/EcalMapping/interface/EcalMappingRcd.h" +#include "Geometry/EcalAlgo/interface/EcalBarrelGeometry.h" + +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TrigPrimAlgo.h" + +#include "DataFormats/EcalDetId/interface/EcalSubdetector.h" +#include "DataFormats/EcalDigi/interface/EBDataFrame_Ph2.h" +#include "DataFormats/EcalDetId/interface/EcalTrigTowerDetId.h" +#include "DataFormats/EcalDetId/interface/EBDetId.h" +#include "DataFormats/EcalDetId/interface/EcalTriggerElectronicsId.h" + +#include "CondFormats/EcalObjects/interface/EcalTPGPedestals.h" +#include "CondFormats/DataRecord/interface/EcalTPGPedestalsRcd.h" + +#include "DataFormats/EcalDigi/interface/EcalConstants.h" + +#include +#include + +//---------------------------------------------------------------------- + +const unsigned int EcalEBPhase2TrigPrimAlgo::nrSamples_ = + ecalPh2::sampleSize; // set to 16 samples, might change (less than 16) in the future +const unsigned int EcalEBPhase2TrigPrimAlgo::maxNrTowers_ = 2448; // number of towers in EB + 
+EcalEBPhase2TrigPrimAlgo::EcalEBPhase2TrigPrimAlgo(const EcalTrigTowerConstituentsMap *eTTmap, + const CaloGeometry *theGeometry, + int binofmax, + bool debug) + : eTTmap_(eTTmap), + theGeometry_(theGeometry), + binOfMaximum_(binofmax), + debug_(debug) + +{ + maxNrSamples_ = ecalPh2::sampleSize; + this->init(); +} + +void EcalEBPhase2TrigPrimAlgo::init() { + theMapping_ = new EcalElectronicsMapping(); + // initialise data structures + initStructures(towerMapEB_); + hitTowers_.resize(maxNrTowers_); + + linearizer_ = new EcalEBPhase2Linearizer(debug_); + lin_out_.resize(maxNrSamples_); + + amplitude_reconstructor_ = new EcalEBPhase2AmplitudeReconstructor(debug_); + filt_out_.resize(maxNrSamples_); + + tpFormatter_ = new EcalEBPhase2TPFormatter(debug_); + outEt_.resize(maxNrSamples_); + outTime_.resize(maxNrSamples_); + + // + + time_reconstructor_ = new EcalEBPhase2TimeReconstructor(debug_); + time_out_.resize(maxNrSamples_); + spike_tagger_ = new EcalEBPhase2SpikeTagger(debug_); +} +//---------------------------------------------------------------------- + +EcalEBPhase2TrigPrimAlgo::~EcalEBPhase2TrigPrimAlgo() { + delete linearizer_; + delete amplitude_reconstructor_; + delete time_reconstructor_; + delete spike_tagger_; + delete tpFormatter_; + delete theMapping_; +} + +void EcalEBPhase2TrigPrimAlgo::run(EBDigiCollectionPh2 const *digi, EcalEBPhase2TrigPrimDigiCollection &result) { + if (debug_) + LogDebug("") << " EcalEBPhase2TrigPrimAlgo: digi size " << digi->size() << std::endl; + + EcalEBPhase2TriggerPrimitiveDigi tp; + int firstSample = binOfMaximum_ - 1 - nrSamples_ / 2; + int lastSample = binOfMaximum_ - 1 + nrSamples_ / 2; + + if (debug_) { + LogDebug("") << " binOfMaximum_ " << binOfMaximum_ << " nrSamples_" << nrSamples_ << std::endl; + LogDebug("") << " first sample " << firstSample << " last " << lastSample << std::endl; + } + + clean(towerMapEB_); + fillMap(digi, towerMapEB_); + + int iChannel = 0; + int nXinBCP = 0; + for (int itow = 0; itow < 
nrTowers_; ++itow) { + int index = hitTowers_[itow].first; + const EcalTrigTowerDetId &thisTower = hitTowers_[itow].second; + if (debug_) + LogDebug("") << " Data for TOWER num " << itow << " index " << index << " TowerId " << thisTower << " zside " + << thisTower.zside() << " ieta " << thisTower.ieta() << " iphi " << thisTower.iphi() << " size " + << towerMapEB_[itow].size() << std::endl; + + // loop over all strips assigned to this trigger tower + int nxstals = 0; + for (unsigned int iStrip = 0; iStrip < towerMapEB_[itow].size(); ++iStrip) { + if (debug_) + LogDebug("") << " Data for STRIP num " << iStrip << std::endl; + std::vector &dataFrames = + (towerMapEB_[index])[iStrip].second; //vector of dataframes for this strip, size; nr of crystals/strip + + nxstals = (towerMapEB_[index])[iStrip].first; + if (nxstals <= 0) + continue; + if (debug_) + LogDebug("") << " Number of xTals " << nxstals << std::endl; + + //const EcalTriggerElectronicsId elId = theMapping_->getTriggerElectronicsId(dataFrames[0].id()); + + // loop over the xstals in a strip + + for (int iXstal = 0; iXstal < nxstals; iXstal++) { + const EBDetId &myid = dataFrames[iXstal].id(); + + nXinBCP++; + if (debug_) { + LogDebug("") << " Data for TOWER num " << itow << " index " << index << " TowerId " << thisTower << " size " + << towerMapEB_[itow].size() << std::endl; + LogDebug("") << "nXinBCP " << nXinBCP << " myid rawId " << myid.rawId() << " xTal iEta " << myid.ieta() + << " iPhi " << myid.iphi() << std::endl; + } + + tp = EcalEBPhase2TriggerPrimitiveDigi(myid); + tp.setSize(nrSamples_); + + iChannel++; + if (debug_) { + LogDebug("") << " " << std::endl; + LogDebug("") << " ****** iChannel " << iChannel << std::endl; + for (int i = 0; i < dataFrames[iXstal].size(); i++) { + LogDebug("") << " " << dataFrames[iXstal][i].adc(); + } + LogDebug("") << " " << std::endl; + } + + if (debug_) { + LogDebug("") << std::endl; + EBDetId id = dataFrames[iXstal].id(); + LogDebug("") << "iXstal= " << iXstal << 
std::endl; + LogDebug("") << "iXstal= " << iXstal << " id " << id << " EcalDataFrame_Ph2 is: " << std::endl; + for (int i = 0; i < dataFrames[iXstal].size(); i++) { + LogDebug("") << " " << std::dec << dataFrames[iXstal][i].adc(); + } + LogDebug("") << std::endl; + } + + // Call the linearizer + this->getLinearizer()->setParameters(dataFrames[iXstal].id(), ecaltpPed_, ecaltpLin_, ecaltpgBadX_); + this->getLinearizer()->process(dataFrames[iXstal], lin_out_); + + for (unsigned int i = 0; i < lin_out_.size(); i++) { + if (lin_out_[i] > 0X3FFF) + lin_out_[i] = 0X3FFF; + } + + if (debug_) { + LogDebug("") << "EcalEBPhase2TrigPrimAlgo output of linearize for channel " << iXstal << std::endl; + for (unsigned int i = 0; i < lin_out_.size(); i++) { + LogDebug("") << " " << std::dec << lin_out_[i]; + } + LogDebug("") << std::endl; + } + + // call spike finder right after the linearizer + this->getSpikeTagger()->setParameters(dataFrames[iXstal].id(), ecaltpPed_, ecaltpLin_, ecaltpgBadX_); + bool isASpike = this->getSpikeTagger()->process(lin_out_); + + //if (!isASpike) { + + // Call the amplitude reconstructor + this->getAmplitudeFinder()->setParameters(myid.rawId(), ecaltpgAmplWeightMap_, ecaltpgWeightGroup_); + this->getAmplitudeFinder()->process(lin_out_, filt_out_); + + if (debug_) { + LogDebug("") << "EcalEBPhase2TrigPrimAlgo output of amp finder is a vector of size: " << std::dec + << time_out_.size() << std::endl; + for (unsigned int ix = 0; ix < filt_out_.size(); ix++) { + LogDebug("") << std::dec << filt_out_[ix] << " "; + } + LogDebug("") << std::endl; + } + + if (debug_) { + LogDebug("") << " Ampl " + << " "; + for (unsigned int ix = 0; ix < filt_out_.size(); ix++) { + LogDebug("") << std::dec << filt_out_[ix] << " "; + } + LogDebug("") << std::endl; + } + + // call time finder + this->getTimeFinder()->setParameters(myid.rawId(), ecaltpgTimeWeightMap_, ecaltpgWeightGroup_); + this->getTimeFinder()->process(lin_out_, filt_out_, time_out_); + + if (debug_) { + 
LogDebug("") << " Time " + << " "; + for (unsigned int ix = 0; ix < time_out_.size(); ix++) { + LogDebug("") << std::dec << time_out_[ix] << " "; + } + LogDebug("") << std::endl; + } + + if (debug_) { + LogDebug("") << "EcalEBPhase2TrigPrimAlgo output of timefinder is a vector of size: " << std::dec + << time_out_.size() << std::endl; + for (unsigned int ix = 0; ix < time_out_.size(); ix++) { + LogDebug("") << std::dec << time_out_[ix] << " "; + } + LogDebug("") << std::endl; + } + + this->getTPFormatter()->process(filt_out_, time_out_, outEt_, outTime_); + + if (debug_) { + LogDebug("") << " compressed Et " + << " "; + for (unsigned int iSample = 0; iSample < outEt_.size(); ++iSample) { + LogDebug("") << outEt_[iSample] << " "; + } + LogDebug("") << std::endl; + + LogDebug("") << " compressed time " + << " "; + for (unsigned int iSample = 0; iSample < outEt_.size(); ++iSample) { + LogDebug("") << outTime_[iSample] << " "; + } + LogDebug("") << std::endl; + } + + if (debug_) { + LogDebug("") << " EcalEBPhase2TrigPrimAlgo after getting the formatter " << std::endl; + for (unsigned int iSample = 0; iSample < outEt_.size(); ++iSample) { + LogDebug("") << " outEt " << outEt_[iSample] << " outTime " << outTime_[iSample] << " "; + } + LogDebug("") << std::endl; + } + + // } not a spike + + // create the final TP samples + int etInADC = 0; + ; + int64_t time = -999; + int nSam = 0; + for (int iSample = 0; iSample < 16; ++iSample) { + etInADC = outEt_[iSample]; + time = outTime_[iSample]; + if (debug_) { + LogDebug("") << "TrigPrimAlgo outEt " << outEt_[iSample] << " outTime " << outTime_[iSample] << std::endl; + LogDebug("") << "TrigPrimAlgo etInADCt " << outEt_[iSample] << " outTime " << time << std::endl; + } + + tp.setSample(nSam, EcalEBPhase2TriggerPrimitiveSample(etInADC, isASpike, time)); + nSam++; + } + + result.push_back(tp); + + } // Loop over the xStals + + } //loop over strips in one tower + + if (debug_) { + if (nXinBCP > 0) + LogDebug("") << " Accepted xTals 
" << nXinBCP << std::endl; + } + } +} + +//---------------------------------------------------------------------- + +int EcalEBPhase2TrigPrimAlgo::findStripNr(const EBDetId &id) { + int stripnr; + int n = ((id.ic() - 1) % 100) / 20; //20 corresponds to 4 * ecal_barrel_crystals_per_strip FIXME!! + if (id.ieta() < 0) + stripnr = n + 1; + else + stripnr = nbMaxStrips_ - n; + return stripnr; +} diff --git a/SimCalorimetry/EcalEBTrigPrimProducers/plugins/EcalEBTrigPrimPhase2ESProducer.cc b/SimCalorimetry/EcalEBTrigPrimProducers/plugins/EcalEBTrigPrimPhase2ESProducer.cc new file mode 100644 index 0000000000000..6a3772fdf3ccf --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimProducers/plugins/EcalEBTrigPrimPhase2ESProducer.cc @@ -0,0 +1,722 @@ +// user include files +#include "DataFormats/EcalDigi/interface/EcalConstants.h" +#include "FWCore/Framework/interface/ESProducer.h" +#include "FWCore/Framework/interface/ModuleFactory.h" +#include "FWCore/Framework/interface/ESHandle.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/ParameterSet/interface/FileInPath.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +// +#include "CondFormats/DataRecord/interface/EcalTPGCrystalStatusRcd.h" +#include "CondFormats/DataRecord/interface/EcalTPGPhysicsConstRcd.h" +#include "CondFormats/DataRecord/interface/EcalTPGStripStatusRcd.h" +// commented lines are for a reminder that in future we might need to implement something alike +//#include "CondFormats/DataRecord/interface/EcalTPGSpikeRcd.h" +//#include "CondFormats/DataRecord/interface/EcalTPGTowerStatusRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGLinearizationConstRcd.h" +#include "CondFormats/DataRecord/interface/EcalTPGWeightGroupRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGAmplWeightIdMapRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGTimeWeightIdMapRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGPedestalsRcd.h" 
+#include "CondFormats/EcalObjects/interface/EcalTPGCrystalStatus.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" +#include "CondFormats/EcalObjects/interface/EcalTPGPhysicsConst.h" +//#include "CondFormats/EcalObjects/interface/EcalTPGSpike.h" +//#include "CondFormats/EcalObjects/interface/EcalTPGTowerStatus.h" +#include "CondFormats/EcalObjects/interface/EcalTPGStripStatus.h" +#include "CondFormats/EcalObjects/interface/EcalTPGWeightGroup.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" + +#include "zlib.h" +#include +#include +#include +#include + +// +// class declaration +// + +/** \class EcalEBTrigPrimPhase2ESProducer +\author L. Lutton, N. Marinelli - Univ. of Notre Dame + Description: forPhase II +*/ + +class EcalEBTrigPrimPhase2ESProducer : public edm::ESProducer { +public: + EcalEBTrigPrimPhase2ESProducer(const edm::ParameterSet &); + ~EcalEBTrigPrimPhase2ESProducer() override; + + std::unique_ptr produceLinearizationConst( + const EcalEBPhase2TPGLinearizationConstRcd &); + std::unique_ptr producePedestals(const EcalEBPhase2TPGPedestalsRcd &); + std::unique_ptr produceAmpWeight(const EcalEBPhase2TPGAmplWeightIdMapRcd &); + std::unique_ptr produceTimeWeight(const EcalEBPhase2TPGTimeWeightIdMapRcd &); + std::unique_ptr produceWeightGroup(const EcalTPGWeightGroupRcd &); + std::unique_ptr producePhysicsConst(const EcalTPGPhysicsConstRcd &); + std::unique_ptr produceBadX(const EcalTPGCrystalStatusRcd &); + + // These commented lines are a reminder that in the future we might need to implement something alike + //std::unique_ptr produceLutGroup(const EcalTPGLutGroupRcd &); + //std::uniq//std::unique_ptr produceBadStrip(const EcalTPGStripStatusRcd &); + //std::unique_ptr produceBadTT(const EcalTPGTowerStatusRcd &); + //std::unique_ptr 
produceSpike(const EcalTPGSpikeRcd &); + +private: + void parseTextFile(); + std::vector getRange(int subdet, int smNb, int towerNbInSm, int stripNbInTower = 0, int xtalNbInStrip = 0); + void parseWeightsFile(); + + // ----------member data --------------------------- + std::string dbFilename_; + // std::string configFilename_; + const edm::FileInPath configFilename_; + bool flagPrint_; + std::map> mapXtal_; + std::map> mapStrip_[2]; + std::map> mapTower_[2]; + std::map> mapWeight_; + std::map> mapTimeWeight_; + std::map> mapXtalToGroup_; + std::map> mapXtalToLin_; + std::map> mapPhys_; + static const int maxSamplesUsed_; + static const int nLinConst_; +}; + +// +// input stream from a gz file +// + +struct GzInputStream { + gzFile gzf; + char buffer[256]; + std::istringstream iss; + bool eof; + GzInputStream(const char *file) : eof(false) { + gzf = gzopen(file, "rb"); + edm::LogInfo("EcalEBTrigPrimPhase2ESProducer") << " New weight file " << file; + if (gzf == Z_NULL) { + eof = true; + edm::LogWarning("EcalEBTrigPrimPhase2ESProducer") << "Database file " << file << " not found!!!"; + } else + readLine(); + } + void readLine() { + char *res = gzgets(gzf, buffer, 256); + eof = (res == Z_NULL); + if (!eof) { + iss.clear(); + iss.str(buffer); + } + } + ~GzInputStream() { gzclose(gzf); } + explicit operator bool() const { return ((eof == true) ? 
false : !iss.fail()); } +}; + +template +GzInputStream &operator>>(GzInputStream &gis, T &var) { + while ((bool)gis && !(gis.iss >> var)) { + gis.readLine(); + } + return gis; +} + +// +// constructors and destructor +// + +const int EcalEBTrigPrimPhase2ESProducer::maxSamplesUsed_ = 12; +const int EcalEBTrigPrimPhase2ESProducer::nLinConst_ = 8; + +EcalEBTrigPrimPhase2ESProducer::EcalEBTrigPrimPhase2ESProducer(const edm::ParameterSet &iConfig) + : dbFilename_(iConfig.getUntrackedParameter("DatabaseFile", "")), + configFilename_(iConfig.getParameter("WeightTextFile")), + flagPrint_(iConfig.getParameter("WriteInFile")) { + parseWeightsFile(); + + // the following lines are needed to tell the framework what + // data is being produced + setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceLinearizationConst); + setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::producePedestals); + setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceAmpWeight); + setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceTimeWeight); + setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceWeightGroup); + setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceBadX); + // the following commented lines as a reminder for items which might need to be implemented for Phase2 + //setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::producePhysicsConst); + //setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceBadStrip); + //setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceBadTT); + //setWhatProduced(this, &EcalEBTrigPrimPhase2ESProducer::produceSpike); +} + +EcalEBTrigPrimPhase2ESProducer::~EcalEBTrigPrimPhase2ESProducer() {} + +// +// member functions +// + +// ------------ method called to produce the data ------------ + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::producePedestals( + const EcalEBPhase2TPGPedestalsRcd &iRecord) { + auto prod = std::make_unique(); + + std::map>::const_iterator it; + for (it = 
mapXtalToLin_.begin(); it != mapXtalToLin_.end(); it++) { + EBDetId myEBDetId = EBDetId(it->first); + EcalEBPhase2TPGPedestal ped; + + ped.mean_x10 = (it->second)[0]; + ped.mean_x1 = (it->second)[3]; + prod->insert(std::make_pair(myEBDetId, ped)); + } + + return prod; +} + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::produceLinearizationConst( + const EcalEBPhase2TPGLinearizationConstRcd &iRecord) { + auto prod = std::make_unique(); + + std::map>::const_iterator it; + for (it = mapXtalToLin_.begin(); it != mapXtalToLin_.end(); it++) { + EcalEBPhase2TPGLinearizationConstant param; + + param.mult_x10 = (it->second)[1]; + param.mult_x1 = (it->second)[5]; + param.shift_x10 = (it->second)[2]; + param.shift_x1 = (it->second)[6]; + param.i2cSub_x10 = (it->second)[3]; + param.i2cSub_x1 = (it->second)[7]; + prod->setValue(it->first, param); + } + + return prod; +} + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::produceAmpWeight( + const EcalEBPhase2TPGAmplWeightIdMapRcd &iRecord) { + auto prod = std::make_unique(); + + EcalEBPhase2TPGAmplWeights weights; + std::map>::const_iterator it; + for (it = mapWeight_.begin(); it != mapWeight_.end(); it++) { + weights.setValues((it->second)[0], + (it->second)[1], + (it->second)[2], + (it->second)[3], + (it->second)[4], + (it->second)[5], + (it->second)[6], + (it->second)[7], + (it->second)[8], + (it->second)[9], + (it->second)[10], + (it->second)[11]); + prod->setValue(it->first, weights); + } + + return prod; +} + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::produceTimeWeight( + const EcalEBPhase2TPGTimeWeightIdMapRcd &iRecord) { + auto prod = std::make_unique(); + + EcalEBPhase2TPGTimeWeights weights_time; + std::map>::const_iterator it; + for (it = mapTimeWeight_.begin(); it != mapTimeWeight_.end(); it++) { + weights_time.setValues((it->second)[0], + (it->second)[1], + (it->second)[2], + (it->second)[3], + (it->second)[4], + (it->second)[5], + (it->second)[6], + (it->second)[7], + (it->second)[8], + (it->second)[9], + 
(it->second)[10], + (it->second)[11]); + prod->setValue(it->first, weights_time); + } + + return prod; +} + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::produceWeightGroup( + const EcalTPGWeightGroupRcd &iRecord) { + auto prod = std::make_unique(); + + const int NGROUPS = 61200; + + for (int iGroup = 0; iGroup < NGROUPS; iGroup++) { + std::map>::const_iterator it; + for (it = mapXtalToGroup_.begin(); it != mapXtalToGroup_.end(); it++) { + prod->setValue(it->first, it->second[0]); + } + } + + return prod; +} + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::producePhysicsConst( + const EcalTPGPhysicsConstRcd &iRecord) { + auto prod = std::make_unique(); + // EcalEBTrigPrimPhase2ESProducer::producePhysicsConst Needs updating if we want to keep it + + parseTextFile(); + std::map>::const_iterator it; + for (it = mapPhys_.begin(); it != mapPhys_.end(); it++) { + EcalTPGPhysicsConst::Item item; + item.EtSat = (it->second)[0]; + item.ttf_threshold_Low = (it->second)[1]; + item.ttf_threshold_High = (it->second)[2]; + item.FG_lowThreshold = (it->second)[3]; + item.FG_highThreshold = (it->second)[4]; + item.FG_lowRatio = (it->second)[5]; + item.FG_highRatio = (it->second)[6]; + prod->setValue(it->first, item); + } + + return prod; +} + +std::unique_ptr EcalEBTrigPrimPhase2ESProducer::produceBadX( + const EcalTPGCrystalStatusRcd &iRecord) { + auto prod = std::make_unique(); + + parseTextFile(); + std::map>::const_iterator it; + for (it = mapXtal_.begin(); it != mapXtal_.end(); it++) { + EcalTPGCrystalStatusCode badXValue; + badXValue.setStatusCode(0); + prod->setValue(it->first, badXValue); + } + return prod; +} + +void EcalEBTrigPrimPhase2ESProducer::parseWeightsFile() { + uint32_t id; + std::string dataCard; + std::vector param; + + int data; + std::string filename = configFilename_.fullPath(); + ; + std::string finalFileName; + size_t slash = filename.find('/'); + if (slash != 0) { + edm::FileInPath fileInPath(filename); + finalFileName = fileInPath.fullPath(); + } 
else { + finalFileName = filename; + edm::LogWarning("EcalEBTPGESProducer") + << "Couldnt find database file via fileinpath trying with pathname directly!!"; + } + + GzInputStream gis(finalFileName.c_str()); + while (gis >> dataCard) { + if (dataCard == "WEIGHTAMP") { + gis >> std::dec >> id; + + if (flagPrint_) { + std::cout << dataCard << " " << std::dec << id << std::endl; + } + + param.clear(); + + std::string st6; + for (int i = 0; i < maxSamplesUsed_; i++) { + gis >> std::hex >> data; + param.push_back(data); + /// debug + + if (flagPrint_) { + std::ostringstream oss; + oss << std::hex << data; + std::string result4 = oss.str(); + + st6.append("0x"); + st6.append(result4); + st6.append(" "); + } + } + + // debug + if (flagPrint_) { + std::cout << st6 << std::endl; + std::cout << std::endl; + } + + // std::cout << " WEIGHTAMP id " << id << std::endl; + mapWeight_[id] = param; + } + + if (dataCard == "WEIGHTTIME") { + gis >> std::dec >> id; + + if (flagPrint_) { + std::cout << dataCard << " " << std::dec << id << std::endl; + } + + param.clear(); + + std::string st6; + for (int i = 0; i < maxSamplesUsed_; i++) { + gis >> std::hex >> data; + //std::cout << " Parse time weight filling data " << data; + param.push_back(data); + /// debug + + if (flagPrint_) { + std::ostringstream oss; + oss << std::hex << data; + std::string result4 = oss.str(); + + st6.append("0x"); + st6.append(result4); + st6.append(" "); + } + } + + // debug + if (flagPrint_) { + std::cout << st6 << std::endl; + std::cout << std::endl; + } + mapTimeWeight_[id] = param; + } + + if (dataCard == "CRYSTAL") { + gis >> std::dec >> id; + + if (flagPrint_) { + std::cout << dataCard << " " << std::dec << id << std::endl; + } + + param.clear(); + std::string st6; + gis >> std::dec >> data; + param.push_back(data); + + if (flagPrint_) { + std::ostringstream oss; + oss << std::dec << data; + std::string result4 = oss.str(); + st6.append(result4); + st6.append(" "); + std::cout << st6 << std::endl; + 
std::cout << std::endl; + } + mapXtalToGroup_[id] = param; + } + + if (dataCard == "LINCONST") { + gis >> std::dec >> id; + + if (flagPrint_) { + std::cout << dataCard << " " << std::dec << id << std::endl; + } + + param.clear(); + std::string st6; + std::string st7; + + for (int i = 0; i < nLinConst_; i++) { + gis >> std::hex >> data; + param.push_back(data); + + if (flagPrint_) { + if (i < 4) { + std::ostringstream oss; + oss << std::hex << data; + std::string result6 = oss.str(); + st6.append("0x"); + st6.append(result6); + if (i != 3) + st6.append(" "); + } else if (i < 8) { + std::ostringstream oss; + oss << std::hex << data; + std::string result7 = oss.str(); + st7.append("0x"); + st7.append(result7); + if (i != 7) + st7.append(" "); + } + } + } + if (flagPrint_) { + std::cout << st6 << std::endl; + std::cout << st7 << std::endl; + } + mapXtalToLin_[id] = param; + } + } +} + +void EcalEBTrigPrimPhase2ESProducer::parseTextFile() { + if (!mapXtal_.empty()) + return; // just parse the file once! 
+ + uint32_t id; + std::string dataCard; + std::string line; + std::ifstream infile; + std::vector param; + std::vector paramF; + int NBstripparams[2] = {4, 4}; + unsigned int data; + + std::string bufString; + std::string iString; + std::string fString; + std::string filename = "SimCalorimetry/EcalTrigPrimProducers/data/" + dbFilename_; + std::string finalFileName; + size_t slash = dbFilename_.find('/'); + if (slash != 0) { + edm::FileInPath fileInPath(filename); + finalFileName = fileInPath.fullPath(); + } else { + finalFileName = dbFilename_; + edm::LogWarning("EcalTPG") << "Couldnt find database file via fileinpath, " + "trying with pathname directly!!"; + } + + int k = 0; + + GzInputStream gis(finalFileName.c_str()); + while (gis >> dataCard) { + if (dataCard == "CRYSTAL") { + gis >> std::dec >> id; + + std::string st3; + std::string st4; + std::string st5; + + if (flagPrint_) { + // Print this comment only one time + if (k == 0) + std::cout << "COMMENT ====== barrel crystals ====== " << std::endl; + + if (k == 61200) + std::cout << "COMMENT ====== endcap crystals ====== " << std::endl; + + k = k + 1; + + std::cout << dataCard << " " << std::dec << id << std::endl; + } + + param.clear(); + for (int i = 0; i < 9; i++) { + gis >> std::hex >> data; + param.push_back(data); + + if (flagPrint_) { + if (i < 3) { + std::ostringstream oss; + oss << std::hex << data; + std::string result1 = oss.str(); + + st3.append("0x"); + st3.append(result1); + if (i != 2) + st3.append(" "); + + } else if (i > 2 && i < 6) { + std::ostringstream oss; + oss << std::hex << data; + std::string result2 = oss.str(); + + st4.append("0x"); + st4.append(result2); + if (i != 5) + st4.append(" "); + } else if (i > 5 && i < 9) { + std::ostringstream oss; + oss << std::hex << data; + std::string result3 = oss.str(); + + st5.append("0x"); + st5.append(result3); + if (i != 8) + st5.append(" "); + } + } + + } // end for + + if (flagPrint_) { + std::cout << " " << st3 << std::endl; + std::cout << " 
" << st4 << std::endl; + std::cout << " " << st5 << std::endl; + } + + mapXtal_[id] = param; + } + + if (dataCard == "STRIP_EB") { + gis >> std::dec >> id; + + std::string st1; + + if (flagPrint_) + std::cout << dataCard << " " << std::dec << id << std::endl; + + param.clear(); + for (int i = 0; i < NBstripparams[0]; i++) { + gis >> std::hex >> data; + param.push_back(data); + + if (flagPrint_) { + if (i == 0) { + std::cout << "0x" << std::hex << data << std::endl; + } else if (i == 1) { + std::cout << "" << std::hex << data << std::endl; + } else if (i > 1) { + std::ostringstream oss; + if (i == 2) { + oss << "0x" << std::hex << data; + std::string result4 = oss.str(); + st1.append(result4); + } else if (i == 3) { + std::ostringstream oss; + oss << " 0x" << std::hex << data; + std::string result5 = oss.str(); + + st1.append(result5); + std::cout << "" << st1 << std::endl; + } + } + } + } + + mapStrip_[0][id] = param; + } + + if (dataCard == "STRIP_EE") { + gis >> std::dec >> id; + + std::string st6; + + if (flagPrint_) { + std::cout << dataCard << " " << std::dec << id << std::endl; + } + + param.clear(); + for (int i = 0; i < NBstripparams[1]; i++) { + gis >> std::hex >> data; + param.push_back(data); + + if (flagPrint_) { + if (i == 0) { + std::cout << "0x" << std::hex << data << std::endl; + } else if (i == 1) { + std::cout << " " << std::hex << data << std::endl; + } else if (i > 1) { + std::ostringstream oss; + if (i == 2) { + oss << "0x" << std::hex << data; + std::string result4 = oss.str(); + st6.append(result4); + } else if (i == 3) { + std::ostringstream oss; + oss << " 0x" << std::hex << data; + std::string result5 = oss.str(); + + st6.append(result5); + std::cout << "" << st6 << std::endl; + } + } + } + } + + mapStrip_[1][id] = param; + } + + if (dataCard == "TOWER_EE") { + gis >> std::dec >> id; + + if (flagPrint_) + std::cout << dataCard << " " << std::dec << id << std::endl; + + param.clear(); + for (int i = 0; i < 2; i++) { + gis >> std::hex >> 
data; + param.push_back(data); + + if (flagPrint_) { + if (i == 1) { + std::cout << "0x" << std::dec << data << std::endl; + } else { + std::cout << " " << std::dec << data << std::endl; + } + } + } + + mapTower_[1][id] = param; + } + + if (dataCard == "TOWER_EB") { + gis >> std::dec >> id; + + if (flagPrint_) + std::cout << dataCard << " " << std::dec << id << std::endl; + + param.clear(); + for (int i = 0; i < 3; i++) { + gis >> std::dec >> data; + + if (flagPrint_) { + std::cout << " " << std::dec << data << std::endl; + } + + param.push_back(data); + } + + mapTower_[0][id] = param; + } + } +} + +/// This method is not used at all, however is a reminder that something alike will probably be needed once the mapping EB to BCPs will be in place +std::vector EcalEBTrigPrimPhase2ESProducer::getRange( + int subdet, int tccNb, int towerNbInTcc, int stripNbInTower, int xtalNbInStrip) { + std::vector range; + if (subdet == 0) { + // Barrel + range.push_back(37); // stccNbMin + range.push_back(73); // tccNbMax + range.push_back(1); // towerNbMin + range.push_back(69); // towerNbMax + range.push_back(1); // stripNbMin + range.push_back(6); // stripNbMax + range.push_back(1); // xtalNbMin + range.push_back(6); // xtalNbMax + } + + if (tccNb > 0) { + range[0] = tccNb; + range[1] = tccNb + 1; + } + if (towerNbInTcc > 0) { + range[2] = towerNbInTcc; + range[3] = towerNbInTcc + 1; + } + if (stripNbInTower > 0) { + range[4] = stripNbInTower; + range[5] = stripNbInTower + 1; + } + if (xtalNbInStrip > 0) { + range[6] = xtalNbInStrip; + range[7] = xtalNbInStrip + 1; + } + + return range; +} + +DEFINE_FWK_EVENTSETUP_MODULE(EcalEBTrigPrimPhase2ESProducer); diff --git a/SimCalorimetry/EcalEBTrigPrimProducers/plugins/EcalEBTrigPrimPhase2Producer.cc b/SimCalorimetry/EcalEBTrigPrimProducers/plugins/EcalEBTrigPrimPhase2Producer.cc new file mode 100644 index 0000000000000..3db2b7efd790a --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimProducers/plugins/EcalEBTrigPrimPhase2Producer.cc @@ 
-0,0 +1,244 @@ +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/Utilities/interface/EDGetToken.h" +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "FWCore/Utilities/interface/InputTag.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +// +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/Provenance/interface/ProductID.h" +#include "DataFormats/Provenance/interface/ParameterSetID.h" +#include "DataFormats/Provenance/interface/Provenance.h" +#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" +#include "CondFormats/EcalObjects/interface/EcalLiteDTUPedestals.h" +#include "CondFormats/DataRecord/interface/EcalLiteDTUPedestalsRcd.h" + +#include "CondFormats/DataRecord/interface/EcalTPGCrystalStatusRcd.h" +#include "CondFormats/EcalObjects/interface/EcalTPGCrystalStatus.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGPedestalsRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGLinearizationConstRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGAmplWeightIdMapRcd.h" +#include "CondFormats/DataRecord/interface/EcalEBPhase2TPGTimeWeightIdMapRcd.h" +#include "CondFormats/DataRecord/interface/EcalTPGWeightGroupRcd.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGAmplWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGTimeWeightIdMap.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGLinearizationConst.h" +#include "CondFormats/EcalObjects/interface/EcalEBPhase2TPGPedestals.h" +#include "CondFormats/EcalObjects/interface/EcalTPGWeightGroup.h" +#include "CondFormats/DataRecord/interface/EcalTPGTowerStatusRcd.h" +#include 
"CondFormats/DataRecord/interface/EcalTPGSpikeRcd.h" +#include "CondFormats/EcalObjects/interface/EcalTPGSpike.h" +#include "CondFormats/EcalObjects/interface/EcalTPGTowerStatus.h" + +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "Geometry/CaloTopology/interface/EcalTrigTowerConstituentsMap.h" +#include "Geometry/Records/interface/CaloGeometryRecord.h" + +// We keep these lines for future posssible necessary additions +//#include "CondFormats/EcalObjects/interface/EcalTPGTowerStatus.h" +//#include "CondFormats/DataRecord/interface/EcalTPGStripStatusRcd.h" +//#include "CondFormats/EcalObjects/interface/EcalTPGStripStatus.h" +#include "SimCalorimetry/EcalEBTrigPrimAlgos/interface/EcalEBPhase2TrigPrimAlgo.h" +#include + +// Class declaration +/** \class EcalEBTrigPrimPhase2Producer \author L. Lutton, N. Marinelli - Univ. of Notre Dame Description: forPhase II It consumes the new Phase2 digis based on the new EB electronics and plugs in the main steering algo for TP emulation It produces the EcalEBPhase2TrigPrimDigiCollection */ + +class EcalEBPhase2TrigPrimAlgo; + +class EcalEBTrigPrimPhase2Producer : public edm::stream::EDProducer<> { +public: + explicit EcalEBTrigPrimPhase2Producer(const edm::ParameterSet& conf); + + ~EcalEBTrigPrimPhase2Producer() override; + + void beginRun(const edm::Run& run, const edm::EventSetup& es) override; + void endRun(const edm::Run&, const edm::EventSetup&) override; + void produce(edm::Event& e, const edm::EventSetup& c) override; + static void fillDescriptions(edm::ConfigurationDescriptions&); + +private: + std::unique_ptr algo_; + bool debug_; + bool famos_; + int nEvent_; + edm::EDGetTokenT tokenEBdigi_; + edm::ESGetToken + theEcalEBPhase2TPGLinearization_Token_; + edm::ESGetToken theEcalEBPhase2TPGPedestals_Token_; + + edm::ESGetToken theEcalTPGPedestals_Token_; + + edm::ESGetToken theEcalTPGCrystalStatus_Token_; + edm::ESGetToken theEcalEBTPGAmplWeightIdMap_Token_; + edm::ESGetToken 
theEcalEBTPGTimeWeightIdMap_Token_; + + edm::ESGetToken theEcalTPGWeightGroup_Token_; + + edm::ESGetToken theEcalTPGTowerStatus_Token_; + edm::ESGetToken theEcalTPGSpike_Token_; + + edm::ESGetToken eTTmapToken_; + edm::ESGetToken theGeometryToken_; + + int binOfMaximum_; + bool fillBinOfMaximumFromHistory_; + + unsigned long long getRecords(edm::EventSetup const& setup); + unsigned long long cacheID_; +}; + +EcalEBTrigPrimPhase2Producer::EcalEBTrigPrimPhase2Producer(const edm::ParameterSet& iConfig) + : debug_(iConfig.getParameter("Debug")), + famos_(iConfig.getParameter("Famos")), + binOfMaximum_(iConfig.getParameter("binOfMaximum")) { + tokenEBdigi_ = consumes(iConfig.getParameter("barrelEcalDigis")); + + eTTmapToken_ = esConsumes(); + theGeometryToken_ = esConsumes(); + + theEcalTPGPedestals_Token_ = + esConsumes(); + theEcalEBPhase2TPGPedestals_Token_ = + esConsumes(); + + theEcalTPGCrystalStatus_Token_ = + esConsumes(); + theEcalEBPhase2TPGLinearization_Token_ = + esConsumes(); + theEcalEBTPGAmplWeightIdMap_Token_ = + esConsumes(); + theEcalEBTPGTimeWeightIdMap_Token_ = + esConsumes(); + theEcalTPGWeightGroup_Token_ = esConsumes(); + + //register your products + produces(); +} + +void EcalEBTrigPrimPhase2Producer::beginRun(edm::Run const& run, edm::EventSetup const& setup) { + auto const& theGeometry = setup.getData(theGeometryToken_); + auto const& eTTmap = setup.getData(eTTmapToken_); + + algo_ = std::make_unique(&eTTmap, &theGeometry, binOfMaximum_, debug_); + + // get a first version of the records + cacheID_ = this->getRecords(setup); + + nEvent_ = 0; +} + +void EcalEBTrigPrimPhase2Producer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + edm::ParameterSetDescription desc; + desc.add("Debug", false); + desc.add("Famos", false); + desc.add("BinOfMaximum", 6); // this needs to be at the same value used for the Phase2 LiteDTU digis ! 
+ desc.add("barrelEcalDigis", edm::InputTag("simEcalUnsuppressedDigis")); +} + +unsigned long long EcalEBTrigPrimPhase2Producer::getRecords(edm::EventSetup const& setup) { + // get parameter records for xtals + auto theEcalEBPhase2TPGLinearization_handle = setup.getHandle(theEcalEBPhase2TPGLinearization_Token_); + const EcalEBPhase2TPGLinearizationConst* ecaltpLin = theEcalEBPhase2TPGLinearization_handle.product(); + // + edm::ESHandle theEcalTPGPedestals_handle = setup.getHandle(theEcalTPGPedestals_Token_); + const EcalLiteDTUPedestalsMap* ecaltpPed = theEcalTPGPedestals_handle.product(); + // + // auto theEcalEBPhase2TPGPedestals_handle = setup.getHandle(theEcalEBPhase2TPGPedestals_Token_); + //const EcalEBPhase2TPGPedestalsMap* ebTPPedestals = theEcalEBPhase2TPGPedestals_handle.product(); + // + edm::ESHandle theEcalTPGCrystalStatus_handle = setup.getHandle(theEcalTPGCrystalStatus_Token_); + const EcalTPGCrystalStatus* ecaltpgBadX = theEcalTPGCrystalStatus_handle.product(); + // + edm::ESHandle theEcalEBTPGAmplWeightIdMap_handle = + setup.getHandle(theEcalEBTPGAmplWeightIdMap_Token_); + const EcalEBPhase2TPGAmplWeightIdMap* ecaltpgAmplWeightMap = theEcalEBTPGAmplWeightIdMap_handle.product(); + // + edm::ESHandle theEcalEBTPGTimeWeightIdMap_handle = + setup.getHandle(theEcalEBTPGTimeWeightIdMap_Token_); + const EcalEBPhase2TPGTimeWeightIdMap* ecaltpgTimeWeightMap = theEcalEBTPGTimeWeightIdMap_handle.product(); + // + edm::ESHandle theEcalTPGWeightGroup_handle = setup.getHandle(theEcalTPGWeightGroup_Token_); + const EcalTPGWeightGroup* ecaltpgWeightGroup = theEcalTPGWeightGroup_handle.product(); + // These commented out lines are for reminder for possible needed implementations + //edm::ESHandle theEcalTPGTowerStatus_handle = setup.getHandle(theEcalTPGTowerStatus_Token_); + //const EcalTPGTowerStatus* ecaltpgBadTT = theEcalTPGTowerStatus_handle.product(); + // + //edm::ESHandle theEcalTPGSpike_handle = setup.getHandle(theEcalTPGSpike_Token_); + //const 
EcalTPGSpike* ecaltpgSpike = theEcalTPGSpike_handle.product(); + + //////////////// + algo_->setPointers(ecaltpPed, ecaltpLin, ecaltpgBadX, ecaltpgAmplWeightMap, ecaltpgTimeWeightMap, ecaltpgWeightGroup); + + return setup.get().cacheIdentifier(); + // return setup.get().cacheIdentifier(); +} + +void EcalEBTrigPrimPhase2Producer::endRun(edm::Run const& run, edm::EventSetup const& setup) { algo_.reset(); } + +EcalEBTrigPrimPhase2Producer::~EcalEBTrigPrimPhase2Producer() {} + +// ------------ method called to produce the data ------------ +void EcalEBTrigPrimPhase2Producer::produce(edm::Event& e, const edm::EventSetup& iSetup) { + nEvent_++; + + // get input collections + edm::Handle barrelDigiHandle; + + if (!e.getByToken(tokenEBdigi_, barrelDigiHandle)) { + edm::EDConsumerBase::Labels labels; + labelsForToken(tokenEBdigi_, labels); + edm::LogWarning("EcalTPG") << " Couldnt find Barrel digis " << labels.module << " and label " + << labels.productInstance << "!!!"; + } + const auto* ebdigi = barrelDigiHandle.product(); + + if (debug_) + LogDebug("EcalEBTrigPrimPhase2Producer") + << " EcalTPG" + << " =================> Treating event " << nEvent_ << ", Number of EB digis " + << barrelDigiHandle.product()->size() << std::endl; + + auto pOut = std::make_unique(); + + // invoke algorithm + algo_->run(ebdigi, *pOut); + + if (debug_) { + LogDebug("EcalEBTrigPrimPhase2Producer") + << "produce" + << " For Barrel " << pOut->size() << " TP Digis were produced" << std::endl; + } + + // debug prints if TP > 0. 
The number of TP with Et>0 is also used later for a LogInfo + int nonZeroTP = 0; + int nXstal = 0; + for (unsigned int i = 0; i < pOut->size(); ++i) { + nXstal++; + for (int isam = 0; isam < (*pOut)[i].size(); ++isam) { + if ((*pOut)[i][isam].encodedEt() > 0) { + nonZeroTP++; + if (debug_) { + LogDebug("EcalEBTrigPrimPhase2Producer") + << " For xStal n " << nXstal << " xTsal Id " << (((*pOut)[i])).id() << ", TP is " << (*pOut)[i] + << " (*pOut)[i][isam].raw() " << (*pOut)[i][isam].raw() << " (*pOut)[i][isam].encodedEt() " + << (*pOut)[i][isam].encodedEt() << " (*pOut)[i][isam].time() " << (*pOut)[i][isam].time() << std::endl; + } + } + } + } + + edm::LogInfo("EcalEBTrigPrimPhase2Producer") + << "EcalTPG" + << "\n =================> For Barrel , " << pOut->size() << " TP Digis were produced (including zero ones)" + << " Non zero primitives were " << nonZeroTP << std::endl; + + // put result into the Event + e.put(std::move(pOut)); +} + +DEFINE_FWK_MODULE(EcalEBTrigPrimPhase2Producer); diff --git a/SimCalorimetry/EcalEBTrigPrimProducers/plugins/SealModules.cc b/SimCalorimetry/EcalEBTrigPrimProducers/plugins/SealModules.cc index e959e18ebaa06..f439ac86bd7cc 100644 --- a/SimCalorimetry/EcalEBTrigPrimProducers/plugins/SealModules.cc +++ b/SimCalorimetry/EcalEBTrigPrimProducers/plugins/SealModules.cc @@ -1,7 +1,4 @@ #include "FWCore/Framework/interface/MakerMacros.h" #include "EcalEBTrigPrimProducer.h" -#include "EcalEBTrigPrimAnalyzer.h" - DEFINE_FWK_MODULE(EcalEBTrigPrimProducer); -DEFINE_FWK_MODULE(EcalEBTrigPrimAnalyzer); diff --git a/SimCalorimetry/EcalEBTrigPrimProducers/python/ecalEBTriggerPrimitivePhase2Digis_cfi.py b/SimCalorimetry/EcalEBTrigPrimProducers/python/ecalEBTriggerPrimitivePhase2Digis_cfi.py new file mode 100644 index 0000000000000..fd567eac36e9c --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimProducers/python/ecalEBTriggerPrimitivePhase2Digis_cfi.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +# +# attention: default is changed to 
work on unsuppressed digis!! ############## +# + +simEcalEBTriggerPrimitivePhase2Digis = cms.EDProducer("EcalEBTrigPrimPhase2Producer", + barrelEcalDigis = cms.InputTag("simEcalUnsuppressedDigis"), + binOfMaximum = cms.int32(6), + Famos = cms.bool(False), + TcpOutput = cms.bool(False), + Debug = cms.bool(False) +) + + +from Configuration.Eras.Modifier_phase2_ecalTP_devel_cff import phase2_ecalTP_devel +phase2_ecalTP_devel.toModify( simEcalEBTriggerPrimitivePhase2Digis) diff --git a/SimCalorimetry/EcalEBTrigPrimProducers/python/ecalEBTriggerPrimitivePhase2ESProducer_cfi.py b/SimCalorimetry/EcalEBTrigPrimProducers/python/ecalEBTriggerPrimitivePhase2ESProducer_cfi.py new file mode 100644 index 0000000000000..ab45cd4f266eb --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimProducers/python/ecalEBTriggerPrimitivePhase2ESProducer_cfi.py @@ -0,0 +1,73 @@ +import os +import FWCore.ParameterSet.Config as cms + +# esmodule creating records + corresponding empty essource +EcalEBTrigPrimPhase2ESProducer = cms.ESProducer("EcalEBTrigPrimPhase2ESProducer", + DatabaseFile = cms.untracked.string('TPG_beamv5_MC_startup.txt.gz'), + WeightTextFile = cms.FileInPath('SimCalorimetry/EcalEBTrigPrimProducers/data/AmpTimeOnPeakXtalWeightsCMSSWPulse_8samples_peakOnSix_WithAndyFixes.txt.gz'), + WriteInFile = cms.bool(False) +) + +tpparams = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalEBPhase2TPGLinearizationConstRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +tpparams2 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalEBPhase2TPGPedestalsRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + + +tpparams4 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalEBPhase2TPGAmplWeightIdMapRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +tpparams17 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalEBPhase2TPGTimeWeightIdMapRcd'), + iovIsRunNotTime = cms.bool(True), + 
firstValid = cms.vuint32(1) +) + + +tpparams5 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalTPGWeightGroupRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + + +tpparams12 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalTPGPhysicsConstRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +tpparams13 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalTPGCrystalStatusRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +tpparams14 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalTPGTowerStatusRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +tpparams15 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalTPGSpikeRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + +tpparams16 = cms.ESSource("EmptyESSource", + recordName = cms.string('EcalTPGStripStatusRcd'), + iovIsRunNotTime = cms.bool(True), + firstValid = cms.vuint32(1) +) + diff --git a/SimCalorimetry/EcalEBTrigPrimProducers/test/testPhase2_13_1_0_pre3.py b/SimCalorimetry/EcalEBTrigPrimProducers/test/testPhase2_13_1_0_pre3.py new file mode 100644 index 0000000000000..5d15178047eb2 --- /dev/null +++ b/SimCalorimetry/EcalEBTrigPrimProducers/test/testPhase2_13_1_0_pre3.py @@ -0,0 +1,164 @@ +# Auto generated configuration file +# using: +# Revision: 1.19 +# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v +# with command line options: TTbar_14TeV_TuneCP5_cfi --conditions auto:phase2_realistic_T15 -n 1 --era Phase2C10 --eventcontent FEVTDEBUG --relval +#9000,100 -s GEN,SIM,DIGI --datatier GEN-SIM-DIGI --beamspot HLLHC14TeV --geometry Extended2026D60 --fileout file:step1_UpToDigi.root + + +import FWCore.ParameterSet.Config as cms +from Configuration.Eras.Era_Phase2C17I13M9_cff import Phase2C17I13M9 +from Configuration.Eras.Modifier_phase2_ecal_devel_cff import phase2_ecal_devel +from 
Configuration.Eras.Modifier_phase2_ecalTP_devel_cff import phase2_ecalTP_devel + +process = cms.Process('DIGI',Phase2C17I13M9,phase2_ecal_devel,phase2_ecalTP_devel) + +# import of standard configurations +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('Configuration.EventContent.EventContent_cff') +process.load('SimGeneral.MixingModule.mixNoPU_cfi') +process.load('Configuration.Geometry.GeometryExtended2026D88Reco_cff') +process.load('Configuration.Geometry.GeometryExtended2026D88_cff') +process.load('Configuration.StandardSequences.MagneticField_cff') +process.load('Configuration.StandardSequences.Generator_cff') +process.load('IOMC.EventVertexGenerators.VtxSmearedHLLHC14TeV_cfi') +process.load('GeneratorInterface.Core.genFilterSummary_cff') +process.load('Configuration.StandardSequences.SimIdeal_cff') +process.load('Configuration.StandardSequences.Digi_cff') +process.load('Configuration.StandardSequences.EndOfProcess_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(1), + output = cms.optional.untracked.allowed(cms.int32,cms.PSet) +) + + +process.MessageLogger = cms.Service("MessageLogger", + destinations = cms.untracked.vstring('messages'), + messages = cms.untracked.PSet(threshold = cms.untracked.string('DEBUG')), + debugModules = cms.untracked.vstring('*') + + +) + + + +# Input source +process.source = cms.Source("EmptySource") + +process.options = cms.untracked.PSet( + FailPath = cms.untracked.vstring(), + IgnoreCompletely = cms.untracked.vstring(), + Rethrow = cms.untracked.vstring(), + SkipEvent = cms.untracked.vstring(), + allowUnscheduled = cms.obsolete.untracked.bool, + canDeleteEarly = cms.untracked.vstring(), + emptyRunLumiMode = cms.obsolete.untracked.string, + eventSetup = cms.untracked.PSet( + 
forceNumberOfConcurrentIOVs = cms.untracked.PSet( + + ), + numberOfConcurrentIOVs = cms.untracked.uint32(1) + ), + fileMode = cms.untracked.string('FULLMERGE'), + forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False), + makeTriggerResults = cms.obsolete.untracked.bool, + numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1), + numberOfConcurrentRuns = cms.untracked.uint32(1), + numberOfStreams = cms.untracked.uint32(0), + numberOfThreads = cms.untracked.uint32(1), + printDependencies = cms.untracked.bool(False), + sizeOfStackForThreadsInKB = cms.optional.untracked.uint32, + throwIfIllegalParameter = cms.untracked.bool(True), + wantSummary = cms.untracked.bool(False) +) + + + +# Production Info +process.configurationMetadata = cms.untracked.PSet( + annotation = cms.untracked.string('TTbar_14TeV_TuneCP5_cfi nevts:1'), + name = cms.untracked.string('Applications'), + version = cms.untracked.string('$Revision: 1.19 $') +) + +# Output definition + +process.FEVTDEBUGoutput = cms.OutputModule("PoolOutputModule", + SelectEvents = cms.untracked.PSet( + SelectEvents = cms.vstring('generation_step') + ), + dataset = cms.untracked.PSet( + dataTier = cms.untracked.string('GEN-SIM-DIGI'), + filterName = cms.untracked.string('') + ), + fileName = cms.untracked.string('file:/tmp/nancy/testGamma_Nancy.root'), + outputCommands = process.FEVTDEBUGEventContent.outputCommands, + splitLevel = cms.untracked.int32(0) +) + +# Additional output definition + +# Other statements +process.genstepfilter.triggerConditions=cms.vstring("generation_step") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T21', '') + +process.GlobalTag.toGet = cms.VPSet( + cms.PSet(record = cms.string("EcalSimPulseShapeRcd"), + tag = cms.string("EcalSimPulseShapePhaseII"), + connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS") + ) +) + + + +process.generator = cms.EDFilter("Pythia8PtGun", + PGunParameters = cms.PSet( 
+ AddAntiParticle = cms.bool(True), + MaxEta = cms.double(1.4), + MaxPhi = cms.double(3.14159265359), + MaxPt = cms.double(1000.01), + MinEta = cms.double(-1.4), + MinPhi = cms.double(-3.14159265359), + MinPt = cms.double(0.5), + ParticleID = cms.vint32(11) + ), + PythiaParameters = cms.PSet( + parameterSets = cms.vstring() + ), + Verbosity = cms.untracked.int32(0), + firstRun = cms.untracked.uint32(1), + psethack = cms.string('single electron flat Pt 0.5 to 100 GeV ') +) + +process.ProductionFilterSequence = cms.Sequence(process.generator) + +# Path and EndPath definitions +process.generation_step = cms.Path(process.pgen) +process.simulation_step = cms.Path(process.psim) +process.digitisation_step = cms.Path(process.pdigi) +process.genfiltersummary_step = cms.EndPath(process.genFilterSummary) +process.endjob_step = cms.EndPath(process.endOfProcess) +process.FEVTDEBUGoutput_step = cms.EndPath(process.FEVTDEBUGoutput) + +# Schedule definition +process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.digitisation_step,process.endjob_step,process.FEVTDEBUGoutput_step) +from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask +associatePatAlgosToolsTask(process) +# filter all path with the production filter sequence +for path in process.paths: + getattr(process,path).insert(0, process.ProductionFilterSequence) + + + +# Customisation from command line + +# Add early deletion of temporary data products to reduce peak memory need +from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete +process = customiseEarlyDelete(process) +# End adding early deletion + diff --git a/SimCalorimetry/HcalTrigPrimProducers/python/hcaltpdigi_cff.py b/SimCalorimetry/HcalTrigPrimProducers/python/hcaltpdigi_cff.py index a7babe2547f3a..b8e99c1754540 100644 --- a/SimCalorimetry/HcalTrigPrimProducers/python/hcaltpdigi_cff.py +++ 
b/SimCalorimetry/HcalTrigPrimProducers/python/hcaltpdigi_cff.py @@ -40,3 +40,6 @@ from Configuration.Eras.Modifier_pp_on_PbPb_run3_cff import pp_on_PbPb_run3 pp_on_PbPb_run3.toModify(HcalTPGCoderULUT, FG_HF_thresholds = [14, 19]) + +from Configuration.Eras.Modifier_pp_on_PbPb_run3_2023_cff import pp_on_PbPb_run3_2023 +pp_on_PbPb_run3_2023.toModify(HcalTPGCoderULUT, FG_HF_thresholds = [16, 19]) \ No newline at end of file diff --git a/SimDataFormats/CaloHit/interface/CaloHit.h b/SimDataFormats/CaloHit/interface/CaloHit.h index 4d67dd6726f85..b2cf00f62d01d 100644 --- a/SimDataFormats/CaloHit/interface/CaloHit.h +++ b/SimDataFormats/CaloHit/interface/CaloHit.h @@ -13,7 +13,6 @@ class CaloHit { public: CaloHit(int deti, int layi, double ei, double etai, double phii, double timi, uint32_t idi = 0); CaloHit(); - CaloHit(const CaloHit&); virtual ~CaloHit(); int det() const { return deth; } diff --git a/SimDataFormats/CaloHit/src/CaloHit.cc b/SimDataFormats/CaloHit/src/CaloHit.cc index ba06b2c6a319d..486e43106c514 100644 --- a/SimDataFormats/CaloHit/src/CaloHit.cc +++ b/SimDataFormats/CaloHit/src/CaloHit.cc @@ -11,16 +11,6 @@ CaloHit::CaloHit(int deti, int layi, double ei, double etai, double fi, double t CaloHit::CaloHit() : deth(0), layerh(0), eh(0), etah(0), phih(0), timeh(0), idh(0) {} -CaloHit::CaloHit(const CaloHit& right) { - deth = right.deth; - layerh = right.layerh; - eh = right.eh; - etah = right.etah; - phih = right.phih; - timeh = right.timeh; - idh = right.idh; -} - CaloHit::~CaloHit() {} bool CaloHit::operator<(const CaloHit& hit) const { diff --git a/SimDataFormats/CrossingFrame/interface/CrossingFrame.h b/SimDataFormats/CrossingFrame/interface/CrossingFrame.h index 6ff1e71cbc748..47cea6a6dfd8c 100644 --- a/SimDataFormats/CrossingFrame/interface/CrossingFrame.h +++ b/SimDataFormats/CrossingFrame/interface/CrossingFrame.h @@ -40,6 +40,7 @@ class CrossingFrame { CrossingFrame() : firstCrossing_(0), lastCrossing_(0), bunchSpace_(75), subdet_(""), 
maxNbSources_(0) {} CrossingFrame(int minb, int maxb, int bunchsp, std::string subdet, unsigned int maxNbSources); + CrossingFrame(const CrossingFrame& v) = default; ~CrossingFrame() { ; } diff --git a/SimDataFormats/GeneratorProducts/interface/GenFilterInfo.h b/SimDataFormats/GeneratorProducts/interface/GenFilterInfo.h index a197572924b4d..660a4b51824d4 100644 --- a/SimDataFormats/GeneratorProducts/interface/GenFilterInfo.h +++ b/SimDataFormats/GeneratorProducts/interface/GenFilterInfo.h @@ -14,7 +14,6 @@ class GenFilterInfo { GenFilterInfo(); GenFilterInfo(unsigned int, unsigned int); // obsolete, should be avoided for new classes GenFilterInfo(unsigned int, unsigned int, unsigned int, unsigned int, double, double, double, double); - GenFilterInfo(const GenFilterInfo&); virtual ~GenFilterInfo(); // getters diff --git a/SimDataFormats/GeneratorProducts/interface/GenLumiInfoProduct.h b/SimDataFormats/GeneratorProducts/interface/GenLumiInfoProduct.h index f48a377fc0588..f5ea7343a1e9c 100644 --- a/SimDataFormats/GeneratorProducts/interface/GenLumiInfoProduct.h +++ b/SimDataFormats/GeneratorProducts/interface/GenLumiInfoProduct.h @@ -17,7 +17,6 @@ class GenLumiInfoProduct { // constructors, destructors GenLumiInfoProduct(); GenLumiInfoProduct(const int id); - GenLumiInfoProduct(const GenLumiInfoProduct &other); virtual ~GenLumiInfoProduct(); // getters @@ -35,7 +34,6 @@ class GenLumiInfoProduct { public: XSec() : value_(-1.), error_(-1.) {} XSec(double v, double e = -1.) 
: value_(v), error_(e) {} - XSec(const XSec &other) : value_(other.value_), error_(other.error_) {} double value() const { return value_; } double error() const { return error_; } @@ -57,7 +55,6 @@ class GenLumiInfoProduct { public: FinalStat() : n_(0), sum_(0.0), sum2_(0.0) {} FinalStat(unsigned int n1, double sum1, double sum21) : n_(n1), sum_(sum1), sum2_(sum21) {} - FinalStat(const FinalStat &other) : n_(other.n_), sum_(other.sum_), sum2_(other.sum2_) {} unsigned int n() const { return n_; } double sum() const { return sum_; } diff --git a/SimDataFormats/GeneratorProducts/interface/GenRunInfoProduct.h b/SimDataFormats/GeneratorProducts/interface/GenRunInfoProduct.h index a96e96fa425ab..9bb7eeb2e4632 100644 --- a/SimDataFormats/GeneratorProducts/interface/GenRunInfoProduct.h +++ b/SimDataFormats/GeneratorProducts/interface/GenRunInfoProduct.h @@ -12,7 +12,6 @@ class GenRunInfoProduct { // constructors, destructors GenRunInfoProduct(); - GenRunInfoProduct(const GenRunInfoProduct &other); // getters @@ -33,7 +32,6 @@ class GenRunInfoProduct { public: XSec() : value_(-1.), error_(-1.) {} XSec(double value, double error = -1.) 
: value_(value), error_(error) {} - XSec(const XSec &other) : value_(other.value_), error_(other.error_) {} double value() const { return value_; } double error() const { return error_; } diff --git a/SimDataFormats/GeneratorProducts/src/GenFilterInfo.cc b/SimDataFormats/GeneratorProducts/src/GenFilterInfo.cc index ba61cc5e0d8de..1b2f3e30a6419 100644 --- a/SimDataFormats/GeneratorProducts/src/GenFilterInfo.cc +++ b/SimDataFormats/GeneratorProducts/src/GenFilterInfo.cc @@ -46,16 +46,6 @@ GenFilterInfo::GenFilterInfo(unsigned int passp, sumTotalWeights_(totalw), sumTotalWeights2_(totalw2) {} -GenFilterInfo::GenFilterInfo(const GenFilterInfo& other) - : numPassPositiveEvents_(other.numPassPositiveEvents_), - numPassNegativeEvents_(other.numPassNegativeEvents_), - numTotalPositiveEvents_(other.numTotalPositiveEvents_), - numTotalNegativeEvents_(other.numTotalNegativeEvents_), - sumPassWeights_(other.sumPassWeights_), - sumPassWeights2_(other.sumPassWeights2_), - sumTotalWeights_(other.sumTotalWeights_), - sumTotalWeights2_(other.sumTotalWeights2_) {} - GenFilterInfo::~GenFilterInfo() {} bool GenFilterInfo::mergeProduct(GenFilterInfo const& other) { diff --git a/SimDataFormats/GeneratorProducts/src/GenLumiInfoProduct.cc b/SimDataFormats/GeneratorProducts/src/GenLumiInfoProduct.cc index 543a7b33d27ad..69b9700aecf92 100644 --- a/SimDataFormats/GeneratorProducts/src/GenLumiInfoProduct.cc +++ b/SimDataFormats/GeneratorProducts/src/GenLumiInfoProduct.cc @@ -71,9 +71,6 @@ GenLumiInfoProduct::GenLumiInfoProduct() : hepidwtup_(-1) { internalProcesses_.c GenLumiInfoProduct::GenLumiInfoProduct(const int id) : hepidwtup_(id) { internalProcesses_.clear(); } -GenLumiInfoProduct::GenLumiInfoProduct(GenLumiInfoProduct const& other) - : hepidwtup_(other.hepidwtup_), internalProcesses_(other.internalProcesses_) {} - GenLumiInfoProduct::~GenLumiInfoProduct() {} bool GenLumiInfoProduct::mergeProduct(GenLumiInfoProduct const& other) { diff --git 
a/SimDataFormats/GeneratorProducts/src/GenRunInfoProduct.cc b/SimDataFormats/GeneratorProducts/src/GenRunInfoProduct.cc index f675e7f3406bb..bfc45ca649cf2 100644 --- a/SimDataFormats/GeneratorProducts/src/GenRunInfoProduct.cc +++ b/SimDataFormats/GeneratorProducts/src/GenRunInfoProduct.cc @@ -10,12 +10,6 @@ using namespace std; GenRunInfoProduct::GenRunInfoProduct() : externalFilterEfficiency_(-1.) {} -GenRunInfoProduct::GenRunInfoProduct(GenRunInfoProduct const &other) - : internalXSec_(other.internalXSec_), - externalXSecLO_(other.externalXSecLO_), - externalXSecNLO_(other.externalXSecNLO_), - externalFilterEfficiency_(other.externalFilterEfficiency_) {} - bool GenRunInfoProduct::isProductEqual(GenRunInfoProduct const &other) const { bool result = externalXSecLO_ == other.externalXSecLO_ && externalXSecNLO_ == other.externalXSecNLO_ && externalFilterEfficiency_ == other.externalFilterEfficiency_; diff --git a/SimDataFormats/ValidationFormats/interface/MaterialAccountingStep.h b/SimDataFormats/ValidationFormats/interface/MaterialAccountingStep.h index 83842f9e42b37..f724e90bf5795 100644 --- a/SimDataFormats/ValidationFormats/interface/MaterialAccountingStep.h +++ b/SimDataFormats/ValidationFormats/interface/MaterialAccountingStep.h @@ -54,16 +54,6 @@ class MaterialAccountingStep { return std::make_pair(part1, part2); } - /// assignement operator - MaterialAccountingStep& operator=(const MaterialAccountingStep& step) { - m_length = step.m_length; - m_radiationLengths = step.m_radiationLengths; - m_energyLoss = step.m_energyLoss; - m_in = step.m_in; - m_out = step.m_out; - return *this; - } - /// add a step MaterialAccountingStep& operator+=(const MaterialAccountingStep& step) { m_length += step.m_length; diff --git a/SimFastTiming/FastTimingCommon/src/ETLDeviceSim.cc b/SimFastTiming/FastTimingCommon/src/ETLDeviceSim.cc index f7fbf2dc31c52..4582efbd3e7d2 100644 --- a/SimFastTiming/FastTimingCommon/src/ETLDeviceSim.cc +++ 
b/SimFastTiming/FastTimingCommon/src/ETLDeviceSim.cc @@ -45,7 +45,7 @@ void ETLDeviceSim::getHitsResponse(const std::vectoridToDet(geoId); if (thedet == nullptr) { throw cms::Exception("ETLDeviceSim") << "GeographicalID: " << std::hex << geoId.rawId() << " (" << detId.rawId() diff --git a/SimFastTiming/FastTimingCommon/test/BuildFile.xml b/SimFastTiming/FastTimingCommon/test/BuildFile.xml index 93c8000877b1b..fa121a85b0c0a 100644 --- a/SimFastTiming/FastTimingCommon/test/BuildFile.xml +++ b/SimFastTiming/FastTimingCommon/test/BuildFile.xml @@ -1,6 +1,10 @@ + + + + diff --git a/SimFastTiming/FastTimingCommon/test/testBTLShape.cpp b/SimFastTiming/FastTimingCommon/test/testBTLShape.cpp new file mode 100644 index 0000000000000..21dea544b97f0 --- /dev/null +++ b/SimFastTiming/FastTimingCommon/test/testBTLShape.cpp @@ -0,0 +1,73 @@ +#include "SimFastTiming/FastTimingCommon/interface/BTLPulseShape.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include +#include +#include + +#include "TROOT.h" +#include "TStyle.h" +#include "TH1F.h" +#include "TCanvas.h" +#include "TF1.h" + +int main() { + edm::MessageDrop::instance()->debugEnabled = false; + + const unsigned int histsiz(BTLPulseShape::k1NSecBinsTotal); + + // shape constants and input amplitude + + const double ReferencePulseNpe_ = 100.; + const double TimeThreshold1_ = 20.; + const double TimeThreshold2_ = 50.; + const double Npe_to_V_ = 0.0064; + + const BTLPulseShape theShape; + + const size_t nampli(5); + const std::array npe{{8000., 4000., 3500., 1000., 100.}}; + std::vector histVect; + + // standard display of the implemented shape function + const int csize = 500; + TCanvas* showShape = new TCanvas("showShape", "showShape", csize, 2 * csize); + + for (size_t index = 0; index < nampli; index++) { + const double scale = npe[index] / ReferencePulseNpe_; + const std::array tATt( + theShape.timeAtThr(scale, TimeThreshold1_ * Npe_to_V_, TimeThreshold2_ * Npe_to_V_)); + + TString name = "BTLShape_" + 
std::to_string(index); + histVect.emplace_back(new TH1F(name, "Tabulated BTL shape", histsiz, 0., (float)(histsiz))); + + std::cout << "Tabulated BTL shape, scale vs reference = " << std::fixed << std::setw(6) << std::setprecision(2) + << scale << " maximum at [" << std::fixed << std::setw(6) << std::setprecision(2) << theShape.indexOfMax() + << " ] = " << std::fixed << std::setw(6) << std::setprecision(2) << theShape.timeOfMax() << std::endl; + std::cout << "Time at thresholds:\n" + << std::fixed << std::setw(8) << std::setprecision(3) << TimeThreshold1_ * Npe_to_V_ << " --> " << tATt[0] + << "\n" + << std::fixed << std::setw(8) << std::setprecision(3) << TimeThreshold2_ * Npe_to_V_ << " --> " << tATt[1] + << "\n" + << std::fixed << std::setw(8) << std::setprecision(3) << TimeThreshold1_ * Npe_to_V_ << " --> " << tATt[2] + << "\n" + << std::endl; + + for (unsigned int i = 0; i <= histsiz; ++i) { + const double time((i + 0.5) / BTLPulseShape::kNBinsPerNSec); + const double myShape(theShape(time)); + histVect[index]->SetBinContent(i, myShape * scale); + histVect[index]->SetBinError(i, 0.001); + std::cout << " bin = " << std::fixed << std::setw(4) << i << " time (ns) = " << std::fixed << std::setw(6) + << std::setprecision(3) << time << " shape = " << std::setw(11) << std::setprecision(8) + << myShape * scale << std::endl; + } + + showShape->cd(); + histVect[index]->SetStats(kFALSE); + histVect[index]->Draw("SAME"); + } + + showShape->SaveAs("BTLShape.pdf"); + + return 0; +} diff --git a/SimG4CMS/Calo/BuildFile.xml b/SimG4CMS/Calo/BuildFile.xml index 5d2f500750ecf..c538379e0a464 100644 --- a/SimG4CMS/Calo/BuildFile.xml +++ b/SimG4CMS/Calo/BuildFile.xml @@ -1,5 +1,6 @@ + @@ -22,7 +23,6 @@ - diff --git a/SimG4CMS/Calo/interface/HCalSD.h b/SimG4CMS/Calo/interface/HCalSD.h index b8558e0764c11..487f17732dd0a 100644 --- a/SimG4CMS/Calo/interface/HCalSD.h +++ b/SimG4CMS/Calo/interface/HCalSD.h @@ -114,6 +114,7 @@ class HCalSD : public CaloSD, public Observer { double 
deliveredLumi; double weight_; int depth_; + bool dd4hep_; std::vector gpar; std::vector hfLevels; std::vector hfNames; diff --git a/SimG4CMS/Calo/interface/HGCalNumberingScheme.h b/SimG4CMS/Calo/interface/HGCalNumberingScheme.h index e730f0c2ab5c3..10209a54c2cf0 100644 --- a/SimG4CMS/Calo/interface/HGCalNumberingScheme.h +++ b/SimG4CMS/Calo/interface/HGCalNumberingScheme.h @@ -31,7 +31,7 @@ class HGCalNumberingScheme { uint32_t getUnitID(int layer, int module, int cell, int iz, const G4ThreeVector& pos, double& wt); private: - void checkPosition(uint32_t index, const G4ThreeVector& pos, bool matchOnly, bool debug) const; + bool checkPosition(uint32_t index, const G4ThreeVector& pos, bool matchOnly, bool debug) const; const HGCalDDDConstants& hgcons_; const HGCalGeometryMode::GeometryMode mode_; diff --git a/SimG4CMS/Calo/plugins/HGCalMouseBiteTester.cc b/SimG4CMS/Calo/plugins/HGCalMouseBiteTester.cc new file mode 100644 index 0000000000000..8f5ebbb880090 --- /dev/null +++ b/SimG4CMS/Calo/plugins/HGCalMouseBiteTester.cc @@ -0,0 +1,199 @@ +// -*- C++ -*- +// +// Package: HGCalMouseBiteTester +// Class: HGCalMouseBiteTester +// +/**\class HGCalMouseBiteTester HGCalMouseBiteTester.cc + plugins/HGCalMouseBiteTester.cc + + Description: + + Implementation: + +*/ +// +// Original Author: Sunanda Banerjee, Pruthvi Suryadevara +// Created: Mon 2023/11/30 +// +// + +// system include files +#include +#include +#include +#include +#include +#include +#include +//#include + +// user include files +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/Framework/interface/one/EDAnalyzer.h" + +#include +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/MakerMacros.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "Geometry/Records/interface/IdealGeometryRecord.h" +#include 
"FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "Geometry/HGCalCommonData/interface/HGCalDDDConstants.h" +#include "Geometry/HGCalCommonData/interface/HGCalParameters.h" +#include "Geometry/HGCalCommonData/interface/HGCalCellUV.h" +#include "Geometry/HGCalCommonData/interface/HGCalCell.h" +#include "Geometry/HGCalCommonData/interface/HGCalWaferMask.h" +#include "Geometry/HGCalCommonData/interface/HGCalWaferType.h" +#include "SimG4CMS/Calo/interface/HGCMouseBite.h" +#include "SimG4CMS/Calo/interface/HGCGuardRing.h" +#include "SimG4CMS/Calo/interface/HGCGuardRingPartial.h" +#include "G4ThreeVector.hh" + +class HGCalMouseBiteTester : public edm::one::EDAnalyzer<> { +public: + explicit HGCalMouseBiteTester(const edm::ParameterSet&); + ~HGCalMouseBiteTester() override = default; + static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); + + void beginJob() override {} + void analyze(edm::Event const& iEvent, edm::EventSetup const&) override; + void endJob() override {} + +private: + const std::string nameSense_; + const int waferU_; + const int waferV_; + const int nTrials_; + const int layer_; + const edm::ESGetToken dddToken_; + std::ofstream outputFile; +}; + +HGCalMouseBiteTester::HGCalMouseBiteTester(const edm::ParameterSet& iC) + : nameSense_(iC.getParameter("nameSense")), + waferU_(iC.getParameter("waferU")), + waferV_(iC.getParameter("waferV")), + nTrials_(iC.getParameter("numbberOfTrials")), + layer_(iC.getParameter("layer")), + dddToken_(esConsumes(edm::ESInputTag{"", nameSense_})) { + edm::LogVerbatim("HGCalGeom") << "Test Guard_Ring for wafer in layer" << layer_ << " U " << waferU_ << " V " + << waferV_ << " with " << nTrials_ << " trials"; + + outputFile.open("full1.csv"); + if (!outputFile.is_open()) { + edm::LogError("HGCalGeom") << "Could not open output file."; + } else { + outputFile << "x,y,u,v,\n"; + } +} + +void HGCalMouseBiteTester::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { + 
edm::ParameterSetDescription desc; + desc.add("nameSense", "HGCalEESensitive"); + desc.add("waferU", 1); + desc.add("waferV", 9); + desc.add("numbberOfTrials", 1000000); + desc.add("layer", 1); + descriptions.add("hgcalMouseBiteTester", desc); +} + +// ------------ method called to produce the data ------------ +void HGCalMouseBiteTester::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { + const HGCalDDDConstants& hgcons_ = iSetup.getData(dddToken_); + double waferSize_(hgcons_.waferSize(false)); + int zside(1); + int layertype = hgcons_.layerType(layer_); + int frontBack = HGCalTypes::layerFrontBack(layertype); + const std::vector angle_{90.0, 30.0}; + int index = HGCalWaferIndex::waferIndex(layer_, waferU_, waferV_); + int partialType_ = 0; + int orient = HGCalWaferType::getOrient(index, hgcons_.getParameter()->waferInfoMap_); + int placeIndex_ = HGCalCell::cellPlacementIndex(zside, frontBack, orient); + int waferType_ = HGCalWaferType::getType(index, hgcons_.getParameter()->waferInfoMap_); + double mouseBiteCut_ = waferSize_ * tan(30.0 * CLHEP::deg) - 5.0; + bool v17OrLess = hgcons_.v17OrLess(); + HGCGuardRing guardRing_(hgcons_); + HGCGuardRingPartial guardRingPartial_(hgcons_); + HGCMouseBite mouseBite_(hgcons_, angle_, mouseBiteCut_, true); + const int nFine(12), nCoarse(8); + double r2 = 0.5 * waferSize_; + double R2 = 2 * r2 / sqrt(3); + int nCells = (waferType_ == 0) ? nFine : nCoarse; + std::cout << "start" << std::endl; + HGCalCellUV wafer(waferSize_, 0.0, nFine, nCoarse); + HGCalCell wafer2(waferSize_, nFine, nCoarse); + std::pair xy = hgcons_.waferPosition(layer_, waferU_, waferV_, false, false); + double x0 = (zside > 0) ? 
xy.first : -xy.first; + double y0 = xy.second; + std::ofstream guard_ring("Guard_ring.csv"); + std::ofstream guard_ring_partial("Guard_ring_partial.csv"); + std::ofstream mouse_bite("Mouse_bite.csv"); + std::ofstream selected("Selected.csv"); + edm::LogVerbatim("HGCalGeom") << "\nHGCalMouseBiteTester:: nCells " << nCells << " FrontBack " << frontBack + << " Wafer Size " << waferSize_ << " and placement index " << placeIndex_ + << " WaferType " << waferType_ << " Partial " << partialType_ << " WaferX " << x0 + << " WaferY " << y0 << "\n\n"; + auto start_t = std::chrono::high_resolution_clock::now(); + + for (int i = 0; i < nTrials_; i++) { + double xi = (2 * r2 * static_cast(rand()) / RAND_MAX) - r2; + double yi = (2 * R2 * static_cast(rand()) / RAND_MAX) - R2; + bool goodPoint = true; + int ug = 0; + int vg = 0; + if (partialType_ == 11 || partialType_ == 13 || partialType_ == 15 || partialType_ == 21 || partialType_ == 23 || + partialType_ == 25 || partialType_ == 0) { + ug = 0; + vg = 0; + } else if (partialType_ == 12 || partialType_ == 14 || partialType_ == 16 || partialType_ == 22 || + partialType_ == 24) { + ug = nCells + 1; + vg = 2 * (nCells - 1); + } + std::pair xyg = wafer2.cellUV2XY2(ug, vg, placeIndex_, waferType_); + std::vector > wxy = + HGCalWaferMask::waferXY(0, placeIndex_, waferSize_, 0.0, 0.0, 0.0, v17OrLess); + for (unsigned int i = 0; i < (wxy.size() - 1); ++i) { + double xp1 = wxy[i].first; + double yp1 = wxy[i].second; + double xp2 = wxy[i + 1].first; + double yp2 = wxy[i + 1].second; + if ((((xi - xp1) / (xp2 - xp1)) - ((yi - yp1) / (yp2 - yp1))) * + (((xyg.first - xp1) / (xp2 - xp1)) - ((xyg.second - yp1) / (yp2 - yp1))) <= + 0) { + goodPoint = false; + } + } + if (goodPoint) { //Only allowing (x, y) inside a partial wafer 11, placement index 2 + partialType_ = HGCalWaferType::getPartial(index, hgcons_.getParameter()->waferInfoMap_); + G4ThreeVector point(xi, yi, 0.0); + std::pair uv5; + if (hgcons_.v17OrLess()) { + uv5 = 
wafer.cellUVFromXY1(xi, yi, placeIndex_, waferType_, partialType_, true, false); + } else { + uv5 = wafer.cellUVFromXY2(xi, yi, placeIndex_, waferType_, partialType_, true, false); + } + if (guardRingPartial_.exclude(point, zside, frontBack, layer_, waferU_, waferV_)) { + guard_ring_partial << xi << "," << yi << std::endl; + } else if (mouseBite_.exclude(point, zside, layer_, waferU_, waferV_)) { + mouse_bite << xi << "," << yi << std::endl; + } else { + selected << xi << "," << yi << std::endl; + outputFile << xi << "," << yi << "," << uv5.first << "," << uv5.second << "," << std::endl; + } + } + } + guard_ring.close(); + guard_ring_partial.close(); + mouse_bite.close(); + selected.close(); + outputFile.close(); + auto end_t = std::chrono::high_resolution_clock::now(); + auto diff_t = end_t - start_t; + edm::LogVerbatim("HGCalGeom") << "Execution time for " << nTrials_ + << " events = " << std::chrono::duration(diff_t).count() << " ms"; +} + +// define this as a plug-in +DEFINE_FWK_MODULE(HGCalMouseBiteTester); diff --git a/SimG4CMS/Calo/src/CaloSD.cc b/SimG4CMS/Calo/src/CaloSD.cc index f0e1c5c6b2b4d..56ad1e0eb1f99 100644 --- a/SimG4CMS/Calo/src/CaloSD.cc +++ b/SimG4CMS/Calo/src/CaloSD.cc @@ -5,6 +5,7 @@ #include "SimG4CMS/Calo/interface/CaloSD.h" #include "SimDataFormats/SimHitMaker/interface/CaloSlaveSD.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "SimG4Core/Notification/interface/G4TrackToParticleID.h" #include "SimG4Core/Notification/interface/SimTrackManager.h" @@ -22,7 +23,6 @@ #include "G4ParticleTable.hh" #include "G4SystemOfUnits.hh" #include "G4PhysicalConstants.hh" -#include "DD4hep/Filter.h" #include #include @@ -147,7 +147,7 @@ CaloSD::CaloSD(const std::string& name, G4LogicalVolume* lv = nullptr; G4String name = static_cast(fineNames[useFines[i]]); for (lvcite = lvs->begin(); lvcite != lvs->end(); lvcite++) { - G4String 
namx(static_cast(dd4hep::dd::noNamespace((*lvcite)->GetName()))); + G4String namx(static_cast(DD4hep2DDDName::nameMatterLV((*lvcite)->GetName(), dd4hep))); if (namx == name) { lv = (*lvcite); break; @@ -487,7 +487,8 @@ bool CaloSD::hitExists(const G4Step* aStep, int k) { bool CaloSD::checkHit(int k) { #ifdef EDM_ML_DEBUG - edm::LogVerbatim("CaloSim") << "CaloSD: checkHit for " << k; + edm::LogVerbatim("CaloSim") << "CaloSD: checkHit for " << k << " for map " << useMap << ":" << &hitMap[k] << " Nhits " + << nCheckedHits[k] << " HC " << theHC[k] << " ID " << currentID[k]; #endif //look in the HitContainer whether a hit with the same ID already exists: bool found = false; diff --git a/SimG4CMS/Calo/src/CaloTrkProcessing.cc b/SimG4CMS/Calo/src/CaloTrkProcessing.cc index 6a0dcff9ed9be..04f4fa494e78a 100644 --- a/SimG4CMS/Calo/src/CaloTrkProcessing.cc +++ b/SimG4CMS/Calo/src/CaloTrkProcessing.cc @@ -1,3 +1,4 @@ +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/BeginOfEvent.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "SimG4Core/Notification/interface/SimTrackManager.h" @@ -12,7 +13,6 @@ #include "G4Step.hh" #include "G4Track.hh" #include "G4SystemOfUnits.hh" -#include "DD4hep/Filter.h" #include //#define EDM_ML_DEBUG @@ -82,7 +82,7 @@ CaloTrkProcessing::CaloTrkProcessing(const std::string& name, G4LogicalVolume* lv = nullptr; G4String name(csps.caloNames_[i]); for (lvcite = lvs->begin(); lvcite != lvs->end(); lvcite++) { - G4String namx(static_cast(dd4hep::dd::noNamespace((*lvcite)->GetName()))); + G4String namx(DD4hep2DDDName::noNameSpace(static_cast((*lvcite)->GetName()))); if (namx == name) { lv = (*lvcite); break; @@ -107,7 +107,7 @@ CaloTrkProcessing::CaloTrkProcessing(const std::string& name, lv = nullptr; name = static_cast(csps.insideNames_[istart + k]); for (lvcite = lvs->begin(); lvcite != lvs->end(); lvcite++) { - G4String 
namx(static_cast(dd4hep::dd::noNamespace((*lvcite)->GetName()))); + G4String namx(DD4hep2DDDName::noNameSpace(static_cast((*lvcite)->GetName()))); if (namx == name) { lv = (*lvcite); break; @@ -129,7 +129,7 @@ CaloTrkProcessing::CaloTrkProcessing(const std::string& name, G4LogicalVolume* lv = nullptr; G4String name = static_cast(fineNames[useFines[i]]); for (lvcite = lvs->begin(); lvcite != lvs->end(); lvcite++) { - G4String namx(static_cast(dd4hep::dd::noNamespace((*lvcite)->GetName()))); + G4String namx(DD4hep2DDDName::noNameSpace(static_cast((*lvcite)->GetName()))); if (namx == name) { lv = (*lvcite); break; diff --git a/SimG4CMS/Calo/src/ECalSD.cc b/SimG4CMS/Calo/src/ECalSD.cc index 78304d0498ffc..5b8ca4497770f 100644 --- a/SimG4CMS/Calo/src/ECalSD.cc +++ b/SimG4CMS/Calo/src/ECalSD.cc @@ -4,6 +4,7 @@ /////////////////////////////////////////////////////////////////////////////// #include "SimG4CMS/Calo/interface/ECalSD.h" #include "SimG4CMS/Calo/interface/EcalDumpGeometry.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "Geometry/EcalCommonData/interface/EcalBarrelNumberingScheme.h" #include "Geometry/EcalCommonData/interface/EcalBaseNumber.h" @@ -18,8 +19,6 @@ #include "FWCore/ServiceRegistry/interface/Service.h" #include "CommonTools/UtilAlgos/interface/TFileService.h" -#include "DD4hep/Filter.h" - #include "G4LogicalVolumeStore.hh" #include "G4LogicalVolume.hh" #include "G4Step.hh" @@ -127,8 +126,9 @@ ECalSD::ECalSD(const std::string& name, int type0 = dumpGeom / 1000; type += (10 * type0); - if (nullptr != scheme) + if (nullptr != scheme) { setNumberingScheme(scheme); + } #ifdef EDM_ML_DEBUG edm::LogVerbatim("EcalSim") << "Constructing a ECalSD with name " << GetName(); #endif @@ -218,7 +218,8 @@ double ECalSD::getEnergyDeposit(const G4Step* aStep) { edep *= wt2; } #ifdef EDM_ML_DEBUG - edm::LogVerbatim("EcalSim") << lv->GetName() << " " << 
dd4hep::dd::noNamespace(lv->GetName()) + edm::LogVerbatim("EcalSim") << lv->GetName() << " " + << DD4hep2DDDName::noNameSpace(static_cast(lv->GetName())) << " Light Collection Efficiency " << weight << ":" << wt1 << " wt2= " << wt2 << " Weighted Energy Deposit " << edep / CLHEP::MeV << " MeV at " << preStepPoint->GetPosition(); @@ -292,7 +293,7 @@ uint16_t ECalSD::getRadiationLength(const G4StepPoint* hitPoint, const G4Logical double radl = hitPoint->GetMaterial()->GetRadlen(); thisX0 = (uint16_t)floor(scaleRL * crystalDepth / radl); #ifdef plotDebug - const std::string& lvname = dd4hep::dd::noNamespace(lv->GetName()); + const std::string lvname = DD4hep2DDDName::noNameSpace(static_cast(lv->GetName())); int k1 = (lvname.find("EFRY") != std::string::npos) ? 2 : 0; int k2 = (lvname.find("refl") != std::string::npos) ? 1 : 0; int kk = k1 + k2; @@ -302,7 +303,8 @@ uint16_t ECalSD::getRadiationLength(const G4StepPoint* hitPoint, const G4Logical #endif #ifdef EDM_ML_DEBUG G4ThreeVector localPoint = setToLocal(hitPoint->GetPosition(), hitPoint->GetTouchable()); - edm::LogVerbatim("EcalSim") << lv->GetName() << " " << dd4hep::dd::noNamespace(lv->GetName()) << " Global " + edm::LogVerbatim("EcalSim") << lv->GetName() << " " + << DD4hep2DDDName::noNameSpace(static_cast(lv->GetName())) << " Global " << hitPoint->GetPosition() << ":" << (hitPoint->GetPosition()).rho() << " Local " << localPoint << " Crystal Length " << crystalLength << " Radl " << radl << " crystalDepth " << crystalDepth << " Index " << thisX0 << " : " @@ -338,7 +340,7 @@ void ECalSD::initMap() { const G4LogicalVolumeStore* lvs = G4LogicalVolumeStore::GetInstance(); std::map nameMap; for (auto lvi = lvs->begin(), lve = lvs->end(); lvi != lve; ++lvi) - nameMap.emplace(dd4hep::dd::noNamespace((*lvi)->GetName()), *lvi); + nameMap.emplace(DD4hep2DDDName::noNameSpace(static_cast((*lvi)->GetName())), *lvi); for (unsigned int it = 0; it < ecalSimParameters_->lvNames_.size(); ++it) { const std::string& matname = 
ecalSimParameters_->matNames_[it]; @@ -419,7 +421,7 @@ void ECalSD::initMap() { for (auto ite : xtalLMap) { std::string name("Unknown"); if (ite.first != nullptr) - name = dd4hep::dd::noNamespace((ite.first)->GetName()); + name = DD4hep2DDDName::noNameSpace(static_cast((ite.first)->GetName())); edm::LogVerbatim("EcalSim") << " " << i << " " << ite.first << " " << name << " L = " << ite.second; ++i; } @@ -440,13 +442,15 @@ double ECalSD::curve_LY(const G4LogicalVolume* lv) { } else { edm::LogWarning("EcalSim") << "ECalSD: light coll curve : wrong distance " << "to APD " << dapd << " crlength = " << crystalLength << ":" << crystalDepth - << " crystal name = " << lv->GetName() << " " << dd4hep::dd::noNamespace(lv->GetName()) + << " crystal name = " << lv->GetName() << " " + << DD4hep2DDDName::noNameSpace(static_cast(lv->GetName())) << " z of localPoint = " << currentLocalPoint.z() << " take weight = " << weight; } } #ifdef EDM_ML_DEBUG edm::LogVerbatim("EcalSim") << "ECalSD: light coll curve : crlength = " << crystalLength << " Depth " << crystalDepth - << " crystal name = " << lv->GetName() << " " << dd4hep::dd::noNamespace(lv->GetName()) + << " crystal name = " << lv->GetName() << " " + << DD4hep2DDDName::noNameSpace(static_cast(lv->GetName())) << " z of localPoint = " << currentLocalPoint.z() << " take weight = " << weight; #endif return weight; @@ -461,8 +465,8 @@ void ECalSD::getBaseNumber(const G4Step* aStep) { //Get name and copy numbers if (theSize > 1) { for (int ii = 0; ii < theSize; ii++) { - std::string_view name = dd4hep::dd::noNamespace(touch->GetVolume(ii)->GetName()); - theBaseNumber.addLevel(std::string(name), touch->GetReplicaNumber(ii)); + std::string name = DD4hep2DDDName::noNameSpace(static_cast(touch->GetVolume(ii)->GetName())); + theBaseNumber.addLevel(name, touch->GetReplicaNumber(ii)); #ifdef EDM_ML_DEBUG edm::LogVerbatim("EcalSim") << "ECalSD::getBaseNumber(): Adding level " << ii << ": " << name << "[" << touch->GetReplicaNumber(ii) << "]"; 
@@ -489,7 +493,8 @@ double ECalSD::getBirkL3(const G4Step* aStep) { weight = 1.; } #ifdef EDM_ML_DEBUG - edm::LogVerbatim("EcalSim") << "ECalSD::getBirkL3 in " << dd4hep::dd::noNamespace(mat->GetName()) << " Charge " + edm::LogVerbatim("EcalSim") << "ECalSD::getBirkL3 in " + << DD4hep2DDDName::noNameSpace(static_cast(mat->GetName())) << " Charge " << charge << " dE/dx " << dedx << " Birk Const " << rkb << " Weight = " << weight << " dE " << aStep->GetTotalEnergyDeposit(); #endif diff --git a/SimG4CMS/Calo/src/EcalDumpGeometry.cc b/SimG4CMS/Calo/src/EcalDumpGeometry.cc index c8c599d971c70..d4e8fee1cf6d0 100644 --- a/SimG4CMS/Calo/src/EcalDumpGeometry.cc +++ b/SimG4CMS/Calo/src/EcalDumpGeometry.cc @@ -3,10 +3,9 @@ #include "Geometry/EcalCommonData/interface/EcalBaseNumber.h" #include "Geometry/EcalCommonData/interface/EcalEndcapNumberingScheme.h" #include "Geometry/EcalCommonData/interface/EcalPreshowerNumberingScheme.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4CMS/Calo/interface/EcalDumpGeometry.h" -#include "DD4hep/Filter.h" - #include EcalDumpGeometry::EcalDumpGeometry(const std::vector& names, @@ -20,7 +19,7 @@ EcalDumpGeometry::EcalDumpGeometry(const std::vector& names, G4cout << " Type: " << type << " Depth Names " << name1_ << ":" << name2_ << " with " << names.size() << " LVs: " << ss.str() << G4endl; for (const auto& name : names) { - std::string namex = (static_cast(dd4hep::dd::noNamespace(name))).substr(0, 4); + std::string namex = DD4hep2DDDName::noNameSpace(static_cast(name)).substr(0, 4); if (std::find(names_.begin(), names_.end(), namex) == names_.end()) names_.emplace_back(namex); } @@ -60,7 +59,7 @@ void EcalDumpGeometry::dumpTouch(G4VPhysicalVolume* pv, unsigned int leafDepth) G4LogicalVolume* lv = pv->GetLogicalVolume(); bool flag = ((type_ / 10) % 10 > 0); - std::string lvname = (static_cast(dd4hep::dd::noNamespace(lv->GetName()))); + std::string lvname = DD4hep2DDDName::noNameSpace(static_cast(lv->GetName())); 
std::string namex = lvname.substr(0, 4); EcalBaseNumber theBaseNumber; for (unsigned int k = 0; k < names_.size(); ++k) { @@ -73,8 +72,8 @@ void EcalDumpGeometry::dumpTouch(G4VPhysicalVolume* pv, unsigned int leafDepth) theBaseNumber.setSize(theSize + 1); std::stringstream ss; for (int ii = theSize; ii >= 0; --ii) { - std::string_view name = dd4hep::dd::noNamespace(fHistory_.GetVolume(ii)->GetName()); - theBaseNumber.addLevel(static_cast(name), fHistory_.GetVolume(ii)->GetCopyNo()); + std::string name = DD4hep2DDDName::noNameSpace(static_cast(fHistory_.GetVolume(ii)->GetName())); + theBaseNumber.addLevel(name, fHistory_.GetVolume(ii)->GetCopyNo()); ss << " " << ii << " " << name << ":" << fHistory_.GetVolume(ii)->GetCopyNo(); } uint32_t id = (((type_ % 10) == 0) ? ebNumbering_.getUnitID(theBaseNumber) diff --git a/SimG4CMS/Calo/src/HCalSD.cc b/SimG4CMS/Calo/src/HCalSD.cc index 3fb266eeb9793..19f4299038cf0 100644 --- a/SimG4CMS/Calo/src/HCalSD.cc +++ b/SimG4CMS/Calo/src/HCalSD.cc @@ -7,6 +7,7 @@ #include "SimG4CMS/Calo/interface/HcalTestNumberingScheme.h" #include "SimG4CMS/Calo/interface/HcalDumpGeometry.h" #include "SimG4CMS/Calo/interface/HFFibreFiducial.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "SimG4Core/Notification/interface/G4TrackToParticleID.h" #include "DataFormats/HcalDetId/interface/HcalDetId.h" @@ -25,8 +26,6 @@ #include "G4PhysicalConstants.hh" #include "Randomize.hh" -#include "DD4hep/Filter.h" - #include #include #include @@ -77,7 +76,7 @@ HCalSD::HCalSD(const std::string& name, //static SimpleConfigurable bk3(1.75, "HCalSD:BirkC3"); // Values from NIM 80 (1970) 239-244: as implemented in Geant3 - bool dd4hep = p.getParameter("g4GeometryDD4hepSource"); + dd4hep_ = p.getParameter("g4GeometryDD4hepSource"); edm::ParameterSet m_HC = p.getParameter("HCalSD"); useBirk = m_HC.getParameter("UseBirkLaw"); double bunit = (CLHEP::g / (CLHEP::MeV * CLHEP::cm2)); @@ 
-175,12 +174,12 @@ HCalSD::HCalSD(const std::string& name, std::stringstream ss0; ss0 << "HCalSD: Names to be tested for Volume = HF has " << hfNames.size() << " elements"; #endif - int addlevel = dd4hep ? 1 : 0; + int addlevel = dd4hep_ ? 1 : 0; for (unsigned int i = 0; i < hfNames.size(); ++i) { - G4String namv(static_cast(dd4hep::dd::noNamespace(hfNames[i]))); + std::string namv(DD4hep2DDDName::nameMatterLV(hfNames[i], dd4hep_)); lv = nullptr; for (auto lvol : *lvs) { - if (dd4hep::dd::noNamespace(lvol->GetName()) == namv) { + if (DD4hep2DDDName::nameMatterLV(lvol->GetName(), dd4hep_) == namv) { lv = lvol; break; } @@ -211,7 +210,7 @@ HCalSD::HCalSD(const std::string& name, for (auto const& namx : matNames) { const G4Material* mat = nullptr; for (matite = matTab->begin(); matite != matTab->end(); ++matite) { - if (static_cast(dd4hep::dd::noNamespace((*matite)->GetName())) == namx) { + if (DD4hep2DDDName::nameMatterLV((*matite)->GetName(), dd4hep_) == namx) { mat = (*matite); break; } @@ -315,10 +314,10 @@ void HCalSD::fillLogVolumeVector(const std::string& value, std::stringstream ss3; ss3 << "HCalSD: " << lvnames.size() << " names to be tested for Volume <" << value << ">:"; for (unsigned int i = 0; i < lvnames.size(); ++i) { - G4String namv(static_cast(dd4hep::dd::noNamespace(lvnames[i]))); + std::string namv(DD4hep2DDDName::nameMatterLV(lvnames[i], dd4hep_)); lv = nullptr; for (auto lvol : *lvs) { - if (dd4hep::dd::noNamespace(lvol->GetName()) == namv) { + if (DD4hep2DDDName::nameMatterLV(lvol->GetName(), dd4hep_) == namv) { lv = lvol; break; } @@ -1029,7 +1028,7 @@ void HCalSD::plotProfile(const G4Step* aStep, const G4ThreeVector& global, doubl double depth = -2000; int idx = 4; for (int n = 0; n < touch->GetHistoryDepth(); ++n) { - G4String name(static_cast(dd4hep::dd::noNamespace(touch->GetVolume(n)->GetName()))); + G4String name(static_cast(DD4hep2DDDName::nameMatterLV(touch->GetVolume(n)->GetName(), dd4hep_))); #ifdef EDM_ML_DEBUG 
edm::LogVerbatim("HcalSim") << "plotProfile Depth " << n << " Name " << name; #endif diff --git a/SimG4CMS/Calo/src/HGCGuardRing.cc b/SimG4CMS/Calo/src/HGCGuardRing.cc index fe6c951facbd9..977bf0e044428 100644 --- a/SimG4CMS/Calo/src/HGCGuardRing.cc +++ b/SimG4CMS/Calo/src/HGCGuardRing.cc @@ -25,9 +25,13 @@ HGCGuardRing::HGCGuardRing(const HGCalDDDConstants& hgc) bool HGCGuardRing::exclude(G4ThreeVector& point, int zside, int frontBack, int layer, int waferU, int waferV) { bool check(false); - if ((modeUV_ == HGCalGeometryMode::Hexagon8Module) || (modeUV_ == HGCalGeometryMode::Hexagon8Cassette)) { + if (hgcons_.waferHexagon8Module()) { int index = HGCalWaferIndex::waferIndex(layer, waferU, waferV); int partial = HGCalWaferType::getPartial(index, hgcons_.getParameter()->waferInfoMap_); +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("HGCSim") << "HGCGuardRing:: Layer " << layer << " wafer " << waferU << ":" << waferV << " index " + << index << " partial " << partial; +#endif if (partial == HGCalTypes::WaferFull) { double dx = std::abs(point.x()); double dy = std::abs(point.y()); @@ -44,8 +48,11 @@ bool HGCGuardRing::exclude(G4ThreeVector& point, int zside, int frontBack, int l << HGCalTypes::WaferFull << " x " << dx << ":" << xmax_ << " y " << dy << ":" << ymax_ << " check " << check; #endif - } else { + } else if (partial > 0) { int orient = HGCalWaferType::getOrient(index, hgcons_.getParameter()->waferInfoMap_); +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("HGCSim") << "HGCGuardRing:: Orient " << orient << " Mode " << modeUV_; +#endif if (modeUV_ == HGCalGeometryMode::Hexagon8Module) { std::vector > wxy = HGCalWaferMask::waferXY(partial, orient, zside, waferSize_, offset_, 0.0, 0.0, v17OrLess_); @@ -73,6 +80,8 @@ bool HGCGuardRing::exclude(G4ThreeVector& point, int zside, int frontBack, int l edm::LogVerbatim("HGCSim") << st1.str(); #endif } + } else { + check = true; } } return check; diff --git a/SimG4CMS/Calo/src/HGCGuardRingPartial.cc 
b/SimG4CMS/Calo/src/HGCGuardRingPartial.cc index bc0d4e27a1dce..fea159c7001a5 100644 --- a/SimG4CMS/Calo/src/HGCGuardRingPartial.cc +++ b/SimG4CMS/Calo/src/HGCGuardRingPartial.cc @@ -4,6 +4,7 @@ #include "Geometry/HGCalCommonData/interface/HGCalWaferType.h" #include "Geometry/HGCalCommonData/interface/HGCalTypes.h" #include +#include //#define EDM_ML_DEBUG @@ -25,28 +26,41 @@ HGCGuardRingPartial::HGCGuardRingPartial(const HGCalDDDConstants& hgc) bool HGCGuardRingPartial::exclude(G4ThreeVector& point, int zside, int frontBack, int layer, int waferU, int waferV) { bool check(false); - if (modeUV_ == HGCalGeometryMode::Hexagon8Cassette) { + if ((modeUV_ == HGCalGeometryMode::Hexagon8Cassette) || (modeUV_ == HGCalGeometryMode::Hexagon8CalibCell)) { int index = HGCalWaferIndex::waferIndex(layer, waferU, waferV); int partial = HGCalWaferType::getPartial(index, hgcons_.getParameter()->waferInfoMap_); int type = HGCalWaferType::getType(index, hgcons_.getParameter()->waferInfoMap_); +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("HGCSim") << "HGCGuardRingPatial:: Layer " << layer << " wafer " << waferU << ":" << waferV + << " index " << index << " partial " << partial << " type " << type; +#endif if (partial == HGCalTypes::WaferFull) { return (check); + } else if (partial < 0) { + return true; } else { int orient = HGCalWaferType::getOrient(index, hgcons_.getParameter()->waferInfoMap_); int placement = HGCalCell::cellPlacementIndex(zside, frontBack, orient); - double delX = 0.5 * waferSize_; - double delY = 2 * delX / sqrt3_; - double dx = (zside > 0) ? 
-point.x() : point.x(); + double dx = point.x(); double dy = point.y(); - double tresh = std::abs(offset_ / cos_1[placement]); +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("HGCSim") << "HGCGuardRingPatial:: orient " << orient << " placement " << placement << " dx " + << dx << " dy " << dy; +#endif if (type > 0) { - check |= std::abs(dy - (dx * tan_1[placement])) < tresh; - check |= std::abs(dy - (dx * tan_1[placement]) + ((HGCalTypes::c10 * delY * 0.5) / cos_1[placement])) < tresh; - check |= std::abs(dy * cot_1[placement] - (dx)) < tresh; + for (int ii = HGCalTypes::WaferPartLDOffset; + ii < (HGCalTypes::WaferPartLDOffset + HGCalTypes::WaferPartLDCount); + ii++) { + std::array criterion = HGCalWaferMask::maskCut(ii, placement, waferSize_, offset_, v17OrLess_); + check |= std::abs(criterion[0] * dy + criterion[1] * dx + criterion[2]) < criterion[3]; + } } else { - check |= std::abs((dy * cot_1[placement]) - dx + ((c22_ * delX) / cos_1[placement])) < tresh; - check |= std::abs(dy - (dx * tan_1[placement]) - ((c27_ * delY) / cos_1[placement])) < tresh; - check |= std::abs(dy - (dx * tan_1[placement]) + ((c27_ * delY) / cos_1[placement])) < tresh; + for (int ii = HGCalTypes::WaferPartHDOffset; + ii < (HGCalTypes::WaferPartHDOffset + HGCalTypes::WaferPartHDCount); + ii++) { + std::array criterion = HGCalWaferMask::maskCut(ii, placement, waferSize_, offset_, v17OrLess_); + check |= std::abs(criterion[0] * dy + criterion[1] * dx + criterion[2]) < criterion[3]; + } } } #ifdef EDM_ML_DEBUG diff --git a/SimG4CMS/Calo/src/HGCalNumberingScheme.cc b/SimG4CMS/Calo/src/HGCalNumberingScheme.cc index 787a82133e997..e976aa57de6b7 100644 --- a/SimG4CMS/Calo/src/HGCalNumberingScheme.cc +++ b/SimG4CMS/Calo/src/HGCalNumberingScheme.cc @@ -179,25 +179,30 @@ uint32_t HGCalNumberingScheme::getUnitID(int layer, int module, int cell, int iz #ifdef EDM_ML_DEBUG } else { edm::LogVerbatim("HGCSim") << "Radius/Phi " << id[0] << ":" << id[1] << " Type " << id[2] << " Layer|iz " << layer - << ":" << 
iz << " ERROR"; + << ":" << iz << " for i/p Layer " << layer << " module " << module << " cell " << cell + << " iz " << iz << " pos " << pos << " wt " << wt << " ERROR"; #endif } } #ifdef EDM_ML_DEBUG - bool matchOnly = ((mode_ == HGCalGeometryMode::Hexagon8Module) || (mode_ == HGCalGeometryMode::Hexagon8Cassette)); + bool matchOnly = hgcons_.waferHexagon8Module(); bool debug = hgcons_.waferHexagon8File(); if (debug) edm::LogVerbatim("HGCSim") << "HGCalNumberingScheme::i/p " << det_ << ":" << layer << ":" << module << ":" << cell << ":" << iz << ":" << pos.x() << ":" << pos.y() << ":" << pos.z() << " ID " << std::hex << index << std::dec << " wt " << wt; - checkPosition(index, pos, matchOnly, debug); + bool ok = checkPosition(index, pos, matchOnly, debug); + if (matchOnly && (!ok)) + edm::LogVerbatim("HGCSim") << "HGCalNumberingScheme::i/p " << det_ << ":" << layer << ":" << module << ":" << cell + << ":" << iz << ":" << pos.x() << ":" << pos.y() << ":" << pos.z() << " ID " << std::hex + << index << std::dec << " wt " << wt << " flag " << ok << " ERROR"; #endif return index; } -void HGCalNumberingScheme::checkPosition(uint32_t index, const G4ThreeVector& pos, bool matchOnly, bool debug) const { +bool HGCalNumberingScheme::checkPosition(uint32_t index, const G4ThreeVector& pos, bool matchOnly, bool debug) const { std::pair xy; - bool ok(false); + bool ok(false), iok(true); double z1(0), tolR(14.0), tolZ(1.0); int lay(-1); if (index == 0) { @@ -234,6 +239,8 @@ void HGCalNumberingScheme::checkPosition(uint32_t index, const G4ThreeVector& po : ""); if (matchOnly && match) ck = ""; + if (!ck.empty()) + iok = false; if (!(match && inok && outok) || debug) { edm::LogVerbatim("HGCSim") << "HGCalNumberingScheme::Detector " << det_ << " Layer " << lay << " R " << r2 << ":" << r1 << ":" << rrange.first << ":" << rrange.second << " Z " << z2 << ":" << z1 << ":" @@ -250,11 +257,17 @@ void HGCalNumberingScheme::checkPosition(uint32_t index, const G4ThreeVector& po double dx = 
(xx - xy.first); double dy = (pos.y() - xy.second); double dR = std::sqrt(dx * dx + dy * dy); - ck = (dR > tolR) ? " ***** ERROR *****" : ""; + if (dR > tolR) { + ck = " ***** ERROR *****"; + iok = false; + } else { + ck = ""; + } edm::LogVerbatim("HGCSim") << "HGCalNumberingScheme " << HGCSiliconDetId(index) << " original position " << xx << ":" << pos.y() << " derived " << xy.first << ":" << xy.second << " Difference " << dR << ck; } } } + return iok; } diff --git a/SimG4CMS/Calo/src/HGCalSD.cc b/SimG4CMS/Calo/src/HGCalSD.cc index c426eb6b0eace..0743754f9fc8c 100644 --- a/SimG4CMS/Calo/src/HGCalSD.cc +++ b/SimG4CMS/Calo/src/HGCalSD.cc @@ -334,11 +334,10 @@ void HGCalSD::update(const BeginOfJob* job) { } else { throw cms::Exception("Unknown", "HGCalSD") << "Cannot find HGCalDDDConstants for " << nameX_ << "\n"; } - if ((nHC_ > 1) && calibCells_) { + if ((nHC_ > 1) && calibCells_) newCollection(collName_[1], ps_); - cellOffset_ = std::make_unique( - waferSize_, hgcons_->getUVMax(0), hgcons_->getUVMax(1), guardRingOffset_, mouseBiteCut_); - } + cellOffset_ = std::make_unique( + waferSize_, hgcons_->getUVMax(0), hgcons_->getUVMax(1), guardRingOffset_, mouseBiteCut_); } void HGCalSD::initRun() {} diff --git a/SimG4CMS/Calo/test/python/minbias2023_cfg.py b/SimG4CMS/Calo/test/python/minbias2023_cfg.py new file mode 100644 index 0000000000000..f5ec22dd3e0b8 --- /dev/null +++ b/SimG4CMS/Calo/test/python/minbias2023_cfg.py @@ -0,0 +1,106 @@ +############################################################################### +# Way to use this: +# cmsRun minbias2023_cfg.py type=DDD +# +# Options for type: DDD, DD4hep +# +############################################################################### +import FWCore.ParameterSet.Config as cms +import os, sys, importlib, re, random +import FWCore.ParameterSet.VarParsing as VarParsing + +#################################################################### +### SETUP OPTIONS +options = VarParsing.VarParsing('standard') 
+options.register('type', + "DDD", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "type of operations: DDD, DD4hep") + +### get and parse the command line arguments +options.parseArguments() + +print(options) + +#################################################################### +# Use the options +if (options.type == "DD4hep"): + from Configuration.Eras.Era_Run3_2023_cff import Run3_2023 + process = cms.Process("Sim",Run3_2023) + geomfile = "Configuration.Geometry.GeometryDD4hepExtended2023_cff" + outfile = 'minbias_FTFP_BERT_EMM_DD4hep.root' +else: + from Configuration.Eras.Era_Run3_DDD_cff import Run3_DDD + from Configuration.Eras.Modifier_run3_egamma_2023_cff import run3_egamma_2023 + process = cms.Process("Sim",Run3_DDD,run3_egamma_2023) + geomfile = "Configuration.Geometry.GeometryExtended2023_cff" + outfile = 'minbias_FTFP_BERT_EMM_DDD.root' + +print("Geometry file: ", geomfile) +print("Output file: ", outfile) + +process.load("SimG4CMS.Calo.PythiaMinBias_cfi") +process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi") +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi") +process.load(geomfile) +process.load("Configuration.StandardSequences.MagneticField_cff") +process.load("Configuration.EventContent.EventContent_cff") +process.load('Configuration.StandardSequences.Generator_cff') +process.load('Configuration.StandardSequences.SimIdeal_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase1_2022_realistic', '') + +if 'MessageLogger' in process.__dict__: + process.MessageLogger.G4cerr=dict() + process.MessageLogger.SimG4CoreApplication=dict() + +process.source = cms.Source("EmptySource") + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(10) +) + +process.Timing = 
cms.Service("Timing") + +process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck", + oncePerEventMode = cms.untracked.bool(True), + showMallocInfo = cms.untracked.bool(True), + dump = cms.untracked.bool(True), + ignoreTotal = cms.untracked.int32(1) +) + +process.load("IOMC.RandomEngine.IOMC_cff") +process.RandomNumberGeneratorService.generator.initialSeed = 456789 +process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876 +process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789 +process.rndmStore = cms.EDProducer("RandomEngineStateProducer") + +process.TFileService = cms.Service("TFileService", + fileName = cms.string('minbias_FTFP_BERT_EMM.root') +) + +# Event output +process.output = cms.OutputModule("PoolOutputModule", + process.FEVTSIMEventContent, + fileName = cms.untracked.string(outfile) +) + +process.generation_step = cms.Path(process.pgen) +process.simulation_step = cms.Path(process.psim) +process.out_step = cms.EndPath(process.output) + +process.g4SimHits.Physics.type = 'SimG4Core/Physics/FTFP_BERT_EMM' + +# Schedule definition +process.schedule = cms.Schedule(process.generation_step, + process.simulation_step, + process.out_step + ) + +# filter all path with the production filter sequence +for path in process.paths: + getattr(process,path)._seq = process.generator * getattr(process,path)._seq + diff --git a/SimG4CMS/Calo/test/python/runHGC9_cfg.py b/SimG4CMS/Calo/test/python/runHGC9_cfg.py new file mode 100644 index 0000000000000..0168e6535f468 --- /dev/null +++ b/SimG4CMS/Calo/test/python/runHGC9_cfg.py @@ -0,0 +1,79 @@ +import FWCore.ParameterSet.Config as cms +from Configuration.Eras.Era_Phase2C17I13M9_cff import Phase2C17I13M9 +from Configuration.Eras.Modifier_phase2_hgcalOnly_cff import phase2_hgcalOnly +from Configuration.Eras.Modifier_phase2_hgcalV18_cff import phase2_hgcalV18 + +process = cms.Process("PROD",Phase2C17I13M9,phase2_hgcalOnly,phase2_hgcalV18) +process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi") 
+process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi") +process.load("Geometry.HGCalCommonData.testHGCalV18OReco_cff") +process.load("Configuration.StandardSequences.MagneticField_cff") +process.load("Configuration.EventContent.EventContent_cff") +process.load('Configuration.StandardSequences.Generator_cff') +process.load('Configuration.StandardSequences.SimIdeal_cff') +process.load('FWCore.MessageService.MessageLogger_cfi') +process.load('SimG4CMS.Calo.hgcalHitScintillator_cfi') +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic_T21', '') + +if hasattr(process,'MessageLogger'): + process.MessageLogger.HGCalGeom=dict() + process.MessageLogger.HGCalSim=dict() + process.MessageLogger.HGCSim=dict() + +process.load("IOMC.RandomEngine.IOMC_cff") +process.RandomNumberGeneratorService.generator.initialSeed = 456789 +process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876 +process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789 + +process.Timing = cms.Service("Timing") + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(50) +) + +process.source = cms.Source("EmptySource", + firstRun = cms.untracked.uint32(1), + firstEvent = cms.untracked.uint32(1) +) + +process.generator = cms.EDProducer("FlatRandomEGunProducer", + PGunParameters = cms.PSet( + PartID = cms.vint32(211), + MinEta = cms.double(1.50), + MaxEta = cms.double(2.20), + MinPhi = cms.double(-3.1415926), + MaxPhi = cms.double(-1.5707963), + MinE = cms.double(100.00), + MaxE = cms.double(100.00) + ), + Verbosity = cms.untracked.int32(0), + AddAntiParticle = cms.bool(True) +) + +process.output = cms.OutputModule("PoolOutputModule", + process.FEVTSIMEventContent, + fileName = cms.untracked.string('hgcV18O.root') +) + +process.hgcalHitScintillator.tileFileName = "extraTiles.txt" 
+process.g4SimHits.HGCScintSD.TileFileName = "extraTiles.txt" + +process.generation_step = cms.Path(process.pgen) +process.simulation_step = cms.Path(process.psim) +process.analysis_step = cms.Path(process.hgcalHitScintillator) +process.out_step = cms.EndPath(process.output) + +process.g4SimHits.Physics.type = 'SimG4Core/Physics/FTFP_BERT_EMM' + +# Schedule definition +process.schedule = cms.Schedule(process.generation_step, + process.simulation_step, + process.analysis_step, + process.out_step + ) + +# filter all path with the production filter sequence +for path in process.paths: + getattr(process,path)._seq = process.generator * getattr(process,path)._seq diff --git a/SimG4CMS/Calo/test/python/testHGCalMouseBite_cfg.py b/SimG4CMS/Calo/test/python/testHGCalMouseBite_cfg.py new file mode 100644 index 0000000000000..1f7cda275d83a --- /dev/null +++ b/SimG4CMS/Calo/test/python/testHGCalMouseBite_cfg.py @@ -0,0 +1,69 @@ +import FWCore.ParameterSet.Config as cms +import os, sys, imp, re +import FWCore.ParameterSet.VarParsing as VarParsing + +#################################################################### +### SETUP OPTIONS +options = VarParsing.VarParsing('standard') +options.register('type', + "V18", + VarParsing.VarParsing.multiplicity.singleton, + VarParsing.VarParsing.varType.string, + "type of operations: V16, V17, V17Shift") + +### get and parse the command line arguments +options.parseArguments() +print(options) + +from Configuration.Eras.Era_Phase2C17I13M9_cff import Phase2C17I13M9 + +process = cms.Process("HGCalMouseBiteTest",Phase2C17I13M9) + +#################################################################### +# Use the options +if (options.type == "V18"): + geomFile = "Geometry.HGCalCommonData.testHGCalV18Reco_cff" +elif (options.type == "V16"): + geomFile = "Configuration.Geometry.GeometryExtended2026D88_cff" +else: + geomFile = "Configuration.Geometry.GeometryExtended2026D92_cff" + +print("Geometry file: ", geomFile) + 
+process.load("SimGeneral.HepPDTESSource.pdt_cfi") +process.load(geomFile) +process.load('FWCore.MessageService.MessageLogger_cfi') + +if hasattr(process,'MessageLogger'): + process.MessageLogger.HGCalGeom=dict() + process.MessageLogger.HGCSim=dict() + +process.load("IOMC.RandomEngine.IOMC_cff") +process.RandomNumberGeneratorService.generator.initialSeed = 456789 + +process.source = cms.Source("EmptySource") + +process.generator = cms.EDProducer("FlatRandomEGunProducer", + PGunParameters = cms.PSet( + PartID = cms.vint32(14), + MinEta = cms.double(-3.5), + MaxEta = cms.double(3.5), + MinPhi = cms.double(-3.14159265359), + MaxPhi = cms.double(3.14159265359), + MinE = cms.double(9.99), + MaxE = cms.double(10.01) + ), + AddAntiParticle = cms.bool(False), + Verbosity = cms.untracked.int32(0), + firstRun = cms.untracked.uint32(1) + ) + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(1) +) + + +process.load("SimG4CMS.Calo.hgcalMouseBiteTester_cfi") + + +process.p1 = cms.Path(process.generator*process.hgcalMouseBiteTester) diff --git a/SimG4CMS/CherenkovAnalysis/BuildFile.xml b/SimG4CMS/CherenkovAnalysis/BuildFile.xml index a12305460cc41..80ab29da2e1e4 100644 --- a/SimG4CMS/CherenkovAnalysis/BuildFile.xml +++ b/SimG4CMS/CherenkovAnalysis/BuildFile.xml @@ -1,11 +1,11 @@ + - diff --git a/SimG4CMS/CherenkovAnalysis/interface/DreamSD.h b/SimG4CMS/CherenkovAnalysis/interface/DreamSD.h index 514d7c4a17e55..e143eb83bca89 100644 --- a/SimG4CMS/CherenkovAnalysis/interface/DreamSD.h +++ b/SimG4CMS/CherenkovAnalysis/interface/DreamSD.h @@ -11,8 +11,6 @@ #include "G4PhysicsFreeVector.hh" -#include - #include const int MAXPHOTONS = 500; // Maximum number of photons we can store diff --git a/SimG4CMS/CherenkovAnalysis/src/DreamSD.cc b/SimG4CMS/CherenkovAnalysis/src/DreamSD.cc index db3fc73ca3197..59f665de5cb5f 100644 --- a/SimG4CMS/CherenkovAnalysis/src/DreamSD.cc +++ b/SimG4CMS/CherenkovAnalysis/src/DreamSD.cc @@ -6,6 +6,7 @@ #include 
"DetectorDescription/Core/interface/DDValue.h" #include "DetectorDescription/DDCMS/interface/DDCompactView.h" #include "DetectorDescription/DDCMS/interface/DDFilteredView.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "G4LogicalVolume.hh" @@ -122,7 +123,7 @@ void DreamSD::initMap(const std::string &sd) { const cms::DDFilter filter("ReadOutName", sd); cms::DDFilteredView fv((*cpvDD4hep_), filter); while (fv.firstChild()) { - std::string name = static_cast(dd4hep::dd::noNamespace(fv.name())); + std::string name = DD4hep2DDDName::noNameSpace(static_cast(fv.name())); std::vector paras(fv.parameters()); #ifdef EDM_ML_DEBUG edm::LogVerbatim("EcalSim") << "DreamSD::initMap (for " << sd << "): Solid " << name << " Shape " @@ -179,7 +180,7 @@ void DreamSD::fillMap(const std::string &name, double length, double width) { for (auto lvcite = lvs->begin(); lvcite != lvs->end(); lvcite++) { edm::LogVerbatim("EcalSim") << name << " vs " << (*lvcite)->GetName(); std::string namex = static_cast((*lvcite)->GetName()); - if (name == static_cast(dd4hep::dd::noNamespace(namex))) { + if (name == DD4hep2DDDName::noNameSpace(static_cast(namex))) { lv = (*lvcite); break; } @@ -421,7 +422,7 @@ double DreamSD::getAverageNumberOfPhotons_(const double charge, bool DreamSD::setPbWO2MaterialProperties_(G4Material *aMaterial) { std::string pbWO2Name("E_PbWO4"); std::string name = static_cast(aMaterial->GetName()); - if (static_cast(dd4hep::dd::noNamespace(name)) != pbWO2Name) { // Wrong material! + if (DD4hep2DDDName::noNameSpace(name) != pbWO2Name) { // Wrong material! 
edm::LogWarning("EcalSim") << "This is not the right material: " << "expecting " << pbWO2Name << ", got " << aMaterial->GetName(); return false; diff --git a/SimG4CMS/Forward/interface/ZdcSD.h b/SimG4CMS/Forward/interface/ZdcSD.h index 80f301f96c138..8ea4948a5d878 100644 --- a/SimG4CMS/Forward/interface/ZdcSD.h +++ b/SimG4CMS/Forward/interface/ZdcSD.h @@ -26,9 +26,9 @@ class ZdcSD : public CaloSD { void initRun() override; double calculateCherenkovDeposit(const G4Step *); - double calculateMeanNumberOfPhotons(int, double, double); - double photonEnergyDist(int, double, double); - double generatePhotonEnergy(int, double, double); + double calculateMeanNumberOfPhotons(double, double, double); + double photonEnergyDist(double, double, double); + double generatePhotonEnergy(double, double, double); double pmtEfficiency(double); double convertEnergyToWavelength(double); diff --git a/SimG4CMS/Forward/src/ZdcSD.cc b/SimG4CMS/Forward/src/ZdcSD.cc index 8dc52abe76f7f..95f16c5db92ce 100644 --- a/SimG4CMS/Forward/src/ZdcSD.cc +++ b/SimG4CMS/Forward/src/ZdcSD.cc @@ -74,8 +74,6 @@ void ZdcSD::initRun() { } bool ZdcSD::ProcessHits(G4Step* aStep, G4TouchableHistory*) { - NaNTrap(aStep); - if (useShowerLibrary) getFromLibrary(aStep); @@ -98,6 +96,13 @@ bool ZdcSD::ProcessHits(G4Step* aStep, G4TouchableHistory*) { int primaryID = getTrackID(theTrack); currentID[0].setID(unitID, time, primaryID, depth); double energy = calculateCherenkovDeposit(aStep); + + // Russian Roulette + double wt2 = theTrack->GetWeight(); + if (wt2 > 0.0) { + energy *= wt2; + } + if (G4TrackToParticleID::isGammaElectronPositron(theTrack)) { edepositEM = energy; edepositHAD = 0; @@ -105,7 +110,7 @@ bool ZdcSD::ProcessHits(G4Step* aStep, G4TouchableHistory*) { edepositEM = 0; edepositHAD = energy; } - if (!hitExists(aStep, 0) && edepositEM + edepositHAD > 0.) { + if (!hitExists(aStep, 0) && energy > 0.) 
{ #ifdef EDM_ML_DEBUG G4ThreeVector pre = aStep->GetPreStepPoint()->GetPosition(); edm::LogVerbatim("ZdcSD") << pre.x() << " " << pre.y() << " " << pre.z(); @@ -175,14 +180,14 @@ double ZdcSD::getEnergyDeposit(const G4Step* aStep) { // preStepPoint information G4StepPoint* preStepPoint = aStep->GetPreStepPoint(); - G4VPhysicalVolume* currentPV = preStepPoint->GetPhysicalVolume(); - std::string nameVolume = ForwardName::getName(currentPV->GetName()); const G4ThreeVector& hitPoint = preStepPoint->GetPosition(); const G4ThreeVector& hit_mom = preStepPoint->GetMomentumDirection(); G4double stepL = aStep->GetStepLength() / cm; G4double beta = preStepPoint->GetBeta(); G4double charge = preStepPoint->GetCharge(); + if (charge == 0.0) + return 0.0; // theTrack information G4Track* theTrack = aStep->GetTrack(); @@ -210,6 +215,7 @@ double ZdcSD::getEnergyDeposit(const G4Step* aStep) { G4StepPoint* postStepPoint = aStep->GetPostStepPoint(); G4VPhysicalVolume* postPV = postStepPoint->GetPhysicalVolume(); std::string postnameVolume = ForwardName::getName(postPV->GetName()); + std::string nameVolume = preStepPoint->GetPhysicalVolume()->GetName(); edm::LogVerbatim("ForwardSim") << "ZdcSD:: getEnergyDeposit: \n" << " preStepPoint: " << nameVolume << "," << stepL << "," << stepE << "," << beta << "," << charge << "\n" @@ -218,7 +224,7 @@ double ZdcSD::getEnergyDeposit(const G4Step* aStep) { << " Etot(GeV)= " << theTrack->GetTotalEnergy() / CLHEP::GeV; #endif const double bThreshold = 0.67; - if ((beta > bThreshold) && (charge != 0) && (nameVolume == "ZDC_EMFiber" || nameVolume == "ZDC_HadFiber")) { + if (beta > bThreshold) { #ifdef EDM_ML_DEBUG edm::LogVerbatim("ForwardSim") << "ZdcSD:: getEnergyDeposit: pass "; #endif @@ -328,28 +334,11 @@ double ZdcSD::getEnergyDeposit(const G4Step* aStep) { << "," << charge << "," << beta << "," << stepL << "," << d_qz << "," << variant << "," << meanNCherPhot << "," << poissNCherPhot << "," << NCherPhot; #endif - // --constants----------------- 
- // << "," << photEnSpectrDE - // << "," << nMedium - // << "," << bThreshold - // << "," << thFibDirRad - // << "," << thFullReflRad - // << "," << effPMTandTransport - // --other variables----------- - // << "," << curprocess - // << "," << nameProcess - // << "," << name - // << "," << rad - // << "," << mat } else { // determine failure mode: beta, charge, and/or nameVolume if (beta <= bThreshold) edm::LogVerbatim("ForwardSim") << "ZdcSD:: getEnergyDeposit: fail beta=" << beta; - if (charge == 0) - edm::LogVerbatim("ForwardSim") << "ZdcSD:: getEnergyDeposit: fail charge=0"; - if (!(nameVolume == "ZDC_EMFiber" || nameVolume == "ZDC_HadFiber")) - edm::LogVerbatim("ForwardSim") << "ZdcSD:: getEnergyDeposit: fail nv=" << nameVolume; } return NCherPhot; @@ -370,131 +359,120 @@ const double HBARC = 6.582119514E-16 /*eV*s*/ * 2.99792458E8 /*m/s*/; // hbar * // Calculate the Cherenkov deposit corresponding to a G4Step double ZdcSD::calculateCherenkovDeposit(const G4Step* aStep) { - G4Material* material = aStep->GetTrack()->GetMaterial(); - - if (material->GetName() != "quartz") - return 0.0; // 0 deposit if material is not quartz - else { - const G4StepPoint* pPreStepPoint = aStep->GetPreStepPoint(); - const G4StepPoint* pPostStepPoint = aStep->GetPostStepPoint(); - const G4String volumeName = pPreStepPoint->GetTouchable()->GetVolume(0)->GetLogicalVolume()->GetName(); - - G4ThreeVector pre = pPreStepPoint->GetPosition(); - G4ThreeVector post = pPostStepPoint->GetPosition(); - - if ((post - pre).mag() < 1E-9) - return 0.0; + const G4StepPoint* pPreStepPoint = aStep->GetPreStepPoint(); + G4double charge = pPreStepPoint->GetCharge() / CLHEP::eplus; + if (charge == 0.0 || aStep->GetStepLength() < 1e-9 * CLHEP::mm) + return 0.0; - //Convert step coordinates to local (fiber) coodinates - const G4ThreeVector localPre = setToLocal(pre, pPreStepPoint->GetTouchable()); - const G4ThreeVector localPost = setToLocal(post, pPreStepPoint->GetTouchable()); - // Calculate the unit 
direction vector in local coordinates + const G4StepPoint* pPostStepPoint = aStep->GetPostStepPoint(); - const G4ThreeVector particleDirection = (localPost - localPre) / (localPost - localPre).mag(); + G4ThreeVector pre = pPreStepPoint->GetPosition(); + G4ThreeVector post = pPostStepPoint->GetPosition(); - const G4DynamicParticle* aParticle = aStep->GetTrack()->GetDynamicParticle(); - int charge = round(aParticle->GetDefinition()->GetPDGCharge()); + //Convert step coordinates to local (fiber) coodinates + const G4ThreeVector localPre = setToLocal(pre, pPreStepPoint->GetTouchable()); + const G4ThreeVector localPost = setToLocal(post, pPreStepPoint->GetTouchable()); - if (charge == 0) - return 0.0; + // Calculate the unit direction vector in local coordinates + const G4ThreeVector particleDirection = (localPost - localPre) / (localPost - localPre).mag(); - double beta = 0.5 * (pPreStepPoint->GetBeta() + pPostStepPoint->GetBeta()); - double stepLength = aStep->GetStepLength() / 1000; // Geant4 stepLength is in "mm" + double beta = 0.5 * (pPreStepPoint->GetBeta() + pPostStepPoint->GetBeta()); + double stepLength = aStep->GetStepLength() / 1000; // Geant4 stepLength is in "mm" - int nPhotons; // Number of Cherenkov photons + int nPhotons; // Number of Cherenkov photons - nPhotons = G4Poisson(calculateMeanNumberOfPhotons(charge, beta, stepLength)); + nPhotons = G4Poisson(calculateMeanNumberOfPhotons(charge, beta, stepLength)); - double totalE = 0.0; + double totalE = 0.0; - for (int i = 0; i < nPhotons; i++) { - // uniform refractive index in PMT range -> uniform energy distribution - double photonE = EMIN + G4UniformRand() * (EMAX - EMIN); - // UPDATE: taking into account dispersion relation -> energy distribution + for (int i = 0; i < nPhotons; ++i) { + // uniform refractive index in PMT range -> uniform energy distribution + double photonE = EMIN + G4UniformRand() * (EMAX - EMIN); + // UPDATE: taking into account dispersion relation -> energy distribution - if 
(G4UniformRand() > pmtEfficiency(convertEnergyToWavelength(photonE))) - continue; + if (G4UniformRand() > pmtEfficiency(convertEnergyToWavelength(photonE))) + continue; - double omega = G4UniformRand() * twopi; - double thetaC = acos(1.0 / (beta * RINDEX)); + double omega = G4UniformRand() * twopi; + double cosTheta = std::min(1.0 / (beta * RINDEX), 1.0); + double sinTheta = std::sqrt((1. - cosTheta) * (1.0 + cosTheta)); #ifdef EDM_ML_DEBUG - edm::LogVerbatim("ZdcSD") << "E_gamma: " << photonE << "\t omega: " << omega << "\t thetaC: " << thetaC; + edm::LogVerbatim("ZdcSD") << "E_gamma: " << photonE << "\t omega: " << omega << "\t thetaC: " << cosTheta; #endif - // Calculate momentum direction w.r.t primary particle (z-direction) - double px = photonE * sin(thetaC) * cos(omega); - double py = photonE * sin(thetaC) * sin(omega); - double pz = photonE * cos(thetaC); - G4ThreeVector photonMomentum(px, py, pz); + // Calculate momentum direction w.r.t primary particle (z-direction) + double px = photonE * sinTheta * std::cos(omega); + double py = photonE * sinTheta * std::sin(omega); + double pz = photonE * cosTheta; + G4ThreeVector photonMomentum(px, py, pz); #ifdef EDM_ML_DEBUG - edm::LogVerbatim("ZdcSD") << "pPR = (" << particleDirection.x() << "," << particleDirection.y() << "," - << particleDirection.z() << ")"; - edm::LogVerbatim("ZdcSD") << "pCH = (" << px << "," << py << "," << pz << ")"; + edm::LogVerbatim("ZdcSD") << "pPR = (" << particleDirection.x() << "," << particleDirection.y() << "," + << particleDirection.z() << ")"; + edm::LogVerbatim("ZdcSD") << "pCH = (" << px << "," << py << "," << pz << ")"; #endif - // Rotate to the fiber reference frame - photonMomentum.rotateUz(particleDirection); + // Rotate to the fiber reference frame + photonMomentum.rotateUz(particleDirection); #ifdef EDM_ML_DEBUG - edm::LogVerbatim("ZdcSD") << "pLAB = (" << photonMomentum.x() << "," << photonMomentum.y() << "," - << photonMomentum.z() << ")"; + edm::LogVerbatim("ZdcSD") << 
"pLAB = (" << photonMomentum.x() << "," << photonMomentum.y() << "," + << photonMomentum.z() << ")"; #endif - // Get random position along G4Step - G4ThreeVector photonPosition = localPre + G4UniformRand() * (localPost - localPre); - - // 2D vectors to calculate impact position (x*,y*) - G4TwoVector r0(photonPosition); - G4TwoVector v0(photonMomentum); - - double R = 0.3; /*mm, fiber radius*/ - double R2 = 0.3 * 0.3; - - if (r0.mag() < R && photonMomentum.z() < 0.0) { - // 2nd order polynomial coefficients - double a = v0.mag2(); - double b = 2.0 * r0 * v0; - double c = r0.mag2() - R2; - - if (a < 1E-6) - totalE += 1; //photonE /*eV*/; - else { - // calculate intersection point - solving 2nd order polynomial - double t = (-b + sqrt(b * b - 4.0 * a * c)) / (2.0 * a); - G4ThreeVector n(r0.x() + v0.x() * t, r0.y() + v0.y() * t, 0.0); // surface normal - double cosTheta = (n * photonMomentum) / (n.mag() * photonE); // cosine of incident angle - - if (cosTheta >= NAperRINDEX) // lightguide condition - totalE += 1; //photonE /*eV*/; - } + // Get random position along G4Step + G4ThreeVector photonPosition = localPre + G4UniformRand() * (localPost - localPre); + + // 2D vectors to calculate impact position (x*,y*) + G4TwoVector r0(photonPosition); + G4TwoVector v0(photonMomentum); + + double R = 0.3; /*mm, fiber radius*/ + double R2 = 0.3 * 0.3; + + if (r0.mag() < R && photonMomentum.z() < 0.0) { + // 2nd order polynomial coefficients + double a = v0.mag2(); + double b = 2.0 * r0 * v0; + double c = r0.mag2() - R2; + + if (a < 1E-6) + totalE += 1; //photonE /*eV*/; + else { + // calculate intersection point - solving 2nd order polynomial + double t = (-b + sqrt(b * b - 4.0 * a * c)) / (2.0 * a); + G4ThreeVector n(r0.x() + v0.x() * t, r0.y() + v0.y() * t, 0.0); // surface normal + double cosTheta = (n * photonMomentum) / (n.mag() * photonE); // cosine of incident angle + + if (cosTheta >= NAperRINDEX) // lightguide condition + totalE += 1; //photonE /*eV*/; } + } #ifdef 
EDM_ML_DEBUG - edm::LogVerbatim("ZdcSD") << "r = (" << photonPosition.x() << "," << photonPosition.y() << "," - << photonPosition.z() << ")" << std::endl; + edm::LogVerbatim("ZdcSD") << "r = (" << photonPosition.x() << "," << photonPosition.y() << "," << photonPosition.z() + << ")" << std::endl; #endif - } + } #ifdef EDM_ML_DEBUG - if (nPhotons > 30) { - edm::LogVerbatim("ZdcSD") << totalE; + if (nPhotons > 30) { + edm::LogVerbatim("ZdcSD") << totalE; - if (totalE > 0) - edm::LogVerbatim("ZdcSD") << pre.x() << " " << pre.y() << " " << pre.z() << " " << totalE << std::endl; - } -#endif - return totalE; + if (totalE > 0) + edm::LogVerbatim("ZdcSD") << pre.x() << " " << pre.y() << " " << pre.z() << " " << totalE; } +#endif + return totalE; } // Calculate mean number of Cherenkov photons in the sensitivity range (300-650 nm) // for a given step length for a particle with given charge and beta -double ZdcSD::calculateMeanNumberOfPhotons(int charge, double beta, double stepLength) { +double ZdcSD::calculateMeanNumberOfPhotons(double charge, double beta, double stepLength) { // Return mean number of Cherenkov photons return (ALPHA * charge * charge * stepLength) / HBARC * (EMAX - EMIN) * (1.0 - 1.0 / (beta * beta * RINDEX * RINDEX)); } // Evaluate photon pdf -double ZdcSD::photonEnergyDist(int charge, double beta, double E) { +double ZdcSD::photonEnergyDist(double charge, double beta, double E) { const std::vector ENERGY_TAB{1.75715, 1.81902, 1.88311, 1.94944, 2.0183, 2.08939, 2.16302, 2.23919, 2.31789, 2.39954, 2.48416, 2.57175, 2.66232, 2.75643, 2.85349, 2.95411, 3.05756, 3.16528, 3.2774, 3.39218, 3.5123, 3.6359, 3.76394, 3.89642, @@ -509,7 +487,7 @@ double ZdcSD::photonEnergyDist(int charge, double beta, double E) { } // Generate a photon with the given minimum energy accourding to the energy distribution -double ZdcSD::generatePhotonEnergy(int charge, double beta, double Emin) { +double ZdcSD::generatePhotonEnergy(double charge, double beta, double Emin) { double 
photonE; // Use rejection method diff --git a/SimG4CMS/HGCalTestBeam/plugins/BuildFile.xml b/SimG4CMS/HGCalTestBeam/plugins/BuildFile.xml index 22011317508cd..cc760d1b52380 100644 --- a/SimG4CMS/HGCalTestBeam/plugins/BuildFile.xml +++ b/SimG4CMS/HGCalTestBeam/plugins/BuildFile.xml @@ -12,11 +12,11 @@ + - diff --git a/SimG4CMS/HGCalTestBeam/plugins/HGCPassive.cc b/SimG4CMS/HGCalTestBeam/plugins/HGCPassive.cc index d9b19aeda0eda..496c52c29e1be 100644 --- a/SimG4CMS/HGCalTestBeam/plugins/HGCPassive.cc +++ b/SimG4CMS/HGCalTestBeam/plugins/HGCPassive.cc @@ -12,6 +12,7 @@ // to retreive hits #include "SimDataFormats/CaloHit/interface/PassiveHit.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/BeginOfEvent.h" #include "SimG4Core/Notification/interface/BeginOfRun.h" #include "SimG4Core/Notification/interface/Observer.h" @@ -23,7 +24,6 @@ #include "G4TransportationManager.hh" #include "G4TouchableHistory.hh" #include "G4Track.hh" -#include "DD4hep/Filter.h" #include #include @@ -157,9 +157,9 @@ void HGCPassive::update(const G4Step *aStep) { if (((aStep->GetPostStepPoint() == nullptr) || (aStep->GetTrack()->GetNextVolume() == nullptr)) && (aStep->IsLastStepInVolume())) { #ifdef EDM_ML_DEBUG - edm::LogVerbatim("HGCSim") << static_cast(dd4hep::dd::noNamespace(plv->GetName())) << " F|L Step " - << aStep->IsFirstStepInVolume() << ":" << aStep->IsLastStepInVolume() << " Position" - << aStep->GetPreStepPoint()->GetPosition() << " Track " + edm::LogVerbatim("HGCSim") << DD4hep2DDDName::noNameSpace(static_cast(plv->GetName())) + << " F|L Step " << aStep->IsFirstStepInVolume() << ":" << aStep->IsLastStepInVolume() + << " Position" << aStep->GetPreStepPoint()->GetPosition() << " Track " << aStep->GetTrack()->GetDefinition()->GetParticleName() << " at" << aStep->GetTrack()->GetPosition() << " Volume " << aStep->GetTrack()->GetVolume() << ":" << aStep->GetTrack()->GetNextVolume() << " Status " @@ -194,7 +194,7 @@ void 
HGCPassive::update(const G4Step *aStep) { auto it = (init_) ? mapLV_.find(plv) : findLV(plv); #ifdef EDM_ML_DEBUG edm::LogVerbatim("HGCSim") << "Level: " << level << ":" << i << " " - << static_cast(dd4hep::dd::noNamespace(plv->GetName())) + << DD4hep2DDDName::noNameSpace(static_cast(plv->GetName())) << " flag in the List " << (it != mapLV_.end()); #endif if (it != mapLV_.end()) { @@ -239,7 +239,7 @@ G4VPhysicalVolume *HGCPassive::getTopPV() { HGCPassive::volumeIterator HGCPassive::findLV(G4LogicalVolume *plv) { auto itr = mapLV_.find(plv); if (itr == mapLV_.end()) { - std::string name = static_cast(dd4hep::dd::noNamespace(plv->GetName())); + std::string name = DD4hep2DDDName::noNameSpace(static_cast(plv->GetName())); for (unsigned int k = 0; k < LVNames_.size(); ++k) { if (name.find(LVNames_[k]) != std::string::npos) { mapLV_[plv] = std::pair(k, name); @@ -249,7 +249,7 @@ HGCPassive::volumeIterator HGCPassive::findLV(G4LogicalVolume *plv) { } } if (topLV_ == nullptr) { - if (static_cast(dd4hep::dd::noNamespace(plv->GetName())) == motherName_) + if (DD4hep2DDDName::noNameSpace(static_cast(plv->GetName())) == motherName_) topLV_ = plv; } return itr; diff --git a/SimG4CMS/HGCalTestBeam/python/HGCalTB230SepXML_cfi.py b/SimG4CMS/HGCalTestBeam/python/HGCalTB230SepXML_cfi.py new file mode 100644 index 0000000000000..4273533fb9336 --- /dev/null +++ b/SimG4CMS/HGCalTestBeam/python/HGCalTB230SepXML_cfi.py @@ -0,0 +1,21 @@ +import FWCore.ParameterSet.Config as cms + +XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource", + geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml', + 'Geometry/CMSCommonData/data/rotations.xml', + 'Geometry/HGCalCommonData/data/hgcalMaterial/v2/hgcalMaterial.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/cms.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcal.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalBeam.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalcell.xml', + 
'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalwafer.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalEE.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalCons.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalConsData.xml', + 'Geometry/HGCalTBCommonData/data/TB230/Sep230/hgcalsense.xml', + 'Geometry/HGCalTBCommonData/data/TB230/hgcProdCuts.xml', + ), + rootNodeName = cms.string('cms:OCMS') +) + + diff --git a/SimG4CMS/HGCalTestBeam/test/HGCalTB230Jul_cfg.py b/SimG4CMS/HGCalTestBeam/test/HGCalTB230Aug_cfg.py similarity index 98% rename from SimG4CMS/HGCalTestBeam/test/HGCalTB230Jul_cfg.py rename to SimG4CMS/HGCalTestBeam/test/HGCalTB230Aug_cfg.py index d28b02d58f24c..efa5f0f0262a3 100644 --- a/SimG4CMS/HGCalTestBeam/test/HGCalTB230Jul_cfg.py +++ b/SimG4CMS/HGCalTestBeam/test/HGCalTB230Aug_cfg.py @@ -8,7 +8,7 @@ process.load('Configuration.StandardSequences.Services_cff') process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') process.load('Configuration.EventContent.EventContent_cff') -process.load('Geometry.HGCalTBCommonData.testTB230JulXML_cfi') +process.load('Geometry.HGCalTBCommonData.testTB230AugXML_cfi') process.load('Geometry.HGCalCommonData.hgcalEENumberingInitialization_cfi') process.load('Geometry.HGCalCommonData.hgcalEEParametersInitialization_cfi') process.load('Geometry.HcalTestBeamData.hcalTB06Parameters_cff') diff --git a/SimG4CMS/HGCalTestBeam/test/HGCalTB230Sep_cfg.py b/SimG4CMS/HGCalTestBeam/test/HGCalTB230Sep_cfg.py new file mode 100644 index 0000000000000..2d82d05bcca6e --- /dev/null +++ b/SimG4CMS/HGCalTestBeam/test/HGCalTB230Sep_cfg.py @@ -0,0 +1,134 @@ +import FWCore.ParameterSet.Config as cms +from Configuration.Eras.Modifier_hgcaltb_cff import hgcaltb + +process = cms.Process('SIM', hgcaltb) + +# import of standard configurations +process.load("FWCore.MessageService.MessageLogger_cfi") +process.load('Configuration.StandardSequences.Services_cff') +process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') 
+process.load('Configuration.EventContent.EventContent_cff') +process.load('Geometry.HGCalTBCommonData.testTB230SepXML_cfi') +process.load('Geometry.HGCalCommonData.hgcalEENumberingInitialization_cfi') +process.load('Geometry.HGCalCommonData.hgcalEEParametersInitialization_cfi') +process.load('Geometry.HcalTestBeamData.hcalTB06Parameters_cff') +process.load('Configuration.StandardSequences.MagneticField_0T_cff') +process.load('Configuration.StandardSequences.Generator_cff') +process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi') +process.load('GeneratorInterface.Core.genFilterSummary_cff') +process.load('Configuration.StandardSequences.SimIdeal_cff') +process.load('Configuration.StandardSequences.EndOfProcess_cff') +process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') +process.load('SimG4CMS.HGCalTestBeam.HGCalTB23Analyzer_cfi') + +process.maxEvents = cms.untracked.PSet( + input = cms.untracked.int32(1000) +) + +if 'MessageLogger' in process.__dict__: + process.MessageLogger.G4cerr=dict() + process.MessageLogger.G4cout=dict() + process.MessageLogger.HGCSim=dict() + process.MessageLogger.CaloSim=dict() + process.MessageLogger.FlatThetaGun=dict() + process.MessageLogger.FlatEvtVtx=dict() + +# Input source +process.source = cms.Source("EmptySource") + +process.options = cms.untracked.PSet( +) + +# Production Info +process.configurationMetadata = cms.untracked.PSet( + annotation = cms.untracked.string('SingleMuonE200_cfi nevts:10'), + name = cms.untracked.string('Applications'), + version = cms.untracked.string('$Revision: 1.19 $') +) + +# Output definition + +process.RAWSIMoutput = cms.OutputModule("PoolOutputModule", + SelectEvents = cms.untracked.PSet( + SelectEvents = cms.vstring('generation_step') + ), + dataset = cms.untracked.PSet( + dataTier = cms.untracked.string('GEN-SIM'), + filterName = cms.untracked.string('') + ), + eventAutoFlushCompressedSize = cms.untracked.int32(5242880), + fileName = 
cms.untracked.string('file:gensim.root'), + outputCommands = process.RAWSIMEventContent.outputCommands, + splitLevel = cms.untracked.int32(0) +) + +# Additional output definition +process.TFileService = cms.Service("TFileService", + fileName = cms.string('TBGenSimSep.root') + ) + +# Other statements +process.genstepfilter.triggerConditions=cms.vstring("generation_step") +from Configuration.AlCa.GlobalTag import GlobalTag +process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '') + +process.generator = cms.EDProducer("FlatRandomEThetaGunProducer", + AddAntiParticle = cms.bool(False), + PGunParameters = cms.PSet( + MinE = cms.double(199.99), + MaxE = cms.double(200.01), + MinTheta = cms.double(0.0), + MaxTheta = cms.double(0.0), + MinPhi = cms.double(-3.14159265359), + MaxPhi = cms.double(3.14159265359), + PartID = cms.vint32(11) + ), + Verbosity = cms.untracked.int32(1), + firstRun = cms.untracked.uint32(1), + psethack = cms.string('single electron E 10') +) +process.VtxSmeared.MinZ = -100.0 +process.VtxSmeared.MaxZ = -100.0 +#process.VtxSmeared.MinX = -1.0 +#process.VtxSmeared.MaxX = 1.0 +#process.VtxSmeared.MinY = -1.0 +#process.VtxSmeared.MaxY = 1.0 +process.g4SimHits.OnlySDs = ['HGCalSensitiveDetector', 'HcalTB06BeamDetector'] +process.g4SimHits.HGCSD.Detectors = 1 +process.g4SimHits.HGCSD.RejectMouseBite = False +process.g4SimHits.HGCSD.RotatedWafer = False + +process.g4SimHits.CaloTrkProcessing.TestBeam = True +process.g4SimHits.HCalSD.ForTBHCAL = True +process.g4SimHits.NonBeamEvent = True +process.g4SimHits.UseMagneticField = False + +process.g4SimHits.EventVerbose = 2 +process.g4SimHits.SteppingVerbosity = 2 +process.g4SimHits.StepVerboseThreshold= 0.1 +process.g4SimHits.VerboseEvents = [1] +process.g4SimHits.VertexNumber = [] +process.g4SimHits.VerboseTracks =[] + +# Path and EndPath definitions +process.generation_step = cms.Path(process.pgen) +process.simulation_step = cms.Path(process.psim) +process.genfiltersummary_step = 
cms.EndPath(process.genFilterSummary) +process.analysis_step = cms.Path(process.HGCalTB23Analyzer) +process.endjob_step = cms.EndPath(process.endOfProcess) +process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput) + +process.g4SimHits.Physics.type = 'SimG4Core/Physics/FTFP_BERT_EMN' + +# Schedule definition +process.schedule = cms.Schedule(process.generation_step, + process.simulation_step, +# process.analysis_step, + process.endjob_step, + process.RAWSIMoutput_step, + ) +# filter all path with the production filter sequence +for path in process.paths: + getattr(process,path)._seq = process.generator * getattr(process,path)._seq + + diff --git a/SimG4CMS/Muon/BuildFile.xml b/SimG4CMS/Muon/BuildFile.xml index 8244c9f2db440..6940021ab5bbe 100644 --- a/SimG4CMS/Muon/BuildFile.xml +++ b/SimG4CMS/Muon/BuildFile.xml @@ -1,8 +1,9 @@ - + + diff --git a/SimG4CMS/Muon/src/MuonG4Numbering.cc b/SimG4CMS/Muon/src/MuonG4Numbering.cc index ba37228633706..46092903411f3 100644 --- a/SimG4CMS/Muon/src/MuonG4Numbering.cc +++ b/SimG4CMS/Muon/src/MuonG4Numbering.cc @@ -1,10 +1,10 @@ #include "SimG4CMS/Muon/interface/MuonG4Numbering.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "CondFormats/GeometryObjects/interface/MuonOffsetMap.h" #include "Geometry/MuonNumbering/interface/MuonBaseNumber.h" #include "Geometry/MuonNumbering/interface/MuonGeometryConstants.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "DD4hep/Filter.h" #include "G4VPhysicalVolume.hh" #include "G4VTouchable.hh" #include "G4Step.hh" @@ -54,7 +54,7 @@ MuonBaseNumber MuonG4Numbering::PhysicalVolumeToBaseNumber(const G4Step* aStep) int copyno = vol->GetCopyNo(); int extra(0); if (dd4hep_ && (offMap_ != nullptr)) { - std::string namx = static_cast(dd4hep::dd::noNamespace(vol->GetName())); + std::string namx = DD4hep2DDDName::noNameSpace(static_cast(vol->GetName())); std::size_t last = namx.rfind('_'); std::string name = ((last == std::string::npos) ? 
namx : (namx.substr(0, last))); auto itr = offMap_->muonMap_.find(name); diff --git a/SimG4CMS/Tracker/src/TrackerG4SimHitNumberingScheme.cc b/SimG4CMS/Tracker/src/TrackerG4SimHitNumberingScheme.cc index 20aba434b730d..7ea0c220b4d9e 100644 --- a/SimG4CMS/Tracker/src/TrackerG4SimHitNumberingScheme.cc +++ b/SimG4CMS/Tracker/src/TrackerG4SimHitNumberingScheme.cc @@ -1,8 +1,8 @@ #include "SimG4CMS/Tracker/interface/TrackerG4SimHitNumberingScheme.h" #include "Geometry/TrackerNumberingBuilder/interface/GeometricDet.h" - +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "DD4hep/Filter.h" + #include "G4TransportationManager.hh" #include "G4Navigator.hh" #include "G4VTouchable.hh" @@ -62,9 +62,11 @@ void TrackerG4SimHitNumberingScheme::touchToNavStory(const G4VTouchable* v, int levels = v->GetHistoryDepth(); for (int k = 0; k <= levels; ++k) { - if (dd4hep::dd::noNamespace(v->GetVolume(k)->GetLogicalVolume()->GetName()) != "TOBInactive") { - st.emplace_back( - std::pair(v->GetVolume(k)->GetCopyNo(), v->GetVolume(k)->GetLogicalVolume()->GetName())); + if (DD4hep2DDDName::noNameSpace(static_cast(v->GetVolume(k)->GetLogicalVolume()->GetName())) != + "TOBInactive") { + st.emplace_back(std::pair( + v->GetVolume(k)->GetCopyNo(), + DD4hep2DDDName::noNameSpace(static_cast(v->GetVolume(k)->GetLogicalVolume()->GetName())))); #ifdef EDM_ML_DEBUG debugint.emplace_back(v->GetVolume(k)->GetCopyNo()); debugstring.emplace_back(v->GetVolume(k)->GetLogicalVolume()->GetName()); @@ -72,9 +74,9 @@ void TrackerG4SimHitNumberingScheme::touchToNavStory(const G4VTouchable* v, } } #ifdef EDM_ML_DEBUG - LogDebug("TrackerSimDebugNumbering") << " G4 TrackerG4SimHitNumberingScheme " << debugint.size(); + edm::LogVerbatim("TrackerSimDebugNumbering") << " G4 TrackerG4SimHitNumberingScheme " << debugint.size(); for (u_int32_t jj = 0; jj < debugstring.size(); jj++) - LogDebug("TrackerSimDebugNumbering") << " " << debugstring[jj]; + 
edm::LogVerbatim("TrackerSimDebugNumbering") << " " << debugstring[jj]; #endif } @@ -87,7 +89,7 @@ unsigned int TrackerG4SimHitNumberingScheme::g4ToNumberingScheme(const G4VToucha #ifdef EDM_ML_DEBUG dumpG4VPV(v); - LogDebug("TrackerSimDebugNumbering") << " Returning: " << directMap_[st]; + edm::LogVerbatim("TrackerSimDebugNumbering") << " Returning: " << directMap_[st]; #endif return directMap_[st]; @@ -96,9 +98,9 @@ unsigned int TrackerG4SimHitNumberingScheme::g4ToNumberingScheme(const G4VToucha void TrackerG4SimHitNumberingScheme::dumpG4VPV(const G4VTouchable* v) { int levels = v->GetHistoryDepth(); - LogDebug("TrackerSimDebugNumbering") << " NAME : " << v->GetVolume()->GetLogicalVolume()->GetName(); + edm::LogVerbatim("TrackerSimDebugNumbering") << " NAME : " << v->GetVolume()->GetLogicalVolume()->GetName(); for (int k = 0; k <= levels; k++) { - LogDebug("TrackerSimInfoNumbering") << " Hist: " << v->GetVolume(k)->GetLogicalVolume()->GetName() << " Copy " - << v->GetVolume(k)->GetCopyNo(); + edm::LogVerbatim("TrackerSimInfoNumbering") + << " Hist: " << v->GetVolume(k)->GetLogicalVolume()->GetName() << " Copy " << v->GetVolume(k)->GetCopyNo(); } } diff --git a/SimG4Core/Application/interface/Phase2SteppingAction.h b/SimG4Core/Application/interface/Phase2SteppingAction.h index c59c97adeebdc..645aab75f71b0 100644 --- a/SimG4Core/Application/interface/Phase2SteppingAction.h +++ b/SimG4Core/Application/interface/Phase2SteppingAction.h @@ -19,7 +19,7 @@ class CMSSteppingVerbose; class Phase2SteppingAction : public G4UserSteppingAction { public: - explicit Phase2SteppingAction(const CMSSteppingVerbose*, const edm::ParameterSet&, bool hasW); + explicit Phase2SteppingAction(const CMSSteppingVerbose*, const edm::ParameterSet&, bool, bool); ~Phase2SteppingAction() override = default; void UserSteppingAction(const G4Step* aStep) final; @@ -56,6 +56,7 @@ class Phase2SteppingAction : public G4UserSteppingAction { bool initialized{false}; bool killBeamPipe{false}; bool 
hasWatcher; + bool dd4hep_; std::vector maxTrackTimes, ekinMins; std::vector maxTimeNames, ekinNames, ekinParticles; @@ -64,6 +65,7 @@ class Phase2SteppingAction : public G4UserSteppingAction { std::vector deadRegions; std::vector ekinVolumes; std::vector ekinPDG; + G4String cmseName_, trackerName_, caloName_, btlName_, cms2ZDCName_; }; inline bool Phase2SteppingAction::isInsideDeadRegion(const G4Region* reg) const { diff --git a/SimG4Core/Application/interface/StackingAction.h b/SimG4Core/Application/interface/StackingAction.h index a058971e35c78..6e997fb25a14f 100644 --- a/SimG4Core/Application/interface/StackingAction.h +++ b/SimG4Core/Application/interface/StackingAction.h @@ -79,6 +79,8 @@ class StackingAction : public G4UserStackingAction { const G4Region* regionMuonIron{nullptr}; const G4Region* regionPreShower{nullptr}; const G4Region* regionCastor{nullptr}; + const G4Region* regionZDC{nullptr}; + const G4Region* regionHGcal{nullptr}; const G4Region* regionWorld{nullptr}; // Russian roulette energy limits @@ -96,6 +98,10 @@ class StackingAction : public G4UserStackingAction { double nRusRoPreShower; double gRusRoCastor; double nRusRoCastor; + double gRusRoZDC; + double nRusRoZDC; + double gRusRoHGcal; + double nRusRoHGcal; double gRusRoWorld; double nRusRoWorld; // flags diff --git a/SimG4Core/Application/interface/SteppingAction.h b/SimG4Core/Application/interface/SteppingAction.h index 4f62b36dda686..e5601908a5bc9 100644 --- a/SimG4Core/Application/interface/SteppingAction.h +++ b/SimG4Core/Application/interface/SteppingAction.h @@ -19,7 +19,7 @@ class CMSSteppingVerbose; class SteppingAction : public G4UserSteppingAction { public: - explicit SteppingAction(const CMSSteppingVerbose*, const edm::ParameterSet&, bool hasW); + explicit SteppingAction(const CMSSteppingVerbose*, const edm::ParameterSet&, bool, bool); ~SteppingAction() override = default; void UserSteppingAction(const G4Step* aStep) final; @@ -31,6 +31,7 @@ class SteppingAction : public 
G4UserSteppingAction { inline bool isInsideDeadRegion(const G4Region* reg) const; inline bool isOutOfTimeWindow(const G4Region* reg, const double& time) const; + inline bool isForZDC(const G4LogicalVolume* lv, int pdg) const; bool isLowEnergy(const G4LogicalVolume*, const G4Track*) const; void PrintKilledTrack(const G4Track*, const TrackStatus&) const; @@ -39,6 +40,7 @@ class SteppingAction : public G4UserSteppingAction { const G4VPhysicalVolume* calo{nullptr}; const CMSSteppingVerbose* steppingVerbose{nullptr}; const G4LogicalVolume* m_CMStoZDC{nullptr}; + const G4Region* m_ZDCRegion{nullptr}; double theCriticalEnergyForVacuum; double theCriticalDensity; double maxTrackTime; @@ -54,7 +56,9 @@ class SteppingAction : public G4UserSteppingAction { bool initialized{false}; bool killBeamPipe{false}; + bool m_CMStoZDCtransport; bool hasWatcher; + bool dd4hep_; std::vector maxTrackTimes, ekinMins; std::vector maxTimeNames, ekinNames, ekinParticles; @@ -63,6 +67,7 @@ class SteppingAction : public G4UserSteppingAction { std::vector deadRegions; std::vector ekinVolumes; std::vector ekinPDG; + std::string trackerName_, caloName_, cms2ZDCName_; }; inline bool SteppingAction::isInsideDeadRegion(const G4Region* reg) const { @@ -87,4 +92,8 @@ inline bool SteppingAction::isOutOfTimeWindow(const G4Region* reg, const double& return (time > tofM); } +inline bool SteppingAction::isForZDC(const G4LogicalVolume* lv, int pdg) const { + return (m_CMStoZDCtransport && lv == m_CMStoZDC && (pdg == 22 || pdg == 2112)); +} + #endif diff --git a/SimG4Core/Application/plugins/OscarMTProducer.cc b/SimG4Core/Application/plugins/OscarMTProducer.cc index 0d51b73a0a8df..807e59a4f1b8d 100644 --- a/SimG4Core/Application/plugins/OscarMTProducer.cc +++ b/SimG4Core/Application/plugins/OscarMTProducer.cc @@ -23,7 +23,6 @@ #include "SimG4Core/Application/interface/RunManagerMTWorker.h" #include "SimG4Core/Notification/interface/TmpSimEvent.h" #include "SimG4Core/Notification/interface/TmpSimVertex.h" 
-#include "SimG4Core/Notification/interface/TmpSimTrack.h" #include "SimG4Core/SensitiveDetector/interface/SensitiveTkDetector.h" #include "SimG4Core/SensitiveDetector/interface/SensitiveCaloDetector.h" @@ -40,9 +39,9 @@ #include "Randomize.hh" // for some reason void doesn't compile -class OscarMTProducer : public edm::stream::EDProducer, edm::RunCache > { +class OscarMTProducer : public edm::stream::EDProducer, edm::RunCache> { public: - typedef std::vector > Producers; + typedef std::vector> Producers; explicit OscarMTProducer(edm::ParameterSet const& p, const OscarMTMasterThread*); ~OscarMTProducer() override; @@ -122,71 +121,28 @@ OscarMTProducer::OscarMTProducer(edm::ParameterSet const& p, const OscarMTMaster assert(m_masterThread); m_masterThread->callConsumes(consumesCollector()); - // List of produced containers + // declair hit collections produces().setBranchAlias("SimTracks"); produces().setBranchAlias("SimVertices"); - produces("TrackerHitsPixelBarrelLowTof"); - produces("TrackerHitsPixelBarrelHighTof"); - produces("TrackerHitsTIBLowTof"); - produces("TrackerHitsTIBHighTof"); - produces("TrackerHitsTIDLowTof"); - produces("TrackerHitsTIDHighTof"); - produces("TrackerHitsPixelEndcapLowTof"); - produces("TrackerHitsPixelEndcapHighTof"); - produces("TrackerHitsTOBLowTof"); - produces("TrackerHitsTOBHighTof"); - produces("TrackerHitsTECLowTof"); - produces("TrackerHitsTECHighTof"); - - produces("TotemHitsT1"); - produces("TotemHitsT2Gem"); - produces("TotemHitsRP"); - produces("CTPPSPixelHits"); - produces("CTPPSTimingHits"); - produces("FP420SI"); - produces("BSCHits"); - produces("PLTHits"); - produces("BCM1FHits"); - produces("BHMHits"); - produces("FastTimerHitsBarrel"); - produces("FastTimerHitsEndcap"); - - produces("EcalHitsEB"); - produces("EcalHitsEE"); - produces("EcalHitsES"); - produces("HcalHits"); - produces("CaloHitsTk"); - produces("HGCHitsEE"); - produces("HGCHitsHEfront"); - produces("HGCHitsHEback"); - produces("CalibrationHGCHitsEE"); - 
produces("CalibrationHGCHitsHEfront"); - produces("CalibrationHGCHitsHEback"); - - produces("MuonDTHits"); - produces("MuonCSCHits"); - produces("MuonRPCHits"); - produces("MuonGEMHits"); - produces("MuonME0Hits"); - produces("CastorPL"); - produces("CastorFI"); - produces("CastorBU"); - produces("CastorTU"); - produces("EcalTBH4BeamHits"); - produces("HcalTB06BeamHits"); - produces("ZDCHITS"); - produces("ChamberHits"); - produces("FibreHits"); - produces("WedgeHits"); - produces("HFNoseHits"); - produces("TotemHitsT2Scint"); + + auto trackHits = p.getParameter>("TrackHits"); + for (auto const& ss : trackHits) { + produces(ss); + } + + auto caloHits = p.getParameter>("CaloHits"); + for (auto const& ss : caloHits) { + produces(ss); + } //register any products auto& producers = m_runManagerWorker->producers(); for (auto& ptr : producers) { ptr->registerProducts(producesCollector()); } - edm::LogVerbatim("SimG4CoreApplication") << "OscarMTProducer is constructed"; + edm::LogVerbatim("SimG4CoreApplication") + << "OscarMTProducer is constructed with hit collections:" << trackHits.size() << " tracking type; " + << caloHits.size() << " calo type; " << producers.size() << " watcher type."; } OscarMTProducer::~OscarMTProducer() { @@ -313,9 +269,9 @@ void OscarMTProducer::produce(edm::Event& e, const edm::EventSetup& es) { e.put(std::move(p1)); e.put(std::move(p2)); - for (auto& tracker : sTk) { + for (auto const& tracker : sTk) { const std::vector& v = tracker->getNames(); - for (auto& name : v) { + for (auto const& name : v) { std::unique_ptr product(new edm::PSimHitContainer); tracker->fillHits(*product, name); if (0 < m_verbose && product != nullptr && !product->empty()) @@ -323,9 +279,9 @@ void OscarMTProducer::produce(edm::Event& e, const edm::EventSetup& es) { e.put(std::move(product), name); } } - for (auto& calo : sCalo) { + for (auto const& calo : sCalo) { const std::vector& v = calo->getNames(); - for (auto& name : v) { + for (auto const& name : v) { 
std::unique_ptr product(new edm::PCaloHitContainer); calo->fillHits(*product, name); if (0 < m_verbose && product != nullptr && !product->empty()) diff --git a/SimG4Core/Application/python/g4SimHits_cfi.py b/SimG4Core/Application/python/g4SimHits_cfi.py index 8abb56bb2cb4c..24396c4b6691d 100644 --- a/SimG4Core/Application/python/g4SimHits_cfi.py +++ b/SimG4Core/Application/python/g4SimHits_cfi.py @@ -92,7 +92,9 @@ ThresholdForGeometryExceptions = cms.double(0.1), ## in GeV TraceExceptions = cms.bool(False), CheckGeometry = cms.untracked.bool(False), - OnlySDs = cms.vstring('ZdcSensitiveDetector', 'TotemT2ScintSensitiveDetector', 'TotemSensitiveDetector', 'RomanPotSensitiveDetector', 'PLTSensitiveDetector', 'MuonSensitiveDetector', 'MtdSensitiveDetector', 'BCM1FSensitiveDetector', 'EcalSensitiveDetector', 'CTPPSSensitiveDetector', 'BSCSensitiveDetector', 'CTPPSDiamondSensitiveDetector', 'FP420SensitiveDetector', 'BHMSensitiveDetector', 'CastorSensitiveDetector', 'CaloTrkProcessing', 'HcalSensitiveDetector', 'TkAccumulatingSensitiveDetector'), + OnlySDs = cms.vstring('BSCSensitiveDetector','BCM1FSensitiveDetector','BHMSensitiveDetector','CTPPSDiamondSensitiveDetector','CTPPSSensitiveDetector','CaloTrkProcessing','CastorSensitiveDetector','EcalSensitiveDetector','HcalSensitiveDetector','MuonSensitiveDetector','PLTSensitiveDetector','RomanPotSensitiveDetector','TkAccumulatingSensitiveDetector','TotemSensitiveDetector','TotemT2ScintSensitiveDetector','ZdcSensitiveDetector'), + TrackHits = 
cms.vstring('BCM1FHits','BHMHits','BSCHits','CTPPSPixelHits','CTPPSTimingHits','MuonCSCHits','MuonDTHits','MuonGEMHits','MuonME0Hits','MuonRPCHits','PLTHits','TotemHitsRP','TotemHitsT1','TrackerHitsPixelEndcapLowTof','TrackerHitsPixelEndcapHighTof','TrackerHitsPixelBarrelLowTof','TrackerHitsPixelBarrelHighTof','TrackerHitsTECLowTof','TrackerHitsTECHighTof','TrackerHitsTIBLowTof','TrackerHitsTIBHighTof','TrackerHitsTIDLowTof','TrackerHitsTIDHighTof','TrackerHitsTOBLowTof','TrackerHitsTOBHighTof'), + CaloHits = cms.vstring('CaloHitsTk','CastorBU','CastorFI','CastorPL','CastorTU','EcalHitsEB','EcalHitsEE','EcalHitsES','HcalHits','ZDCHITS'), Init = cms.PSet( DefaultVoxelDensity = cms.double(2.0), VoxelRegions = cms.vstring(), @@ -314,6 +316,8 @@ RusRoMuonIronGamma = cms.double(0.3), RusRoPreShowerGamma = cms.double(0.3), RusRoCastorGamma = cms.double(0.3), + RusRoZDCGamma = cms.double(0.3), + RusRoHGcalGamma = cms.double(1.3), RusRoWorldGamma = cms.double(0.3), RusRoNeutronEnergyLimit = cms.double(10.0), ## (MeV) RusRoEcalNeutron = cms.double(0.1), @@ -321,6 +325,8 @@ RusRoMuonIronNeutron = cms.double(0.1), RusRoPreShowerNeutron = cms.double(0.1), RusRoCastorNeutron = cms.double(0.1), + RusRoZDCNeutron = cms.double(0.1), + RusRoHGcalNeutron = cms.double(1.1), RusRoWorldNeutron = cms.double(0.1), RusRoProtonEnergyLimit = cms.double(0.0), RusRoEcalProton = cms.double(1.0), @@ -338,7 +344,13 @@ ), SteppingAction = cms.PSet( common_maximum_time, + CMStoZDCtransport = cms.bool(False), MaxNumberOfSteps = cms.int32(20000), + CMSName = cms.string('CMSE'), + TrackerName = cms.string('Tracker'), + CaloName = cms.string('CALO'), + BTLName = cms.string('BarrelTimingLayer'), + CMS2ZDCName = cms.string('CMStoZDC'), EkinNames = cms.vstring(), EkinThresholds = cms.vdouble(), EkinParticles = cms.vstring() @@ -592,8 +604,8 @@ ), ZdcSD = cms.PSet( Verbosity = cms.int32(0), - UseShowerLibrary = cms.bool(True), - UseShowerHits = cms.bool(False), + UseShowerLibrary = cms.bool(False), + 
UseShowerHits = cms.bool(True), FiberDirection = cms.double(45.0), ZdcHitEnergyCut = cms.double(10.0) ), @@ -670,12 +682,18 @@ from Configuration.Eras.Modifier_run3_common_cff import run3_common run3_common.toModify( g4SimHits, CastorSD = dict( useShowerLibrary = False ) ) run3_common.toModify( g4SimHits, LHCTransport = True ) +run3_common.toModify( g4SimHits, + OnlySDs = ['BSCSensitiveDetector','BCM1FSensitiveDetector','BHMSensitiveDetector','CTPPSDiamondSensitiveDetector','CTPPSSensitiveDetector','CaloTrkProcessing','EcalSensitiveDetector','HcalSensitiveDetector','MuonSensitiveDetector','PLTSensitiveDetector','RomanPotSensitiveDetector','TkAccumulatingSensitiveDetector','TotemSensitiveDetector','TotemT2ScintSensitiveDetector','ZdcSensitiveDetector'], + TrackHits = ['BCM1FHits','BHMHits','BSCHits','CTPPSPixelHits','CTPPSTimingHits','MuonCSCHits','MuonDTHits','MuonGEMHits','MuonME0Hits','MuonRPCHits','PLTHits','TotemHitsRP','TotemHitsT1','TrackerHitsPixelEndcapLowTof','TrackerHitsPixelEndcapHighTof','TrackerHitsPixelBarrelLowTof','TrackerHitsPixelBarrelHighTof','TrackerHitsTECLowTof','TrackerHitsTECHighTof','TrackerHitsTIBLowTof','TrackerHitsTIBHighTof','TrackerHitsTIDLowTof','TrackerHitsTIDHighTof','TrackerHitsTOBLowTof','TrackerHitsTOBHighTof'], + CaloHits = ['CaloHitsTk','EcalHitsEB','EcalHitsEE','EcalHitsES','HcalHits','TotemHitsT2Scint','ZDCHITS'] ) ## -## Disable PPS from Run 3 PbPb runs +## Disable PPS from Run 3 PbPb runs and enable ZDC ## from Configuration.Eras.Modifier_pp_on_PbPb_run3_cff import pp_on_PbPb_run3 -pp_on_PbPb_run3.toModify( g4SimHits, LHCTransport = False ) +pp_on_PbPb_run3.toModify(g4SimHits, LHCTransport = False) +# SteppingAction = dict( +# CMStoZDCtransport = True) ) ## ## Change ECAL time slices @@ -697,7 +715,9 @@ ## from Configuration.Eras.Modifier_h2tb_cff import h2tb h2tb.toModify(g4SimHits, - OnlySDs = ['EcalSensitiveDetector', 'CaloTrkProcessing', 'HcalTB06BeamDetector', 'HcalSensitiveDetector'], + OnlySDs = 
['CaloTrkProcessing','EcalSensitiveDetector','FP420SensitiveDetector','HcalTB06BeamDetector','HcalSensitiveDetector'], + TrackHits = ['FP420SI'], + CaloHits = ['CaloHitsTk','ChamberHits','EcalHitsEB','EcalHitsEE','EcalHitsES','EcalTBH4BeamHits','FibreHits','HFNoseHits','HcalHits','HcalTB06BeamHits','WedgeHits'], ECalSD = dict( TestBeam = True ), CaloSD = dict( @@ -713,7 +733,8 @@ ## DD4hep migration ## from Configuration.ProcessModifiers.dd4hep_cff import dd4hep -dd4hep.toModify( g4SimHits, g4GeometryDD4hepSource = True ) +dd4hep.toModify( g4SimHits, + g4GeometryDD4hepSource = True ) ## ## Selection of SD's for Phase2, exclude PPS @@ -721,7 +742,9 @@ from Configuration.Eras.Modifier_phase2_common_cff import phase2_common phase2_common.toModify(g4SimHits, - OnlySDs = ['ZdcSensitiveDetector', 'RomanPotSensitiveDetector', 'PLTSensitiveDetector', 'MuonSensitiveDetector', 'MtdSensitiveDetector', 'BCM1FSensitiveDetector', 'EcalSensitiveDetector', 'CTPPSSensitiveDetector', 'HGCalSensitiveDetector', 'CTPPSDiamondSensitiveDetector', 'FP420SensitiveDetector', 'BHMSensitiveDetector', 'HFNoseSensitiveDetector', 'HGCScintillatorSensitiveDetector', 'CaloTrkProcessing', 'HcalSensitiveDetector', 'TkAccumulatingSensitiveDetector'], + OnlySDs = ['BCM1FSensitiveDetector','BHMSensitiveDetector','CTPPSDiamondSensitiveDetector','CTPPSSensitiveDetector','CaloTrkProcessing','EcalSensitiveDetector','HFNoseSensitiveDetector','HGCScintillatorSensitiveDetector','HGCalSensitiveDetector','HcalSensitiveDetector','MtdSensitiveDetector','MuonSensitiveDetector','PLTSensitiveDetector','RomanPotSensitiveDetector','TkAccumulatingSensitiveDetector','ZdcSensitiveDetector'], + TrackHits = 
['BCM1FHits','BHMHits','CTPPSPixelHits','CTPPSTimingHits','FastTimerHitsBarrel','FastTimerHitsEndcap','HFNoseHits','MuonCSCHits','MuonDTHits','MuonGEMHits','MuonME0Hits','MuonRPCHits','PLTHits','TrackerHitsPixelEndcapLowTof','TrackerHitsPixelEndcapHighTof','TrackerHitsPixelBarrelLowTof','TrackerHitsPixelBarrelHighTof','TrackerHitsTECLowTof','TrackerHitsTECHighTof','TrackerHitsTIBLowTof','TrackerHitsTIBHighTof','TrackerHitsTIDLowTof','TrackerHitsTIDHighTof','TrackerHitsTOBLowTof','TrackerHitsTOBHighTof'], + CaloHits = ["CalibrationHGCHitsEE",'CalibrationHGCHitsHEback',"CalibrationHGCHitsHEfront",'CaloHitsTk','EcalHitsEB','HFNoseHits',"HGCHitsEE","HGCHitsHEback","HGCHitsHEfront",'HcalHits','ZDCHITS'], LHCTransport = False, MuonSD = dict( HaveDemoChambers = False ) @@ -729,7 +752,9 @@ from Configuration.Eras.Modifier_hgcaltb_cff import hgcaltb hgcaltb.toModify(g4SimHits, - OnlySDs = ['AHcalSensitiveDetector', 'HGCSensitiveDetector', 'HGCalSensitiveDetector', 'HGCalTB1601SensitiveDetector', 'HcalTB06BeamDetector'], + OnlySDs = ['AHcalSensitiveDetector','CaloTrkProcessing','HFNoseSensitiveDetector','HGCSensitiveDetector','HGCalSensitiveDetector','HGCalTB1601SensitiveDetector','HcalTB06BeamDetector'], + TrackHits = ['FP420SI'], + CaloHits = ['CalibrationHGCHitsEE','CalibrationHGCHitsHEback','CalibrationHGCHitsHEfront','CaloHitsTk','ChamberHits','HFNoseHits','HGCHitsEE','HGCHitsHEback','HGCHitsHEfront','HcalHits','HcalTB06BeamHits','WedgeHits'], NonBeamEvent = True, UseMagneticField = False, CaloSD = dict( @@ -741,6 +766,14 @@ ForTBHCAL = True) ) +from Configuration.Eras.Modifier_phase2_hgcalOnly_cff import phase2_hgcalOnly +phase2_hgcalOnly.toModify(g4SimHits, + OnlySDs = ['CaloTrkProcessing','HGCScintillatorSensitiveDetector','HGCalSensitiveDetector'], + TrackHits = [], + CaloHits = ["CalibrationHGCHitsEE",'CalibrationHGCHitsHEback',"CalibrationHGCHitsHEfront","CaloHitsTk","HGCHitsEE","HGCHitsHEback","HGCHitsHEfront",], + LHCTransport = False +) + from 
Configuration.Eras.Modifier_phase2_hgcalV18_cff import phase2_hgcalV18 phase2_hgcalV18.toModify(g4SimHits, HGCSD = dict( diff --git a/SimG4Core/Application/src/EventAction.cc b/SimG4Core/Application/src/EventAction.cc index 603ef1817c009..fdbe3cf9d8716 100644 --- a/SimG4Core/Application/src/EventAction.cc +++ b/SimG4Core/Application/src/EventAction.cc @@ -2,7 +2,6 @@ #include "SimG4Core/Application/interface/SimRunInterface.h" #include "SimG4Core/Notification/interface/TmpSimEvent.h" #include "SimG4Core/Notification/interface/TmpSimVertex.h" -#include "SimG4Core/Notification/interface/TmpSimTrack.h" #include "SimG4Core/Notification/interface/BeginOfEvent.h" #include "SimG4Core/Notification/interface/EndOfEvent.h" #include "SimG4Core/Notification/interface/CMSSteppingVerbose.h" @@ -23,8 +22,6 @@ EventAction::EventAction(const edm::ParameterSet& p, m_debug(p.getUntrackedParameter("debug", false)) {} void EventAction::BeginOfEventAction(const G4Event* anEvent) { - m_trackManager->reset(); - BeginOfEvent e(anEvent); m_beginOfEventSignal(&e); @@ -41,17 +38,17 @@ void EventAction::BeginOfEventAction(const G4Event* anEvent) { void EventAction::EndOfEventAction(const G4Event* anEvent) { if (m_printRandom) { edm::LogVerbatim("SimG4CoreApplication") - << " EndOfEvent " << anEvent->GetEventID() << " Random number: " << G4UniformRand(); + << "EventACtion::EndOfEventAction: " << anEvent->GetEventID() << " Random number: " << G4UniformRand(); } if (!m_stopFile.empty() && std::ifstream(m_stopFile.c_str())) { edm::LogWarning("SimG4CoreApplication") - << "EndOfEventAction: termination signal received at event " << anEvent->GetEventID(); + << "EventACtion::EndOfEventAction: termination signal received at event " << anEvent->GetEventID(); // soft abort run m_runInterface->abortRun(true); } if (anEvent->GetNumberOfPrimaryVertex() == 0) { - edm::LogWarning("SimG4CoreApplication") << "EndOfEventAction: event " << anEvent->GetEventID() - << " must have failed (no G4PrimaryVertices found) 
and will be skipped "; + edm::LogWarning("SimG4CoreApplication") << "EventACtion::EndOfEventAction: event " << anEvent->GetEventID() + << " must have failed (no G4PrimaryVertices found) and will be skipped"; return; } diff --git a/SimG4Core/Application/src/GFlashHadronShowerModel.cc b/SimG4Core/Application/src/GFlashHadronShowerModel.cc index 1b52af762ffa9..4c868c6041446 100644 --- a/SimG4Core/Application/src/GFlashHadronShowerModel.cc +++ b/SimG4Core/Application/src/GFlashHadronShowerModel.cc @@ -2,6 +2,7 @@ #include "SimG4Core/Application/interface/GFlashHadronShowerModel.h" #include "SimG4Core/Application/interface/SteppingAction.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimGeneral/GFlash/interface/GflashHadronShowerProfile.h" #include "SimGeneral/GFlash/interface/GflashPiKShowerProfile.h" @@ -168,7 +169,7 @@ void GFlashHadronShowerModel::makeHits(const G4FastTrack& fastTrack) { if (aSensitive == nullptr) continue; - G4String nameCalor = aCurrentVolume->GetName(); + G4String nameCalor = (G4String)(DD4hep2DDDName::noNameSpace(lv->GetName())); nameCalor.assign(nameCalor, 0, 2); G4double samplingWeight = 1.0; if (nameCalor == "HB") { diff --git a/SimG4Core/Application/src/LowEnergyFastSimModel.cc b/SimG4Core/Application/src/LowEnergyFastSimModel.cc index 3f42c370e917c..f5e5c38f3ec2c 100644 --- a/SimG4Core/Application/src/LowEnergyFastSimModel.cc +++ b/SimG4Core/Application/src/LowEnergyFastSimModel.cc @@ -3,6 +3,7 @@ #include "SimG4Core/Application/interface/LowEnergyFastSimModel.h" #include "SimG4Core/Application/interface/TrackingAction.h" +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "G4VFastSimulationModel.hh" @@ -29,7 +30,7 @@ LowEnergyFastSimModel::LowEnergyFastSimModel(const G4String& name, G4Region* reg fMaterial = nullptr; auto table = G4Material::GetMaterialTable(); for (auto const& mat : *table) { - G4String nam = mat->GetName(); + G4String 
nam = (G4String)(DD4hep2DDDName::noNameSpace(mat->GetName())); size_t n = nam.size(); if (n > 4) { G4String sn = nam.substr(n - 5, 5); @@ -39,7 +40,7 @@ LowEnergyFastSimModel::LowEnergyFastSimModel(const G4String& name, G4Region* reg } } } - G4String nm = (nullptr == fMaterial) ? "not found!" : fMaterial->GetName(); + G4String nm = (nullptr == fMaterial) ? "not found!" : (G4String)(DD4hep2DDDName::noNameSpace(fMaterial->GetName())); edm::LogVerbatim("LowEnergyFastSimModel") << "LowEGFlash material: <" << nm << ">"; } @@ -92,10 +93,16 @@ void LowEnergyFastSimModel::DoIt(const G4FastTrack& fastTrack, G4FastStep& fastS spot.SetPosition(pos); fHitMaker.make(&spot, &fastTrack); + // Russian roulette + double wt2 = track->GetWeight(); + if (wt2 <= 0.0) { + wt2 = 1.0; + } + // tail energy deposition const G4double etail = energy - inPointEnergy; const G4int nspots = etail; - const G4double tailEnergy = etail / (nspots + 1); + const G4double tailEnergy = etail * wt2 / (nspots + 1); /* edm::LogVerbatim("LowEnergyFastSimModel") << track->GetDefinition()->GetParticleName() << " Ekin(MeV)=" << energy << " material: <" diff --git a/SimG4Core/Application/src/Phase2SteppingAction.cc b/SimG4Core/Application/src/Phase2SteppingAction.cc index d433163e3fc07..58441db55cb79 100644 --- a/SimG4Core/Application/src/Phase2SteppingAction.cc +++ b/SimG4Core/Application/src/Phase2SteppingAction.cc @@ -1,5 +1,5 @@ #include "SimG4Core/Application/interface/Phase2SteppingAction.h" - +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "SimG4Core/Notification/interface/CMSSteppingVerbose.h" @@ -13,8 +13,11 @@ #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/Utilities/interface/isFinite.h" -Phase2SteppingAction::Phase2SteppingAction(const CMSSteppingVerbose* sv, const edm::ParameterSet& p, bool hasW) - : steppingVerbose(sv), hasWatcher(hasW) { +Phase2SteppingAction::Phase2SteppingAction(const 
CMSSteppingVerbose* sv, + const edm::ParameterSet& p, + bool hasW, + bool dd4hep) + : steppingVerbose(sv), hasWatcher(hasW), dd4hep_(dd4hep) { theCriticalEnergyForVacuum = (p.getParameter("CriticalEnergyForVacuum") * CLHEP::MeV); if (0.0 < theCriticalEnergyForVacuum) { killBeamPipe = true; @@ -30,6 +33,11 @@ Phase2SteppingAction::Phase2SteppingAction(const CMSSteppingVerbose* sv, const e ekinMins = p.getParameter >("EkinThresholds"); ekinNames = p.getParameter >("EkinNames"); ekinParticles = p.getParameter >("EkinParticles"); + cmseName_ = (G4String)(p.getParameter("CMSName")); + trackerName_ = (G4String)(p.getParameter("TrackerName")); + caloName_ = (G4String)(p.getParameter("CaloName")); + btlName_ = (G4String)(p.getParameter("BTLName")); + cms2ZDCName_ = p.getParameter("CMS2ZDCName"); edm::LogVerbatim("SimG4CoreApplication") << "Phase2SteppingAction:: KillBeamPipe = " << killBeamPipe @@ -38,7 +46,9 @@ Phase2SteppingAction::Phase2SteppingAction(const CMSSteppingVerbose* sv, const e << " MaxTrackTime = " << maxTrackTime / CLHEP::ns << " ns;" << " MaxZCentralCMS = " << maxZCentralCMS / CLHEP::m << " m" << " MaxTrackTimeForward = " << maxTrackTimeForward / CLHEP::ns << " ns" - << " MaxNumberOfSteps = " << maxNumberOfSteps; + << " MaxNumberOfSteps = " << maxNumberOfSteps << "\n" + << " Names of special volumes: " << cmseName_ << " " << trackerName_ << " " << caloName_ << " " + << btlName_; numberTimes = maxTrackTimes.size(); if (numberTimes > 0) { @@ -246,14 +256,14 @@ bool Phase2SteppingAction::isLowEnergy(const G4LogicalVolume* lv, const G4Track* bool Phase2SteppingAction::initPointer() { const G4PhysicalVolumeStore* pvs = G4PhysicalVolumeStore::GetInstance(); for (auto const& pvcite : *pvs) { - const G4String& pvname = pvcite->GetName(); - if (pvname == "Tracker" || pvname == "tracker:Tracker_1") { + const std::string& pvname = (std::string)(DD4hep2DDDName::namePV(pvcite->GetName(), dd4hep_)); + if (pvname == trackerName_) { tracker = pvcite; - } else if (pvname 
== "CALO" || pvname == "caloBase:CALO_1") { + } else if (pvname == caloName_) { calo = pvcite; - } else if (pvname == "BarrelTimingLayer" || pvname == "btl:BarrelTimingLayer_1") { + } else if (pvname == btlName_) { btl = pvcite; - } else if (pvname == "CMSE" || pvname == "cms:CMSE_1") { + } else if (pvname == cmseName_) { cmse = pvcite; } if (tracker && calo && btl && cmse) @@ -266,9 +276,9 @@ bool Phase2SteppingAction::initPointer() { if (numberEkins > 0) { ekinVolumes.resize(numberEkins, nullptr); for (auto const& lvcite : *lvs) { - const G4String& lvname = lvcite->GetName(); + std::string lvname = (std::string)(DD4hep2DDDName::nameMatterLV(lvcite->GetName(), dd4hep_)); for (unsigned int i = 0; i < numberEkins; ++i) { - if (lvname == (G4String)(ekinNames[i])) { + if (lvname == ekinNames[i]) { ekinVolumes[i] = lvcite; break; } diff --git a/SimG4Core/Application/src/RunManagerMTWorker.cc b/SimG4Core/Application/src/RunManagerMTWorker.cc index 01038ab0b4ac1..1c5f00a395da7 100644 --- a/SimG4Core/Application/src/RunManagerMTWorker.cc +++ b/SimG4Core/Application/src/RunManagerMTWorker.cc @@ -328,8 +328,8 @@ void RunManagerMTWorker::initializeG4(RunManagerMT* runManagerMaster, const edm: auto sensDets = sim::attachSD( m_sdMakers, es, runManagerMaster->catalog(), m_p, m_tls->trackManager.get(), *(m_tls->registry.get())); - m_tls->sensTkDets.swap(sensDets.first); - m_tls->sensCaloDets.swap(sensDets.second); + m_tls->sensTkDets = sensDets.first; + m_tls->sensCaloDets = sensDets.second; edm::LogVerbatim("SimG4CoreApplication") << "RunManagerMTWorker::InitializeG4: Sensitive Detectors are built in thread " << thisID << " found " @@ -440,12 +440,13 @@ void RunManagerMTWorker::initializeUserActions() { // different stepping actions for Run2,3 and Phase2 G4UserSteppingAction* userSteppingAction; + bool dd4hep = m_p.getParameter("g4GeometryDD4hepSource"); if (m_isPhase2) { - auto ptr = new Phase2SteppingAction(m_sVerbose.get(), m_pSteppingAction, m_hasWatchers); + auto ptr = new 
Phase2SteppingAction(m_sVerbose.get(), m_pSteppingAction, m_hasWatchers, dd4hep); Connect(ptr); userSteppingAction = (G4UserSteppingAction*)ptr; } else { - auto ptr = new SteppingAction(m_sVerbose.get(), m_pSteppingAction, m_hasWatchers); + auto ptr = new SteppingAction(m_sVerbose.get(), m_pSteppingAction, m_hasWatchers, dd4hep); Connect(ptr); userSteppingAction = (G4UserSteppingAction*)ptr; } diff --git a/SimG4Core/Application/src/StackingAction.cc b/SimG4Core/Application/src/StackingAction.cc index 4339142883ea5..e70fe26c07b04 100644 --- a/SimG4Core/Application/src/StackingAction.cc +++ b/SimG4Core/Application/src/StackingAction.cc @@ -3,6 +3,7 @@ #include "SimG4Core/Notification/interface/MCTruthUtil.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "SimG4Core/Notification/interface/CMSSteppingVerbose.h" +#include "SimG4Core/Notification/interface/G4TrackToParticleID.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" @@ -57,6 +58,11 @@ StackingAction::StackingAction(const TrackingAction* trka, const edm::ParameterS nRusRoCastor = p.getParameter("RusRoCastorNeutron"); nRusRoWorld = p.getParameter("RusRoWorldNeutron"); + gRusRoZDC = p.getParameter("RusRoZDCGamma"); + gRusRoHGcal = p.getParameter("RusRoHGcalGamma"); + nRusRoZDC = p.getParameter("RusRoZDCNeutron"); + nRusRoHGcal = p.getParameter("RusRoHGcalNeutron"); + if (gRusRoEnerLim > 0.0 && (gRusRoEcal < 1.0 || gRusRoHcal < 1.0 || gRusRoMuonIron < 1.0 || gRusRoPreShower < 1.0 || gRusRoCastor < 1.0 || gRusRoWorld < 1.0)) { gRRactive = true; @@ -121,6 +127,8 @@ StackingAction::StackingAction(const TrackingAction* trka, const edm::ParameterS << " HCAL Prob= " << gRusRoHcal << "\n" << " MuonIron Prob= " << gRusRoMuonIron << "\n" << " PreShower Prob= " << gRusRoPreShower << "\n" + << " HGCAL Prob= " << gRusRoHGcal << "\n" + << " ZDC Prob= " << gRusRoZDC << "\n" << " CASTOR Prob= " << gRusRoCastor << "\n" << " World Prob= " << gRusRoWorld; } @@ -132,6 +140,8 @@ 
StackingAction::StackingAction(const TrackingAction* trka, const edm::ParameterS << " HCAL Prob= " << nRusRoHcal << "\n" << " MuonIron Prob= " << nRusRoMuonIron << "\n" << " PreShower Prob= " << nRusRoPreShower << "\n" + << " HGCAL Prob= " << nRusRoHGcal << "\n" + << " ZDC Prob= " << nRusRoZDC << "\n" << " CASTOR Prob= " << nRusRoCastor << "\n" << " World Prob= " << nRusRoWorld; } @@ -239,9 +249,11 @@ G4ClassificationOfNewTrack StackingAction::ClassifyNewTrack(const G4Track* aTrac } } - if (killDeltaRay && classification != fKill && subType == fIonisation) { + if (killDeltaRay && classification != fKill && aTrack->GetParentID() > 0 && + G4TrackToParticleID::isGammaElectronPositron(aTrack)) { classification = fKill; } + if (killInCalo && classification != fKill && isThisRegion(reg, caloRegions)) { classification = fKill; } @@ -275,7 +287,7 @@ G4ClassificationOfNewTrack StackingAction::ClassifyNewTrack(const G4Track* aTrac double currentWeight = aTrack->GetWeight(); if (1.0 >= currentWeight) { - double prob = 1.0; + double prob = 1.001; double elim = 0.0; // neutron @@ -289,6 +301,10 @@ G4ClassificationOfNewTrack StackingAction::ClassifyNewTrack(const G4Track* aTrac prob = nRusRoMuonIron; } else if (reg == regionPreShower) { prob = nRusRoPreShower; + } else if (reg == regionHGcal) { + prob = nRusRoHGcal; + } else if (reg == regionZDC) { + prob = nRusRoZDC; } else if (reg == regionCastor) { prob = nRusRoCastor; } else if (reg == regionWorld) { @@ -311,6 +327,10 @@ G4ClassificationOfNewTrack StackingAction::ClassifyNewTrack(const G4Track* aTrac prob = gRusRoHcal; } else if (reg == regionMuonIron) { prob = gRusRoMuonIron; + } else if (reg == regionHGcal) { + prob = gRusRoHGcal; + } else if (reg == regionZDC) { + prob = gRusRoZDC; } else if (reg == regionCastor) { prob = gRusRoCastor; } else if (reg == regionWorld) { @@ -373,6 +393,12 @@ void StackingAction::initPointer() { if ((gRusRoCastor < 1.0 || nRusRoCastor < 1.0) && rname == "CastorRegion") { regionCastor = reg; } 
+ if ((gRusRoZDC < 1.0 || nRusRoZDC < 1.0) && rname == "ZDCRegion") { + regionZDC = reg; + } + if ((gRusRoHGcal < 1.0 || nRusRoHGcal < 1.0) && rname == "HGCalRegion") { + regionHGcal = reg; + } if ((gRusRoWorld < 1.0 || nRusRoWorld < 1.0) && rname == "DefaultRegionForTheWorld") { regionWorld = reg; } diff --git a/SimG4Core/Application/src/SteppingAction.cc b/SimG4Core/Application/src/SteppingAction.cc index 4788345dba976..377a642149db3 100644 --- a/SimG4Core/Application/src/SteppingAction.cc +++ b/SimG4Core/Application/src/SteppingAction.cc @@ -1,5 +1,5 @@ #include "SimG4Core/Application/interface/SteppingAction.h" - +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/TrackInformation.h" #include "SimG4Core/Notification/interface/CMSSteppingVerbose.h" @@ -13,14 +13,15 @@ #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/Utilities/interface/isFinite.h" -//#define DebugLog +//#define EDM_ML_DEBUG -SteppingAction::SteppingAction(const CMSSteppingVerbose* sv, const edm::ParameterSet& p, bool hasW) - : steppingVerbose(sv), hasWatcher(hasW) { +SteppingAction::SteppingAction(const CMSSteppingVerbose* sv, const edm::ParameterSet& p, bool hasW, bool dd4hep) + : steppingVerbose(sv), hasWatcher(hasW), dd4hep_(dd4hep) { theCriticalEnergyForVacuum = (p.getParameter("CriticalEnergyForVacuum") * CLHEP::MeV); if (0.0 < theCriticalEnergyForVacuum) { killBeamPipe = true; } + m_CMStoZDCtransport = (p.getParameter("CMStoZDCtransport")); theCriticalDensity = (p.getParameter("CriticalDensity") * CLHEP::g / CLHEP::cm3); maxZCentralCMS = p.getParameter("MaxZCentralCMS") * CLHEP::m; maxTrackTime = p.getParameter("MaxTrackTime") * CLHEP::ns; @@ -32,6 +33,9 @@ SteppingAction::SteppingAction(const CMSSteppingVerbose* sv, const edm::Paramete ekinMins = p.getParameter >("EkinThresholds"); ekinNames = p.getParameter >("EkinNames"); ekinParticles = p.getParameter >("EkinParticles"); + trackerName_ = 
p.getParameter("TrackerName"); + caloName_ = p.getParameter("CaloName"); + cms2ZDCName_ = p.getParameter("CMS2ZDCName"); edm::LogVerbatim("SimG4CoreApplication") << "SteppingAction:: KillBeamPipe = " << killBeamPipe @@ -40,7 +44,8 @@ SteppingAction::SteppingAction(const CMSSteppingVerbose* sv, const edm::Paramete << " MaxTrackTime = " << maxTrackTime / CLHEP::ns << " ns;" << " MaxZCentralCMS = " << maxZCentralCMS / CLHEP::m << " m" << " MaxTrackTimeForward = " << maxTrackTimeForward / CLHEP::ns << " ns" - << " MaxNumberOfSteps = " << maxNumberOfSteps; + << " MaxNumberOfSteps = " << maxNumberOfSteps << " ZDC: " << m_CMStoZDCtransport << "\n" + << " Names of special volumes: " << trackerName_ << " " << caloName_; numberTimes = maxTrackTimes.size(); if (numberTimes > 0) { @@ -135,9 +140,17 @@ void SteppingAction::UserSteppingAction(const G4Step* aStep) { const G4LogicalVolume* lv = postStep->GetPhysicalVolume()->GetLogicalVolume(); const G4Region* theRegion = lv->GetRegion(); - // kill in dead regions - if (lv != m_CMStoZDC && isInsideDeadRegion(theRegion)) + // kill in dead regions except CMStoZDC volume + if (isInsideDeadRegion(theRegion) && !isForZDC(lv, std::abs(theTrack->GetParticleDefinition()->GetPDGEncoding()))) { tstat = sDeadRegion; + } + + // kill particles leaving ZDC + if (sAlive == sVeryForward && m_CMStoZDCtransport) { + const G4Region* preRegion = preStep->GetPhysicalVolume()->GetLogicalVolume()->GetRegion(); + if (preRegion == m_ZDCRegion && preRegion != theRegion) + tstat = sDeadRegion; + } // kill out of time if (sAlive == tstat) { @@ -171,7 +184,7 @@ void SteppingAction::UserSteppingAction(const G4Step* aStep) { } else { theTrack->SetTrackStatus(fStopAndKill); isKilled = true; -#ifdef DebugLog +#ifdef EDM_ML_DEBUG PrintKilledTrack(theTrack, tstat); #endif } @@ -200,26 +213,35 @@ bool SteppingAction::isLowEnergy(const G4LogicalVolume* lv, const G4Track* theTr bool SteppingAction::initPointer() { const G4PhysicalVolumeStore* pvs = 
G4PhysicalVolumeStore::GetInstance(); for (auto const& pvcite : *pvs) { - const G4String& pvname = pvcite->GetName(); - if (pvname == "Tracker" || pvname == "tracker:Tracker_1") { + const std::string& pvname = (std::string)(DD4hep2DDDName::namePV(pvcite->GetName(), dd4hep_)); + if (pvname == trackerName_) { tracker = pvcite; - } else if (pvname == "CALO" || pvname == "caloBase:CALO_1") { + } else if (pvname == caloName_) { calo = pvcite; } if (tracker && calo) break; } +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("SimG4CoreApplication") << pvs->size() << " Physical volume in the store"; + for (auto const& pvcite : *pvs) + edm::LogVerbatim("SimG4CoreApplication") << pvcite << " corresponds to " << pvcite->GetName(); +#endif const G4LogicalVolumeStore* lvs = G4LogicalVolumeStore::GetInstance(); - ekinVolumes.resize(numberEkins, nullptr); +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("SimG4CoreApplication") << lvs->size() << " Logical volume in the store"; + for (auto const& lvcite : *lvs) + edm::LogVerbatim("SimG4CoreApplication") << lvcite << " corresponds to " << lvcite->GetName(); +#endif for (auto const& lvcite : *lvs) { - const G4String& lvname = lvcite->GetName(); - if (lvname == "CMStoZDC") { + std::string lvname = (std::string)(DD4hep2DDDName::nameMatterLV(lvcite->GetName(), dd4hep_)); + if (lvname == cms2ZDCName_) { m_CMStoZDC = lvcite; } for (unsigned int i = 0; i < numberEkins; ++i) { - if (lvname == (G4String)(ekinNames[i])) { + if (lvname == ekinNames[i]) { ekinVolumes[i] = lvcite; break; } @@ -244,10 +266,15 @@ bool SteppingAction::initPointer() { } const G4RegionStore* rs = G4RegionStore::GetInstance(); - if (numberTimes > 0) { - maxTimeRegions.resize(numberTimes, nullptr); - for (auto const& rcite : *rs) { - const G4String& rname = rcite->GetName(); +#ifdef EDM_ML_DEBUG + edm::LogVerbatim("SimG4CoreApplication") << rs->size() << " Regions in the store"; + for (auto const& rcite : *rs) + edm::LogVerbatim("SimG4CoreApplication") << rcite << " corresponds to " << 
rcite->GetName(); +#endif + for (auto const& rcite : *rs) { + const G4String& rname = rcite->GetName(); + if (numberTimes > 0) { + maxTimeRegions.resize(numberTimes, nullptr); for (unsigned int i = 0; i < numberTimes; ++i) { if (rname == (G4String)(maxTimeNames[i])) { maxTimeRegions[i] = rcite; @@ -255,11 +282,8 @@ bool SteppingAction::initPointer() { } } } - } - if (ndeadRegions > 0) { - deadRegions.resize(ndeadRegions, nullptr); - for (auto const& rcite : *rs) { - const G4String& rname = rcite->GetName(); + if (ndeadRegions > 0) { + deadRegions.resize(ndeadRegions, nullptr); for (unsigned int i = 0; i < ndeadRegions; ++i) { if (rname == (G4String)(deadRegionNames[i])) { deadRegions[i] = rcite; @@ -267,6 +291,9 @@ bool SteppingAction::initPointer() { } } } + if (m_CMStoZDCtransport && rname == "ZDCRegion") { + m_ZDCRegion = rcite; + } } return true; } diff --git a/SimG4Core/Geometry/interface/DD4hep2DDDName.h b/SimG4Core/Geometry/interface/DD4hep2DDDName.h index 90dd713e00137..12914f5a1f44d 100644 --- a/SimG4Core/Geometry/interface/DD4hep2DDDName.h +++ b/SimG4Core/Geometry/interface/DD4hep2DDDName.h @@ -4,9 +4,10 @@ #include namespace DD4hep2DDDName { - std::string_view nameMatterLV(const std::string& name, bool dd4hep); + std::string noNameSpace(const std::string& name); + std::string nameMatterLV(const std::string& name, bool dd4hep); std::string nameSolid(const std::string& name, bool dd4hep); - std::string_view namePV(const std::string& name, bool dd4hep); + std::string namePV(const std::string& name, bool dd4hep); }; // namespace DD4hep2DDDName #endif diff --git a/SimG4Core/Geometry/src/DD4hep2DDDName.cc b/SimG4Core/Geometry/src/DD4hep2DDDName.cc index 9e4e852e26700..f050563b60645 100644 --- a/SimG4Core/Geometry/src/DD4hep2DDDName.cc +++ b/SimG4Core/Geometry/src/DD4hep2DDDName.cc @@ -1,14 +1,19 @@ #include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" -#include -std::string_view DD4hep2DDDName::nameMatterLV(const std::string& name, bool dd4hep) { - return 
(dd4hep ? (dd4hep::dd::noNamespace(name)) : name); +std::string DD4hep2DDDName::noNameSpace(const std::string& name) { + std::size_t found = name.find(':'); + std::string nam = (found == std::string::npos) ? name : name.substr(found + 1, (name.size() - found)); + return nam; +} + +std::string DD4hep2DDDName::nameMatterLV(const std::string& name, bool dd4hep) { + return (dd4hep ? (DD4hep2DDDName::noNameSpace(name)) : name); } std::string DD4hep2DDDName::nameSolid(const std::string& name, bool dd4hep) { if (!dd4hep) return name; - std::string nam = static_cast(dd4hep::dd::noNamespace(name)); + std::string nam = DD4hep2DDDName::noNameSpace(name); auto n = nam.find("_shape"); if (n != std::string::npos) nam = nam.substr(0, n); @@ -17,10 +22,10 @@ std::string DD4hep2DDDName::nameSolid(const std::string& name, bool dd4hep) { return nam; } -std::string_view DD4hep2DDDName::namePV(const std::string& name, bool dd4hep) { +std::string DD4hep2DDDName::namePV(const std::string& name, bool dd4hep) { if (!dd4hep) return name; - std::string_view nam = (dd4hep::dd::noNamespace(name)); + std::string nam = DD4hep2DDDName::noNameSpace(name); auto n = nam.rfind('_'); return ((n != std::string::npos) ? 
nam.substr(0, n) : nam); } diff --git a/SimG4Core/PhysicsLists/interface/CMSEmStandardPhysicsXS.h b/SimG4Core/PhysicsLists/interface/CMSEmStandardPhysicsXS.h index 58db48b435732..957fc79bd4be8 100644 --- a/SimG4Core/PhysicsLists/interface/CMSEmStandardPhysicsXS.h +++ b/SimG4Core/PhysicsLists/interface/CMSEmStandardPhysicsXS.h @@ -30,6 +30,7 @@ class CMSEmStandardPhysicsXS : public G4VPhysicsConstructor { G4double fSafetyFactor; G4double fLambdaLimit; G4MscStepLimitType fStepLimitType; + bool fG4HepEmActive; }; #endif diff --git a/SimG4Core/PhysicsLists/plugins/FTFPCMS_BERT_EMN.cc b/SimG4Core/PhysicsLists/plugins/FTFPCMS_BERT_EMN.cc index 8169310af657c..fec087f1ca73a 100644 --- a/SimG4Core/PhysicsLists/plugins/FTFPCMS_BERT_EMN.cc +++ b/SimG4Core/PhysicsLists/plugins/FTFPCMS_BERT_EMN.cc @@ -8,6 +8,7 @@ #include "G4IonPhysics.hh" #include "G4StoppingPhysics.hh" #include "G4HadronElasticPhysics.hh" +#include "G4HadronicParameters.hh" FTFPCMS_BERT_EMN::FTFPCMS_BERT_EMN(const edm::ParameterSet& p) : PhysicsList(p) { int ver = p.getUntrackedParameter("Verbosity", 0); @@ -28,12 +29,25 @@ FTFPCMS_BERT_EMN::FTFPCMS_BERT_EMN(const edm::ParameterSet& p) : PhysicsList(p) // Synchroton Radiation & GN Physics G4EmExtraPhysics* gn = new G4EmExtraPhysics(ver); RegisterPhysics(gn); + bool mu = p.getParameter("G4MuonPairProductionByMuon"); + gn->MuonToMuMu(mu); + edm::LogVerbatim("PhysicsList") << " Muon pair production by muons: " << mu; } // Decays this->RegisterPhysics(new G4DecayPhysics(ver)); if (hadPhys) { + bool ngen = p.getParameter("G4NeutronGeneralProcess"); + bool bc = p.getParameter("G4BCHadronicProcess"); + bool hn = p.getParameter("G4LightHyperNucleiTracking"); + auto param = G4HadronicParameters::Instance(); + param->SetEnableNeutronGeneralProcess(ngen); + param->SetEnableBCParticles(bc); + param->SetEnableHyperNuclei(hn); + edm::LogVerbatim("PhysicsList") << " Eneble neutron general process: " << ngen + << "\n Enable b- and c- hadron physics: " << bc + << "\n Enable 
light hyper-nuclei physics: " << hn; // Hadron Elastic scattering RegisterPhysics(new G4HadronElasticPhysics(ver)); diff --git a/SimG4Core/PhysicsLists/src/CMSEmStandardPhysics.cc b/SimG4Core/PhysicsLists/src/CMSEmStandardPhysics.cc index 4506ece7e8501..8679b7cbab274 100644 --- a/SimG4Core/PhysicsLists/src/CMSEmStandardPhysics.cc +++ b/SimG4Core/PhysicsLists/src/CMSEmStandardPhysics.cc @@ -4,13 +4,13 @@ #include "G4SystemOfUnits.hh" #include "G4ParticleDefinition.hh" +#include "G4LossTableManager.hh" #include "G4EmParameters.hh" #include "G4EmBuilder.hh" #include "G4ComptonScattering.hh" #include "G4GammaConversion.hh" #include "G4PhotoElectricEffect.hh" -#include "G4LivermorePhotoElectricModel.hh" #include "G4MscStepLimitType.hh" @@ -37,14 +37,12 @@ #include "G4PhysicsListHelper.hh" #include "G4BuilderType.hh" #include "G4GammaGeneralProcess.hh" -#include "G4LossTableManager.hh" #include "G4ProcessManager.hh" #include "G4TransportationWithMsc.hh" #include "G4RegionStore.hh" #include "G4Region.hh" -#include CMSEmStandardPhysics::CMSEmStandardPhysics(G4int ver, const edm::ParameterSet& p) : G4VPhysicsConstructor("CMSEmStandard_emm") { @@ -136,8 +134,6 @@ void CMSEmStandardPhysics::ConstructProcess() { // e- particle = G4Electron::Electron(); - G4eIonisation* eioni = new G4eIonisation(); - G4UrbanMscModel* msc1 = new G4UrbanMscModel(); G4WentzelVIModel* msc2 = new G4WentzelVIModel(); msc1->SetHighEnergyLimit(highEnergyLimit); @@ -203,13 +199,12 @@ void CMSEmStandardPhysics::ConstructProcess() { ssm->SetLowEnergyLimit(highEnergyLimit); ssm->SetActivationLowEnergyLimit(highEnergyLimit); - ph->RegisterProcess(eioni, particle); + ph->RegisterProcess(new G4eIonisation(), particle); ph->RegisterProcess(new G4eBremsstrahlung(), particle); ph->RegisterProcess(ss, particle); // e+ particle = G4Positron::Positron(); - eioni = new G4eIonisation(); msc1 = new G4UrbanMscModel(); msc2 = new G4WentzelVIModel(); @@ -274,7 +269,7 @@ void CMSEmStandardPhysics::ConstructProcess() { 
ssm->SetLowEnergyLimit(highEnergyLimit); ssm->SetActivationLowEnergyLimit(highEnergyLimit); - ph->RegisterProcess(eioni, particle); + ph->RegisterProcess(new G4eIonisation(), particle); ph->RegisterProcess(new G4eBremsstrahlung(), particle); ph->RegisterProcess(new G4eplusAnnihilation(), particle); ph->RegisterProcess(ss, particle); diff --git a/SimG4Core/PhysicsLists/src/CMSEmStandardPhysicsXS.cc b/SimG4Core/PhysicsLists/src/CMSEmStandardPhysicsXS.cc index def39f9031402..b3cdba6aa2fda 100644 --- a/SimG4Core/PhysicsLists/src/CMSEmStandardPhysicsXS.cc +++ b/SimG4Core/PhysicsLists/src/CMSEmStandardPhysicsXS.cc @@ -1,4 +1,5 @@ #include "SimG4Core/PhysicsLists/interface/CMSEmStandardPhysicsXS.h" +#include "SimG4Core/PhysicsLists/interface/CMSHepEmTrackingManager.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "G4SystemOfUnits.hh" @@ -10,35 +11,25 @@ #include "G4ComptonScattering.hh" #include "G4GammaConversion.hh" #include "G4PhotoElectricEffect.hh" -#include "G4RayleighScattering.hh" -#include "G4PEEffectFluoModel.hh" -#include "G4KleinNishinaModel.hh" -#include "G4LowEPComptonModel.hh" -#include "G4BetheHeitler5DModel.hh" -#include "G4LivermorePhotoElectricModel.hh" + +#include "G4MscStepLimitType.hh" #include "G4eMultipleScattering.hh" #include "G4hMultipleScattering.hh" -#include "G4MscStepLimitType.hh" +#include "G4eCoulombScatteringModel.hh" +#include "G4CoulombScattering.hh" +#include "G4WentzelVIModel.hh" #include "G4UrbanMscModel.hh" #include "G4GoudsmitSaundersonMscModel.hh" -#include "G4DummyModel.hh" -#include "G4WentzelVIModel.hh" -#include "G4CoulombScattering.hh" -#include "G4eCoulombScatteringModel.hh" #include "G4eIonisation.hh" #include "G4eBremsstrahlung.hh" -#include "G4Generator2BS.hh" -#include "G4SeltzerBergerModel.hh" -#include "G4ePairProduction.hh" -#include "G4UniversalFluctuation.hh" - #include "G4eplusAnnihilation.hh" #include "G4hIonisation.hh" #include "G4ionIonisation.hh" +#include "G4ParticleTable.hh" #include 
"G4Gamma.hh" #include "G4Electron.hh" #include "G4Positron.hh" @@ -53,9 +44,6 @@ #include "G4RegionStore.hh" #include "G4Region.hh" -#include "G4GammaGeneralProcess.hh" - -#include "G4SystemOfUnits.hh" CMSEmStandardPhysicsXS::CMSEmStandardPhysicsXS(G4int ver, const edm::ParameterSet& p) : G4VPhysicsConstructor("CMSEmStandard_emn") { @@ -65,12 +53,10 @@ CMSEmStandardPhysicsXS::CMSEmStandardPhysicsXS(G4int ver, const edm::ParameterSe param->SetDefaults(); param->SetVerbose(ver); param->SetApplyCuts(true); - param->SetMinEnergy(100 * CLHEP::eV); - param->SetNumberOfBinsPerDecade(20); param->SetStepFunction(0.8, 1 * CLHEP::mm); param->SetMscRangeFactor(0.2); param->SetMscStepLimitType(fMinimal); - param->SetFluo(true); + param->SetFluo(false); param->SetUseMottCorrection(true); // use Mott-correction for e-/e+ msc gs SetPhysicsType(bElectromagnetic); fRangeFactor = p.getParameter("G4MscRangeFactor"); @@ -88,6 +74,16 @@ CMSEmStandardPhysicsXS::CMSEmStandardPhysicsXS(G4int ver, const edm::ParameterSe double tcut = p.getParameter("G4TrackingCut") * CLHEP::MeV; param->SetLowestElectronEnergy(tcut); param->SetLowestMuHadEnergy(tcut); + fG4HepEmActive = p.getParameter("G4HepEmActive"); + if (fG4HepEmActive) { + // At the moment, G4HepEm supports only one configuration of MSC, so use + // the most generic parameters everywhere. 
+ param->SetMscRangeFactor(fRangeFactor); + param->SetMscGeomFactor(fGeomFactor); + param->SetMscSafetyFactor(fSafetyFactor); + param->SetMscLambdaLimit(fLambdaLimit); + param->SetMscStepLimitType(fStepLimitType); + } } void CMSEmStandardPhysicsXS::ConstructParticle() { @@ -112,40 +108,29 @@ void CMSEmStandardPhysicsXS::ConstructProcess() { G4NuclearStopping* pnuc(nullptr); // high energy limit for e+- scattering models - G4double highEnergyLimit = G4EmParameters::Instance()->MscEnergyLimit(); + auto param = G4EmParameters::Instance(); + G4double highEnergyLimit = param->MscEnergyLimit(); + const G4Region* aRegion = G4RegionStore::GetInstance()->GetRegion("HcalRegion", false); const G4Region* bRegion = G4RegionStore::GetInstance()->GetRegion("HGCalRegion", false); // Add gamma EM Processes G4ParticleDefinition* particle = G4Gamma::Gamma(); - // Photoelectric - G4PhotoElectricEffect* pe = new G4PhotoElectricEffect(); - G4VEmModel* theLivermorePEModel = new G4LivermorePhotoElectricModel(); - pe->SetEmModel(theLivermorePEModel); - - // Compton scattering - G4ComptonScattering* cs = new G4ComptonScattering; - cs->SetEmModel(new G4KleinNishinaModel()); + G4PhotoElectricEffect* pee = new G4PhotoElectricEffect(); - // Gamma conversion - G4GammaConversion* gc = new G4GammaConversion(); - G4VEmModel* conv = new G4BetheHeitler5DModel(); - gc->SetEmModel(conv); - - if (G4EmParameters::Instance()->GeneralProcessActive()) { + if (param->GeneralProcessActive()) { G4GammaGeneralProcess* sp = new G4GammaGeneralProcess(); - sp->AddEmProcess(pe); - sp->AddEmProcess(cs); - sp->AddEmProcess(gc); - sp->AddEmProcess(new G4RayleighScattering()); + sp->AddEmProcess(pee); + sp->AddEmProcess(new G4ComptonScattering()); + sp->AddEmProcess(new G4GammaConversion()); G4LossTableManager::Instance()->SetGammaGeneralProcess(sp); ph->RegisterProcess(sp, particle); + } else { - ph->RegisterProcess(pe, particle); - ph->RegisterProcess(cs, particle); - ph->RegisterProcess(gc, particle); - 
ph->RegisterProcess(new G4RayleighScattering(), particle); + ph->RegisterProcess(pee, particle); + ph->RegisterProcess(new G4ComptonScattering(), particle); + ph->RegisterProcess(new G4GammaConversion(), particle); } // e- @@ -228,29 +213,15 @@ void CMSEmStandardPhysicsXS::ConstructProcess() { ssm->SetLowEnergyLimit(highEnergyLimit); ssm->SetActivationLowEnergyLimit(highEnergyLimit); - // ionisation - G4eIonisation* eioni = new G4eIonisation(); - - // bremsstrahlung - G4eBremsstrahlung* brem = new G4eBremsstrahlung(); - G4SeltzerBergerModel* br1 = new G4SeltzerBergerModel(); - G4eBremsstrahlungRelModel* br2 = new G4eBremsstrahlungRelModel(); - br1->SetAngularDistribution(new G4Generator2BS()); - br2->SetAngularDistribution(new G4Generator2BS()); - brem->SetEmModel(br1); - brem->SetEmModel(br2); - br1->SetHighEnergyLimit(CLHEP::GeV); - - G4ePairProduction* ee = new G4ePairProduction(); - // register processes - ph->RegisterProcess(eioni, particle); - ph->RegisterProcess(brem, particle); - ph->RegisterProcess(ee, particle); + ph->RegisterProcess(new G4eIonisation(), particle); + ph->RegisterProcess(new G4eBremsstrahlung(), particle); ph->RegisterProcess(ss, particle); // e+ particle = G4Positron::Positron(); + msc3 = nullptr; + msc4 = nullptr; // multiple scattering msc1 = new G4UrbanMscModel(); @@ -326,26 +297,18 @@ void CMSEmStandardPhysicsXS::ConstructProcess() { ssm->SetLowEnergyLimit(highEnergyLimit); ssm->SetActivationLowEnergyLimit(highEnergyLimit); - // ionisation - eioni = new G4eIonisation(); - - // bremsstrahlung - brem = new G4eBremsstrahlung(); - br1 = new G4SeltzerBergerModel(); - br2 = new G4eBremsstrahlungRelModel(); - br1->SetAngularDistribution(new G4Generator2BS()); - br2->SetAngularDistribution(new G4Generator2BS()); - brem->SetEmModel(br1); - brem->SetEmModel(br2); - br1->SetHighEnergyLimit(CLHEP::GeV); - // register processes - ph->RegisterProcess(eioni, particle); - ph->RegisterProcess(brem, particle); - ph->RegisterProcess(ee, particle); + 
ph->RegisterProcess(new G4eIonisation(), particle); + ph->RegisterProcess(new G4eBremsstrahlung(), particle); ph->RegisterProcess(new G4eplusAnnihilation(), particle); ph->RegisterProcess(ss, particle); + if (fG4HepEmActive) { + auto* hepEmTM = new CMSHepEmTrackingManager(highEnergyLimit); + G4Electron::Electron()->SetTrackingManager(hepEmTM); + G4Positron::Positron()->SetTrackingManager(hepEmTM); + } + // generic ion particle = G4GenericIon::GenericIon(); G4ionIonisation* ionIoni = new G4ionIonisation(); diff --git a/SimG4Core/PrintGeomInfo/plugins/PrintGeomInfoAction.cc b/SimG4Core/PrintGeomInfo/plugins/PrintGeomInfoAction.cc index e4bee63c2bc5b..f1ccfaea89c8f 100644 --- a/SimG4Core/PrintGeomInfo/plugins/PrintGeomInfoAction.cc +++ b/SimG4Core/PrintGeomInfo/plugins/PrintGeomInfoAction.cc @@ -1,3 +1,4 @@ +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include "SimG4Core/Notification/interface/BeginOfRun.h" #include "SimG4Core/Notification/interface/Observer.h" #include "SimG4Core/Watcher/interface/SimWatcher.h" @@ -605,10 +606,10 @@ void PrintGeomInfoAction::getTouch(G4VPhysicalVolume *pv, std::string mother = "World"; if (pv->GetMotherLogical()) - mother = static_cast(dd4hep::dd::noNamespace(pv->GetMotherLogical()->GetName())); + mother = static_cast(DD4hep2DDDName::noNameSpace(pv->GetMotherLogical()->GetName())); G4LogicalVolume *lv = pv->GetLogicalVolume(); - std::string lvname = static_cast(dd4hep::dd::noNamespace(lv->GetName())); + std::string lvname = static_cast(DD4hep2DDDName::noNameSpace(lv->GetName())); unsigned int copy = static_cast(pv->GetCopyNo()); std::string name = lvname + ":" + std::to_string(copy) + "_" + mother + ":" + std::to_string(copym); diff --git a/SimG4Core/PrintGeomInfo/plugins/PrintMaterialBudgetInfo.cc b/SimG4Core/PrintGeomInfo/plugins/PrintMaterialBudgetInfo.cc index 0f95aca15a148..d592c12aaac9b 100644 --- a/SimG4Core/PrintGeomInfo/plugins/PrintMaterialBudgetInfo.cc +++ 
b/SimG4Core/PrintGeomInfo/plugins/PrintMaterialBudgetInfo.cc @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -66,7 +67,6 @@ class PrintMaterialBudgetInfo : public SimWatcher, std::string name; int nchar; mpvpv thePVTree; - G4VPhysicalVolume* theTopPV; G4NavigationHistory fHistory; bool volumeFound; unsigned int levelFound; @@ -76,7 +76,7 @@ class PrintMaterialBudgetInfo : public SimWatcher, std::vector elementNames; std::vector elementTotalWeight; std::vector elementWeightFraction; - // + std::string stringLaTeXUnderscore(std::string stringname); std::string stringLaTeXSuperscript(std::string stringname); }; @@ -106,7 +106,8 @@ PrintMaterialBudgetInfo::~PrintMaterialBudgetInfo() {} void PrintMaterialBudgetInfo::update(const BeginOfRun* run) { G4Random::setTheEngine(new CLHEP::RanecuEngine); // Physical Volume - theTopPV = G4TransportationManager::GetTransportationManager()->GetNavigatorForTracking()->GetWorldVolume(); + G4VPhysicalVolume* theTopPV = + G4TransportationManager::GetTransportationManager()->GetNavigatorForTracking()->GetWorldVolume(); assert(theTopPV); // Logical Volume G4LogicalVolume* lv = theTopPV->GetLogicalVolume(); diff --git a/SimG4Core/PrintGeomInfo/test/TouchFileCompare.cpp b/SimG4Core/PrintGeomInfo/test/TouchFileCompare.cpp index daa65c4134762..56a6ed139a6eb 100644 --- a/SimG4Core/PrintGeomInfo/test/TouchFileCompare.cpp +++ b/SimG4Core/PrintGeomInfo/test/TouchFileCompare.cpp @@ -14,6 +14,7 @@ // //////////////////////////////////////////////////////////////////////////////// +#include "SimG4Core/Geometry/interface/DD4hep2DDDName.h" #include #include #include @@ -42,12 +43,6 @@ std::vector splitString(const std::string& fLine) { return result; } -std::string noNameSpace(std::string& name) { - std::size_t found = name.find(":"); - std::string nam = (found == std::string::npos) ? 
name : name.substr(found + 1, (name.size() - found)); - return nam; -} - void CompareFiles(const char* sdFileDDD, const char* sdFileDD4hep, const char* touchFileDDD, @@ -91,7 +86,7 @@ void CompareFiles(const char* sdFileDDD, std::vector items = splitString(std::string(buffer)); ++all; if (items.size() > 0) { - sdDD4hep.emplace_back(noNameSpace(items[0])); + sdDD4hep.emplace_back(DD4hep2DDDName::noNameSpace(items[0])); if (((debug / 10) % 10) > 0) std::cout << "[" << good << "] " << sdDD4hep.back() << std::endl; ++good; diff --git a/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DD4hep_cfg.py b/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DD4hep_cfg.py index e525fda8ed79f..a17093b1912b6 100644 --- a/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DD4hep_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DD4hep_cfg.py @@ -3,7 +3,7 @@ # cmsRun g4OverlapCheck2026DD4hep_cfg.py geometry=D92 tol=0.1 # # Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104, D105, D106 # ############################################################################### import FWCore.ParameterSet.Config as cms @@ -17,7 +17,7 @@ "D88", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103, D104, D105, D106") options.register('tol', 0.1, VarParsing.VarParsing.multiplicity.singleton, diff --git a/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DDD_cfg.py b/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DDD_cfg.py index 5754e522047f9..b6d1ff212c3c0 100644 --- a/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DDD_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/g4OverlapCheck2026DDD_cfg.py @@ -3,7 +3,7 @@ # cmsRun 
g4OverlapCheck2026DDD_cfg.py geometry=D88 tol=0.1 # # Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104, D105, D106 # ############################################################################### import FWCore.ParameterSet.Config as cms @@ -17,7 +17,7 @@ "D88", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103, D104, D105, D106") options.register('tol', 0.1, VarParsing.VarParsing.multiplicity.singleton, diff --git a/SimG4Core/PrintGeomInfo/test/python/runDD4hep2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runDD4hep2026_cfg.py index 867ca3234a59d..440da0281478a 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runDD4hep2026_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runDD4hep2026_cfg.py @@ -3,7 +3,7 @@ # cmsRun runDD4hep2026_cfg.py geometry=D92 # # Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104, D105, D106 # ############################################################################### import FWCore.ParameterSet.Config as cms @@ -17,7 +17,7 @@ "D92", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99, D100, D101, D102, D103, D104, D105, D106") ### get and parse the command line arguments options.parseArguments() diff --git a/SimG4Core/PrintGeomInfo/test/python/runDDD2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runDDD2026_cfg.py index f795052613f15..57702756810bc 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runDDD2026_cfg.py +++ 
b/SimG4Core/PrintGeomInfo/test/python/runDDD2026_cfg.py @@ -3,7 +3,7 @@ # cmsRun runDDD2026_cfg.py geometry=D88 # # Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99, -# D100 D101, D102, D103 +# D100 D101, D102, D103, D104, D105, D106 # ############################################################################### import FWCore.ParameterSet.Config as cms @@ -17,7 +17,7 @@ "D92", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99, D100, D101, D102, D103, D104, D105, D106") ### get and parse the command line arguments options.parseArguments() diff --git a/SimG4Core/PrintGeomInfo/test/python/runMaterialBudgeInfo2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runMaterialBudgeInfo2026_cfg.py index 66a92aed404db..ec2c4d8888bdc 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runMaterialBudgeInfo2026_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runMaterialBudgeInfo2026_cfg.py @@ -4,7 +4,7 @@ # # Options for type DDD, DD4hep # Options for geometry D86, D88, D91, D92, D93, D95, D96, D97, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104, D105, D106 # ################################################################################ import FWCore.ParameterSet.Config as cms @@ -23,7 +23,7 @@ "D92", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D95, D96, D97, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D95, D96, D97, D98, D99, D100, D101, D102, D103, D104, D105, D106") options.register('detector', "Tracker", VarParsing.VarParsing.multiplicity.singleton, diff --git a/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids2_cfg.py 
b/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids2_cfg.py index d24f8ff6059fa..f8adca69f37ac 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids2_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids2_cfg.py @@ -3,7 +3,7 @@ # cmsRun grunPrintG4Solids_cfg.py geometry=D98 dd4hep=False # # Options for geometry D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, -# D101, D102, D103 +# D101, D102, D103, D104,D105, D106 # Options for type DDD, DD4hep # ############################################################################### @@ -18,7 +18,7 @@ "D88", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103, D104,D105, D106") options.register('type', "DDD", VarParsing.VarParsing.multiplicity.singleton, diff --git a/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids_cfg.py index 2c63ca2af16a0..310748fe7aef5 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runPrintG4Solids_cfg.py @@ -3,7 +3,7 @@ # cmsRun grunPrintG4Solids_cfg.py geometry=D98 dd4hep=False # # Options for geometry D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, -# D101, D102, D103 +# D101, D102, D103, D104, D105, D106 # Options for type DDD, DD4hep # ############################################################################### @@ -18,7 +18,7 @@ "D88", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103, D104, D105, D106") options.register('type', "DDD", VarParsing.VarParsing.multiplicity.singleton, diff 
--git a/SimG4Core/PrintGeomInfo/test/python/runPrintG4Touch2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runPrintG4Touch2026_cfg.py index 53b661701bcdd..0d3ad26f9bfe4 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runPrintG4Touch2026_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runPrintG4Touch2026_cfg.py @@ -3,7 +3,7 @@ # cmsRun grunPrintG4Touch_cfg.py geometry=D98 type=DDD # # Options for geometry D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, -# D101, D102, D103 +# D101, D102, D103, D104,D105, D106 # Options for type DDD, DD4hep # ############################################################################### @@ -18,7 +18,7 @@ "D88", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103, D104,D105, D106") options.register('type', "DDD", VarParsing.VarParsing.multiplicity.singleton, diff --git a/SimG4Core/PrintGeomInfo/test/python/runPrintSolid2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runPrintSolid2026_cfg.py index 7583501050a02..eec071ff2a9a3 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runPrintSolid2026_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runPrintSolid2026_cfg.py @@ -4,7 +4,7 @@ # # Options for type DDD, DD4hep # Options for geometry D86, D88, D91, D92, D93, D95, D96, D97, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104,D105, D106 # ################################################################################ import FWCore.ParameterSet.Config as cms @@ -23,7 +23,7 @@ "D92", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D95, D96, D97, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D95, D96, D97, D98, D99, D100, D101, D102, D103, D104,D105, D106") ### get 
and parse the command line arguments options.parseArguments() diff --git a/SimG4Core/PrintGeomInfo/test/python/runSens2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runSens2026_cfg.py index d4280f5257b35..04534ea10a8d5 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runSens2026_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runSens2026_cfg.py @@ -3,7 +3,7 @@ # cmsRun runSens2026_cfg.py geometry=D92 type=DDD # # Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104, D105, D106 # Options for type DDD, DD4hep # ############################################################################### @@ -18,7 +18,7 @@ "D92", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103, D104, D105, D106") options.register('type', "DDD", VarParsing.VarParsing.multiplicity.singleton, diff --git a/SimG4Core/PrintGeomInfo/test/python/runSummary2026_cfg.py b/SimG4Core/PrintGeomInfo/test/python/runSummary2026_cfg.py index d689194a55a0f..ab3d2e1cf865f 100644 --- a/SimG4Core/PrintGeomInfo/test/python/runSummary2026_cfg.py +++ b/SimG4Core/PrintGeomInfo/test/python/runSummary2026_cfg.py @@ -3,7 +3,7 @@ # cmsRun runSummary2026_cfg.py geometry=D92 # # Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, -# D100, D101, D102, D103 +# D100, D101, D102, D103, D104, D105, D106 # ############################################################################### import FWCore.ParameterSet.Config as cms @@ -17,7 +17,7 @@ "D88", VarParsing.VarParsing.multiplicity.singleton, VarParsing.VarParsing.varType.string, - "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, D99, D100, D101, D102, D103") + "geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D98, 
D99, D100, D101, D102, D103, D104, D105, D106") ### get and parse the command line arguments options.parseArguments() diff --git a/SimG4Core/SensitiveDetector/src/AttachSD.cc b/SimG4Core/SensitiveDetector/src/AttachSD.cc index 5ddd8999b73db..879fb77df5944 100644 --- a/SimG4Core/SensitiveDetector/src/AttachSD.cc +++ b/SimG4Core/SensitiveDetector/src/AttachSD.cc @@ -36,10 +36,10 @@ std::pair, std::vector if (sd->isCaloSD()) { detList.second.push_back(static_cast(sd.release())); - ss << " + calo SD"; + ss << " is calo SD"; } else { detList.first.push_back(static_cast(sd.release())); - ss << " + tracking SD"; + ss << " is tracking SD"; } edm::LogVerbatim("SimG4CoreSensitiveDetector") << ss.str(); } diff --git a/SimGeneral/MixingModule/python/digitizersCosmics_cfi.py b/SimGeneral/MixingModule/python/digitizersCosmics_cfi.py index 9c1bce518f9bc..94b6d7e14c059 100644 --- a/SimGeneral/MixingModule/python/digitizersCosmics_cfi.py +++ b/SimGeneral/MixingModule/python/digitizersCosmics_cfi.py @@ -8,6 +8,7 @@ from SimGeneral.MixingModule.hcalDigitizer_cfi import * from SimGeneral.MixingModule.castorDigitizer_cfi import * from SimGeneral.MixingModule.trackingTruthProducer_cfi import * +from SimGeneral.MixingModule.caloTruthProducer_cfi import * pixelDigitizer.TofLowerCut=cms.double(18.5) pixelDigitizer.TofUpperCut=cms.double(43.5) @@ -34,30 +35,39 @@ ) ) -theDigitizersValid = cms.PSet( - pixel = cms.PSet( - pixelDigitizer - ), - strip = cms.PSet( - stripDigitizer - ), - ecal = cms.PSet( - ecalDigitizer - ), - hcal = cms.PSet( - hcalDigitizer - ), - castor = cms.PSet( - castorDigitizer - ), - mergedtruth = cms.PSet( - trackingParticles - ) -) - from Configuration.Eras.Modifier_run3_common_cff import run3_common run3_common.toModify( theDigitizers, castor = None ) -run3_common.toModify( theDigitizersValid, castor = None ) +from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import hgceeDigitizer, hgchebackDigitizer, hgchefrontDigitizer, HGCAL_noise_fC, HGCAL_noise_heback, 
HFNose_noise_fC, HGCAL_chargeCollectionEfficiencies, HGCAL_ileakParam_toUse, HGCAL_cceParams_toUse, HGCAL_noises + +from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal +phase2_hgcal.toModify( theDigitizers, + hgceeDigitizer = cms.PSet(hgceeDigitizer), + hgchebackDigitizer = cms.PSet(hgchebackDigitizer), + hgchefrontDigitizer = cms.PSet(hgchefrontDigitizer), + calotruth = cms.PSet(caloParticles), #HGCAL still needs calotruth for production mode +) + +from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import hfnoseDigitizer + +from Configuration.Eras.Modifier_phase2_hfnose_cff import phase2_hfnose +phase2_hfnose.toModify( theDigitizers, + hfnoseDigitizer = cms.PSet(hfnoseDigitizer), +) + +from SimGeneral.MixingModule.ecalTimeDigitizer_cfi import ecalTimeDigitizer +from Configuration.Eras.Modifier_phase2_timing_cff import phase2_timing +phase2_timing.toModify( theDigitizers, + ecalTime = ecalTimeDigitizer.clone() ) + +from SimFastTiming.Configuration.SimFastTiming_cff import mtdDigitizer +from Configuration.Eras.Modifier_phase2_timing_layer_cff import phase2_timing_layer +phase2_timing_layer.toModify( theDigitizers, + fastTimingLayer = mtdDigitizer.clone() ) +from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker +phase2_tracker.toModify(theDigitizers, + strip = None) +theDigitizersValid = cms.PSet(theDigitizers, + mergedtruth = cms.PSet(trackingParticles)) diff --git a/SimGeneral/MixingModule/python/mixPoolSource_cfi.py b/SimGeneral/MixingModule/python/mixPoolSource_cfi.py index c4faa18310ffb..9c22ff0d4319b 100644 --- a/SimGeneral/MixingModule/python/mixPoolSource_cfi.py +++ b/SimGeneral/MixingModule/python/mixPoolSource_cfi.py @@ -1,7 +1,3 @@ import FWCore.ParameterSet.Config as cms -#FileNames = cms.untracked.vstring('dataset:/RelValMinBias/CMSSW_4_2_0_pre4-START42_V1-v1/GEN-SIM-DIGI-RAW-HLTDEBUG') - -FileNames = 
cms.untracked.vstring('/store/relval/CMSSW_5_0_0_pre6/RelValProdMinBias/GEN-SIM-RAW/START50_V5-v1/0195/1AD9E627-7316-E111-B3A5-001A9281173C.root', - '/store/relval/CMSSW_5_0_0_pre6/RelValProdMinBias/GEN-SIM-RAW/START50_V5-v1/0196/0477EED1-7516-E111-B834-0018F3D0962E.root', - '/store/relval/CMSSW_5_0_0_pre6/RelValProdMinBias/GEN-SIM-RAW/START50_V5-v1/0197/3E1F5CF3-DB16-E111-A7C7-001A928116CE.root') +FileNames = cms.untracked.vstring() diff --git a/SimMuon/DTDigitizer/plugins/DTChamberMasker.cc b/SimMuon/DTDigitizer/plugins/DTChamberMasker.cc index 5f0ef724f3592..179a841a66808 100644 --- a/SimMuon/DTDigitizer/plugins/DTChamberMasker.cc +++ b/SimMuon/DTDigitizer/plugins/DTChamberMasker.cc @@ -25,7 +25,7 @@ // user include files #include "FWCore/Framework/interface/ConsumesCollector.h" #include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "DataFormats/Common/interface/Handle.h" @@ -60,25 +60,22 @@ // class declaration // -class DTChamberMasker : public edm::stream::EDProducer<> { +class DTChamberMasker : public edm::global::EDProducer<> { public: explicit DTChamberMasker(const edm::ParameterSet &); - ~DTChamberMasker() override; static void fillDescriptions(edm::ConfigurationDescriptions &); private: - void produce(edm::Event &, const edm::EventSetup &) override; - - void beginRun(edm::Run const &, edm::EventSetup const &) override; + void produce(edm::StreamID, edm::Event &, const edm::EventSetup &) const override; void createMaskedChamberCollection(edm::ESHandle &); // ----------member data --------------------------- - edm::EDGetTokenT m_digiToken; - edm::ESGetToken m_agingObjToken; - std::map m_ChEffs; + const edm::EDGetTokenT m_digiToken; + const edm::EDPutTokenT m_putToken; + const edm::ESGetToken m_agingObjToken; }; // @@ -89,23 +86,24 @@ class DTChamberMasker : public 
edm::stream::EDProducer<> { // constructors and destructor // DTChamberMasker::DTChamberMasker(const edm::ParameterSet &iConfig) - : m_digiToken(consumes(iConfig.getParameter("digiTag"))), - m_agingObjToken(esConsumes()) { - produces(); -} - -DTChamberMasker::~DTChamberMasker() {} + : m_digiToken(consumes(iConfig.getParameter("digiTag"))), + m_putToken(produces()), + m_agingObjToken(esConsumes()) {} // // member functions // // ------------ method called to produce the data ------------ -void DTChamberMasker::produce(edm::Event &event, const edm::EventSetup &conditions) { +void DTChamberMasker::produce(edm::StreamID, edm::Event &event, const edm::EventSetup &conditions) const { edm::Service randGenService; CLHEP::HepRandomEngine &randGen = randGenService->getEngine(event.streamID()); - std::unique_ptr filteredDigis(new DTDigiCollection()); + MuonSystemAging const &agingObj = conditions.getData(m_agingObjToken); + + auto const &chEffs = agingObj.m_DTChambEffs; + + DTDigiCollection filteredDigis; if (!m_digiToken.isUninitialized()) { edm::Handle dtDigis; @@ -113,23 +111,14 @@ void DTChamberMasker::produce(edm::Event &event, const edm::EventSetup &conditio for (const auto &dtLayerId : (*dtDigis)) { uint32_t rawId = (dtLayerId.first).chamberId().rawId(); - auto chEffIt = m_ChEffs.find(rawId); + auto chEffIt = chEffs.find(rawId); - if (chEffIt == m_ChEffs.end() || randGen.flat() <= chEffIt->second) - filteredDigis->put(dtLayerId.second, dtLayerId.first); + if (chEffIt == chEffs.end() || randGen.flat() <= chEffIt->second) + filteredDigis.put(dtLayerId.second, dtLayerId.first); } } - event.put(std::move(filteredDigis)); -} - -// ------------ method called when starting to processes a run ------------ -void DTChamberMasker::beginRun(edm::Run const &run, edm::EventSetup const &iSetup) { - m_ChEffs.clear(); - - edm::ESHandle agingObj = iSetup.getHandle(m_agingObjToken); - - m_ChEffs = agingObj->m_DTChambEffs; + event.emplace(m_putToken, std::move(filteredDigis)); } // 
------------ method fills 'descriptions' with the allowed parameters for the diff --git a/SimMuon/GEMDigitizer/plugins/GEMChamberMasker.cc b/SimMuon/GEMDigitizer/plugins/GEMChamberMasker.cc index ffe2efa7d79b4..e13e2fd09a70e 100644 --- a/SimMuon/GEMDigitizer/plugins/GEMChamberMasker.cc +++ b/SimMuon/GEMDigitizer/plugins/GEMChamberMasker.cc @@ -8,7 +8,7 @@ // user include files #include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/ConsumesCollector.h" #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/Utilities/interface/ESGetToken.h" @@ -27,29 +27,25 @@ // class declaration // -class GEMChamberMasker : public edm::stream::EDProducer<> { +class GEMChamberMasker : public edm::global::EDProducer<> { public: explicit GEMChamberMasker(const edm::ParameterSet&); - ~GEMChamberMasker() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void produce(edm::Event&, const edm::EventSetup&) override; - - void beginRun(edm::Run const&, edm::EventSetup const&) override; - void endRun(edm::Run const&, edm::EventSetup const&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; // ----------member data --------------------------- - edm::InputTag digiTag_; - bool ge11Minus_; - bool ge11Plus_; - bool ge21Minus_; - bool ge21Plus_; - - edm::EDGetTokenT m_digiTag; - edm::ESGetToken m_agingObj; - std::map m_maskedGEMIDs; + const edm::InputTag digiTag_; + const bool ge11Minus_; + const bool ge11Plus_; + const bool ge21Minus_; + const bool ge21Plus_; + + const edm::EDGetTokenT m_digiTag; + const edm::EDPutTokenT m_putToken; + const edm::ESGetToken m_agingObj; }; // @@ -68,28 +64,28 @@ GEMChamberMasker::GEMChamberMasker(const edm::ParameterSet& iConfig) ge11Minus_(iConfig.getParameter("ge11Minus")), 
ge11Plus_(iConfig.getParameter("ge11Plus")), ge21Minus_(iConfig.getParameter("ge21Minus")), - ge21Plus_(iConfig.getParameter("ge21Plus")) { - m_digiTag = consumes(digiTag_); - m_agingObj = esConsumes(); - produces(); -} - -GEMChamberMasker::~GEMChamberMasker() {} + ge21Plus_(iConfig.getParameter("ge21Plus")), + m_digiTag(consumes(digiTag_)), + m_putToken(produces()), + m_agingObj(esConsumes()) {} // // member functions // // ------------ method called to produce the data ------------ -void GEMChamberMasker::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { +void GEMChamberMasker::produce(edm::StreamID, edm::Event& iEvent, const edm::EventSetup& iSetup) const { using namespace edm; - std::unique_ptr filteredDigis(new GEMDigiCollection()); + GEMDigiCollection filteredDigis; + + auto const& agingObj = iSetup.getData(m_agingObj); + + auto const& maskedGEMIDs = agingObj.m_GEMChambEffs; if (!digiTag_.label().empty()) { - edm::Handle gemDigis; - iEvent.getByToken(m_digiTag, gemDigis); + GEMDigiCollection const& gemDigis = iEvent.get(m_digiTag); - for (const auto& gemLayerId : (*gemDigis)) { + for (const auto& gemLayerId : gemDigis) { auto chambId = gemLayerId.first.chamberId(); bool keepDigi = (!ge11Minus_ && chambId.station() == 1 && chambId.region() < 0) || @@ -98,27 +94,15 @@ void GEMChamberMasker::produce(edm::Event& iEvent, const edm::EventSetup& iSetup (!ge21Plus_ && chambId.station() == 2 && chambId.region() > 0); uint32_t rawId = chambId.rawId(); - if (keepDigi || m_maskedGEMIDs.find(rawId) == m_maskedGEMIDs.end()) { - filteredDigis->put(gemLayerId.second, gemLayerId.first); + if (keepDigi || maskedGEMIDs.find(rawId) == maskedGEMIDs.end()) { + filteredDigis.put(gemLayerId.second, gemLayerId.first); } } } - iEvent.put(std::move(filteredDigis)); + iEvent.emplace(m_putToken, std::move(filteredDigis)); } -// ------------ method called when starting to processes a run ------------ - -void GEMChamberMasker::beginRun(edm::Run const& run, edm::EventSetup const& 
iSetup) { - edm::ESHandle agingObj = iSetup.getHandle(m_agingObj); - - m_maskedGEMIDs = agingObj->m_GEMChambEffs; -} - -// ------------ method called when ending the processing of a run ------------ - -void GEMChamberMasker::endRun(edm::Run const&, edm::EventSetup const&) {} - void GEMChamberMasker::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; desc.add("digiTag", edm::InputTag("simMuonGEMDigis")); diff --git a/SimMuon/GEMDigitizer/plugins/GEMDigiProducer.cc b/SimMuon/GEMDigitizer/plugins/GEMDigiProducer.cc index 8bf6c6d8de683..3610995f88c03 100644 --- a/SimMuon/GEMDigitizer/plugins/GEMDigiProducer.cc +++ b/SimMuon/GEMDigitizer/plugins/GEMDigiProducer.cc @@ -84,7 +84,7 @@ void GEMDigiProducer::fillDescriptions(edm::ConfigurationDescriptions& descripti desc.add("mixLabel", "mix"); desc.add("signalPropagationSpeed", 0.66); - desc.add("timeResolution", 5.); + desc.add("timeResolution", 18.); desc.add("timeJitter", 1.0); desc.add("averageShapingTime", 50.0); desc.add("averageEfficiency", 0.98); @@ -104,7 +104,6 @@ void GEMDigiProducer::fillDescriptions(edm::ConfigurationDescriptions& descripti desc.add("simulateElectronBkg", true); // flase == simulate only neutral bkg desc.add("simulateIntrinsicNoise", false); - desc.add("bx0filter", false); desc.add("instLumi", 7.5); // in units of 1E34 cm^-2 s^-1. Internally the background is parmetrized from FLUKA+GEANT result at 5E+34 (PU 140). 
We are adding a 1.5 factor for PU 200 diff --git a/SimMuon/GEMDigitizer/plugins/ME0ChamberMasker.cc b/SimMuon/GEMDigitizer/plugins/ME0ChamberMasker.cc index b35992fd29b54..18f92a7e80b90 100644 --- a/SimMuon/GEMDigitizer/plugins/ME0ChamberMasker.cc +++ b/SimMuon/GEMDigitizer/plugins/ME0ChamberMasker.cc @@ -12,7 +12,7 @@ // user include files #include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/ConsumesCollector.h" #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/Utilities/interface/ESGetToken.h" @@ -31,26 +31,22 @@ // class declaration // -class ME0ChamberMasker : public edm::stream::EDProducer<> { +class ME0ChamberMasker : public edm::global::EDProducer<> { public: explicit ME0ChamberMasker(const edm::ParameterSet&); - ~ME0ChamberMasker() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void produce(edm::Event&, const edm::EventSetup&) override; - - void beginRun(edm::Run const&, edm::EventSetup const&) override; - void endRun(edm::Run const&, edm::EventSetup const&) override; + void produce(edm::StreamID, edm::Event&, const edm::EventSetup&) const override; // ----------member data --------------------------- - bool me0Minus_; - bool me0Plus_; - edm::InputTag digiTag_; - edm::EDGetTokenT m_digiTag; - edm::ESGetToken m_agingObjTag; - std::map m_maskedME0IDs; + const bool me0Minus_; + const bool me0Plus_; + const edm::InputTag digiTag_; + const edm::EDGetTokenT m_digiTag; + const edm::EDPutTokenT m_putToken; + const edm::ESGetToken m_agingObjTag; }; // @@ -67,54 +63,43 @@ class ME0ChamberMasker : public edm::stream::EDProducer<> { ME0ChamberMasker::ME0ChamberMasker(const edm::ParameterSet& iConfig) : me0Minus_(iConfig.getParameter("me0Minus")), me0Plus_(iConfig.getParameter("me0Plus")), - digiTag_(iConfig.getParameter("digiTag")) { - m_digiTag = 
consumes(digiTag_); - m_agingObjTag = esConsumes(); - produces(); -} - -ME0ChamberMasker::~ME0ChamberMasker() {} + digiTag_(iConfig.getParameter("digiTag")), + m_digiTag(consumes(digiTag_)), + m_putToken(produces()), + m_agingObjTag(esConsumes()) {} // // member functions // // ------------ method called to produce the data ------------ -void ME0ChamberMasker::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) { +void ME0ChamberMasker::produce(edm::StreamID, edm::Event& iEvent, const edm::EventSetup& iSetup) const { using namespace edm; - std::unique_ptr filteredDigis(new ME0DigiPreRecoCollection()); + + MuonSystemAging const& agingObj = iSetup.getData(m_agingObjTag); + + auto const& maskedME0IDs = agingObj.m_ME0ChambEffs; + + ME0DigiPreRecoCollection filteredDigis; if (!digiTag_.label().empty()) { - edm::Handle me0Digis; - iEvent.getByToken(m_digiTag, me0Digis); + ME0DigiPreRecoCollection const& me0Digis = iEvent.get(m_digiTag); - for (const auto& me0LayerId : (*me0Digis)) { + for (const auto& me0LayerId : me0Digis) { auto chambId = me0LayerId.first.chamberId(); bool keepDigi = (!me0Minus_ && chambId.region() < 0) || (!me0Plus_ && chambId.region() > 0); uint32_t rawId = chambId.rawId(); - if (keepDigi || m_maskedME0IDs.find(rawId) == m_maskedME0IDs.end()) { - filteredDigis->put(me0LayerId.second, me0LayerId.first); + if (keepDigi || maskedME0IDs.find(rawId) == maskedME0IDs.end()) { + filteredDigis.put(me0LayerId.second, me0LayerId.first); } } } - iEvent.put(std::move(filteredDigis)); + iEvent.emplace(m_putToken, std::move(filteredDigis)); } -// ------------ method called when starting to processes a run ------------ - -void ME0ChamberMasker::beginRun(edm::Run const& run, edm::EventSetup const& iSetup) { - edm::ESHandle agingObj = iSetup.getHandle(m_agingObjTag); - - m_maskedME0IDs = agingObj->m_ME0ChambEffs; -} - -// ------------ method called when ending the processing of a run ------------ - -void ME0ChamberMasker::endRun(edm::Run const&, edm::EventSetup 
const&) {} - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void ME0ChamberMasker::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; diff --git a/SimMuon/GEMDigitizer/python/muonGEMDigis_cfi.py b/SimMuon/GEMDigitizer/python/muonGEMDigis_cfi.py index 6684b2aaf0c15..146dc34109ff7 100644 --- a/SimMuon/GEMDigitizer/python/muonGEMDigis_cfi.py +++ b/SimMuon/GEMDigitizer/python/muonGEMDigis_cfi.py @@ -7,10 +7,10 @@ premix_stage2.toModify(simMuonGEMDigis, mixLabel = "mixData") from Configuration.Eras.Modifier_run2_common_cff import run2_common -run2_common.toModify( simMuonGEMDigis, instLumi = 1.5, bx0filter = True) +run2_common.toModify( simMuonGEMDigis, instLumi = 1.5) from Configuration.Eras.Modifier_run3_common_cff import run3_common -run3_common.toModify( simMuonGEMDigis, instLumi = 2.0, bx0filter = True) +run3_common.toModify( simMuonGEMDigis, instLumi = 2.0) from Configuration.Eras.Modifier_phase2_common_cff import phase2_common -phase2_common.toModify( simMuonGEMDigis, instLumi = 5, bx0filter = False) +phase2_common.toModify( simMuonGEMDigis, instLumi = 5) diff --git a/SimMuon/GEMDigitizer/src/GEMSignalModel.cc b/SimMuon/GEMDigitizer/src/GEMSignalModel.cc index 8eaf0ac7e60d3..249cae2c3319d 100644 --- a/SimMuon/GEMDigitizer/src/GEMSignalModel.cc +++ b/SimMuon/GEMDigitizer/src/GEMSignalModel.cc @@ -20,7 +20,6 @@ GEMSignalModel::GEMSignalModel(const edm::ParameterSet& config) timeResolution_(config.getParameter("timeResolution")), timeJitter_(config.getParameter("timeJitter")), signalPropagationSpeed_(config.getParameter("signalPropagationSpeed")), - bx0filter_(config.getParameter("bx0filter")), resolutionX_(config.getParameter("resolutionX")), cspeed(geant_units::operators::convertMmToCm(CLHEP::c_light)), // average energy required to remove an electron due to ionization for an Ar/CO2 gas mixture (in the ratio of 70/30) is 28.1 eV @@ -38,8 +37,6 @@ void 
GEMSignalModel::simulate(const GEMEtaPartition* roll, if (hit.energyLoss() < energyMinCut) continue; const int bx(getSimHitBx(&hit, engine)); - if (bx != 0 and bx0filter_) - continue; const std::vector >& cluster(simulateClustering(top, &hit, bx, engine)); for (const auto& digi : cluster) { detectorHitMap_.emplace(digi, &hit); diff --git a/SimTracker/TrackTriggerAssociation/interface/StubAssociation.h b/SimTracker/TrackTriggerAssociation/interface/StubAssociation.h index 5820b3910150f..bb1c796eee06d 100644 --- a/SimTracker/TrackTriggerAssociation/interface/StubAssociation.h +++ b/SimTracker/TrackTriggerAssociation/interface/StubAssociation.h @@ -21,7 +21,6 @@ namespace tt { StubAssociation() { setup_ = nullptr; } StubAssociation(const Setup* setup) : setup_(setup) {} ~StubAssociation() {} - // insert a TPPtr and its associated collection of TTstubRefs into the underlayering maps void insert(const TPPtr& tpPtr, const std::vector& ttSTubRefs); // returns map containing TTStubRef and their associated collection of TPPtrs diff --git a/SimTracker/TrackTriggerAssociation/plugins/StubAssociator.cc b/SimTracker/TrackTriggerAssociation/plugins/StubAssociator.cc index 87a8ca9b558cb..9b669b53130d7 100644 --- a/SimTracker/TrackTriggerAssociation/plugins/StubAssociator.cc +++ b/SimTracker/TrackTriggerAssociation/plugins/StubAssociator.cc @@ -1,4 +1,4 @@ -#include "FWCore/Framework/interface/stream/EDProducer.h" +#include "FWCore/Framework/interface/global/EDProducer.h" #include "FWCore/Framework/interface/Run.h" #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/Framework/interface/Event.h" @@ -34,18 +34,12 @@ namespace tt { * \author Thomas Schuh * \date 2020, Apr */ - class StubAssociator : public stream::EDProducer<> { + class StubAssociator : public global::EDProducer<> { public: explicit StubAssociator(const ParameterSet&); - ~StubAssociator() override {} private: - void beginRun(const Run&, const EventSetup&) override; - void produce(Event&, const 
EventSetup&) override; - void endJob() {} - - // helper classe to store configurations - const Setup* setup_ = nullptr; + void produce(StreamID, Event&, const EventSetup&) const override; // ED input token of TTStubs EDGetTokenT getTokenTTStubDetSetVec_; // ED input token of TTClusterAssociation @@ -65,20 +59,16 @@ namespace tt { putTokenReconstructable_ = produces(iConfig.getParameter("BranchReconstructable")); putTokenSelection_ = produces(iConfig.getParameter("BranchSelection")); // book ES product - esGetTokenSetup_ = esConsumes(); + esGetTokenSetup_ = esConsumes(); } - void StubAssociator::beginRun(const Run& iRun, const EventSetup& iSetup) { - setup_ = &iSetup.getData(esGetTokenSetup_); - } + void StubAssociator::produce(StreamID, Event& iEvent, const EventSetup& iSetup) const { + auto const& setup = iSetup.getData(esGetTokenSetup_); - void StubAssociator::produce(Event& iEvent, const EventSetup& iSetup) { // associate TTStubs with TrackingParticles - Handle handleTTStubDetSetVec; - iEvent.getByToken(getTokenTTStubDetSetVec_, handleTTStubDetSetVec); - Handle handleTTClusterAssMap; - Handle handleTTStubAssMap; - iEvent.getByToken(getTokenTTClusterAssMap_, handleTTClusterAssMap); + Handle handleTTStubDetSetVec = iEvent.getHandle(getTokenTTStubDetSetVec_); + auto const& ttClusterAssMap = iEvent.get(getTokenTTClusterAssMap_); + map> mapTPPtrsTTStubRefs; auto isNonnull = [](const TPPtr& tpPtr) { return tpPtr.isNonnull(); }; for (TTStubDetSetVec::const_iterator ttModule = handleTTStubDetSetVec->begin(); @@ -88,8 +78,7 @@ namespace tt { const TTStubRef ttStubRef = makeRefTo(handleTTStubDetSetVec, ttStub); set tpPtrs; for (unsigned int iClus = 0; iClus < 2; iClus++) { - const vector& assocPtrs = - handleTTClusterAssMap->findTrackingParticlePtrs(ttStubRef->clusterRef(iClus)); + const vector& assocPtrs = ttClusterAssMap.findTrackingParticlePtrs(ttStubRef->clusterRef(iClus)); copy_if(assocPtrs.begin(), assocPtrs.end(), inserter(tpPtrs, tpPtrs.begin()), isNonnull); } for 
(const TPPtr& tpPtr : tpPtrs) @@ -97,13 +86,13 @@ namespace tt { } } // associate reconstructable TrackingParticles with TTStubs - StubAssociation reconstructable(setup_); - StubAssociation selection(setup_); + StubAssociation reconstructable(&setup); + StubAssociation selection(&setup); for (const auto& p : mapTPPtrsTTStubRefs) { - if (!setup_->useForReconstructable(*p.first) || !setup_->reconstructable(p.second)) + if (!setup.useForReconstructable(*p.first) || !setup.reconstructable(p.second)) continue; reconstructable.insert(p.first, p.second); - if (setup_->useForAlgEff(*p.first)) + if (setup.useForAlgEff(*p.first)) selection.insert(p.first, p.second); } iEvent.emplace(putTokenReconstructable_, std::move(reconstructable)); diff --git a/SimTracker/TrackTriggerAssociation/plugins/TTClusterAssociator.h b/SimTracker/TrackTriggerAssociation/plugins/TTClusterAssociator.h index a6c1e92513f9d..da5372e2a1822 100644 --- a/SimTracker/TrackTriggerAssociation/plugins/TTClusterAssociator.h +++ b/SimTracker/TrackTriggerAssociation/plugins/TTClusterAssociator.h @@ -47,9 +47,6 @@ class TTClusterAssociator : public edm::stream::EDProducer<> { /// Constructors explicit TTClusterAssociator(const edm::ParameterSet& iConfig); - /// Destructor - ~TTClusterAssociator() override; - private: /// Data members edm::Handle > thePixelDigiSimLinkHandle_; @@ -64,8 +61,6 @@ class TTClusterAssociator : public edm::stream::EDProducer<> { edm::ESGetToken theTrackerGeometryToken_; /// Mandatory methods - void beginRun(const edm::Run& run, const edm::EventSetup& iSetup) override; - void endRun(const edm::Run& run, const edm::EventSetup& iSetup) override; void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; }; /// Close class @@ -93,23 +88,11 @@ TTClusterAssociator::TTClusterAssociator(const edm::ParameterSet& iConfig) { } theTrackerGeometryToken_ = esConsumes(); -} - -/// Destructor -template -TTClusterAssociator::~TTClusterAssociator() {} -/// Begin run -template -void 
TTClusterAssociator::beginRun(const edm::Run& run, const edm::EventSetup& iSetup) { /// Print some information when loaded edm::LogInfo("TTClusterAssociator< ") << templateNameFinder() << " > loaded."; } -/// End run -template -void TTClusterAssociator::endRun(const edm::Run& run, const edm::EventSetup& iSetup) {} - /// Implement the producer template <> void TTClusterAssociator::produce(edm::Event& iEvent, const edm::EventSetup& iSetup); diff --git a/SimTracker/TrackTriggerAssociation/plugins/TTStubAssociator.h b/SimTracker/TrackTriggerAssociation/plugins/TTStubAssociator.h index 1799fca4e867b..f43c6a9fd5a6d 100644 --- a/SimTracker/TrackTriggerAssociation/plugins/TTStubAssociator.h +++ b/SimTracker/TrackTriggerAssociation/plugins/TTStubAssociator.h @@ -48,9 +48,6 @@ class TTStubAssociator : public edm::stream::EDProducer<> { /// Constructors explicit TTStubAssociator(const edm::ParameterSet& iConfig); - /// Destructor - ~TTStubAssociator() override; - private: /// Data members std::vector ttStubsInputTags_; @@ -63,8 +60,6 @@ class TTStubAssociator : public edm::stream::EDProducer<> { edm::ESGetToken theTrackerTopologyToken_; /// Mandatory methods - void beginRun(const edm::Run& run, const edm::EventSetup& iSetup) override; - void endRun(const edm::Run& run, const edm::EventSetup& iSetup) override; void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; }; /// Close class @@ -94,23 +89,11 @@ TTStubAssociator::TTStubAssociator(const edm::ParameterSet& iConfig) { theTrackerGeometryToken_ = esConsumes(); theTrackerTopologyToken_ = esConsumes(); -} - -/// Destructor -template -TTStubAssociator::~TTStubAssociator() {} -/// Begin run -template -void TTStubAssociator::beginRun(const edm::Run& run, const edm::EventSetup& iSetup) { /// Print some information when loaded edm::LogInfo("TTStubAssociator< ") << templateNameFinder() << " > loaded."; } -/// End run -template -void TTStubAssociator::endRun(const edm::Run& run, const edm::EventSetup& iSetup) {} - 
/// Implement the producer template <> void TTStubAssociator::produce(edm::Event& iEvent, const edm::EventSetup& iSetup); diff --git a/SimTracker/TrackTriggerAssociation/plugins/TTTrackAssociator.h b/SimTracker/TrackTriggerAssociation/plugins/TTTrackAssociator.h index a936057ae03c6..0c1a507a07006 100644 --- a/SimTracker/TrackTriggerAssociation/plugins/TTTrackAssociator.h +++ b/SimTracker/TrackTriggerAssociation/plugins/TTTrackAssociator.h @@ -59,8 +59,6 @@ class TTTrackAssociator : public edm::stream::EDProducer<> { bool TTTrackAllowOneFalse2SStub; /// Mandatory methods - void beginRun(const edm::Run& run, const edm::EventSetup& iSetup) override; - void endRun(const edm::Run& run, const edm::EventSetup& iSetup) override; void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override; }; /// Close class @@ -90,22 +88,13 @@ TTTrackAssociator::TTTrackAssociator(const edm::ParameterSet& iConfig) { produces >(iTag.instance()); } -} - -/// Destructor -template -TTTrackAssociator::~TTTrackAssociator() {} - -/// Begin run -template -void TTTrackAssociator::beginRun(const edm::Run& run, const edm::EventSetup& iSetup) { /// Print some information when loaded edm::LogInfo("TTStubAssociator< ") << "TTTrackAssociator< " << templateNameFinder() << " > loaded."; } -/// End run +/// Destructor template -void TTTrackAssociator::endRun(const edm::Run& run, const edm::EventSetup& iSetup) {} +TTTrackAssociator::~TTTrackAssociator() {} /// Implement the producer template <> diff --git a/SimTracker/TrackerMaterialAnalysis/plugins/TrackingMaterialProducer.cc b/SimTracker/TrackerMaterialAnalysis/plugins/TrackingMaterialProducer.cc index 8e1458eaf43c1..0b5fcd0616ec4 100644 --- a/SimTracker/TrackerMaterialAnalysis/plugins/TrackingMaterialProducer.cc +++ b/SimTracker/TrackerMaterialAnalysis/plugins/TrackingMaterialProducer.cc @@ -27,6 +27,8 @@ #include "G4VPhysicalVolume.hh" #include "G4AffineTransform.hh" +#include + #include "TrackingMaterialProducer.h" // Uncomment the following 
#define directive to have the full list of @@ -48,7 +50,7 @@ static const G4LogicalVolume* GetVolume(const std::string& name) { #endif for (G4LogicalVolumeStore::const_iterator volume = lvs->begin(); volume != lvs->end(); ++volume) { - if ((const std::string&)(*volume)->GetName() == name) + if ((const std::string)(dd4hep::dd::noNamespace((*volume)->GetName())) == name) return (*volume); } return nullptr; @@ -189,8 +191,9 @@ void TrackingMaterialProducer::update(const BeginOfTrack* event) { bool TrackingMaterialProducer::isSelectedFast(const G4TouchableHistory* touchable) { for (int d = touchable->GetHistoryDepth() - 1; d >= 0; --d) { - if (std::find(m_selectedNames.begin(), m_selectedNames.end(), touchable->GetVolume(d)->GetName()) != - m_selectedNames.end()) + if (std::find(m_selectedNames.begin(), + m_selectedNames.end(), + (std::string)(dd4hep::dd::noNamespace(touchable->GetVolume(d)->GetName()))) != m_selectedNames.end()) return true; } return false; diff --git a/SimTransport/PPSProtonTransport/python/PPSTransportESSources_cfi.py b/SimTransport/PPSProtonTransport/python/PPSTransportESSources_cfi.py index 409420fac930c..7e6afe9b0b041 100644 --- a/SimTransport/PPSProtonTransport/python/PPSTransportESSources_cfi.py +++ b/SimTransport/PPSProtonTransport/python/PPSTransportESSources_cfi.py @@ -3,7 +3,7 @@ # beam optics from CondCore.CondDB.CondDB_cfi import * from CalibPPS.ESProducers.ctppsBeamParametersFromLHCInfoESSource_cfi import * -from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cff import * +from CalibPPS.ESProducers.ctppsInterpolatedOpticalFunctionsESSource_cfi import * ctppsInterpolatedOpticalFunctionsESSource.lhcInfoLabel = "" """ diff --git a/SimTransport/TotemRPProtonTransportParametrization/interface/TMultiDimFet.h b/SimTransport/TotemRPProtonTransportParametrization/interface/TMultiDimFet.h index a3249bdbce460..b787758a1ec19 100644 --- a/SimTransport/TotemRPProtonTransportParametrization/interface/TMultiDimFet.h +++ 
b/SimTransport/TotemRPProtonTransportParametrization/interface/TMultiDimFet.h @@ -129,7 +129,7 @@ class TMultiDimFet : public TNamed { public: TMultiDimFet(); - // TMultiDimFet(const TMultiDimFet &in); + TMultiDimFet(const TMultiDimFet &in) = default; const TMultiDimFet &operator=(const TMultiDimFet &in); TMultiDimFet(Int_t dimension, EMDFPolyType type = kMonomials, Option_t *option = ""); diff --git a/SimTransport/TotemRPProtonTransportParametrization/src/TMultiDimFet.cc b/SimTransport/TotemRPProtonTransportParametrization/src/TMultiDimFet.cc index c8d4df375b3d2..15965c095e98b 100644 --- a/SimTransport/TotemRPProtonTransportParametrization/src/TMultiDimFet.cc +++ b/SimTransport/TotemRPProtonTransportParametrization/src/TMultiDimFet.cc @@ -82,7 +82,7 @@ TMultiDimFet::TMultiDimFet() { const TMultiDimFet &TMultiDimFet::operator=(const TMultiDimFet &in) { if (this == &in) { - return in; + return *this; } fMeanQuantity = in.fMeanQuantity; // Mean of dependent quantity @@ -152,7 +152,7 @@ const TMultiDimFet &TMultiDimFet::operator=(const TMultiDimFet &in) { fShowCorrelation = in.fShowCorrelation; // print correlation matrix fIsUserFunction = in.fIsUserFunction; // Flag for user defined function fIsVerbose = in.fIsVerbose; // - return in; + return *this; } //____________________________________________________________________ diff --git a/TauAnalysis/MCEmbeddingTools/plugins/DYToMuMuGenFilter.cc b/TauAnalysis/MCEmbeddingTools/plugins/DYToMuMuGenFilter.cc index 8bb153d2c67c6..a5206e56c9e6b 100644 --- a/TauAnalysis/MCEmbeddingTools/plugins/DYToMuMuGenFilter.cc +++ b/TauAnalysis/MCEmbeddingTools/plugins/DYToMuMuGenFilter.cc @@ -15,25 +15,17 @@ class DYToMuMuGenFilter : public edm::stream::EDFilter<> { public: explicit DYToMuMuGenFilter(const edm::ParameterSet&); - ~DYToMuMuGenFilter() override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginStream(edm::StreamID) override; bool filter(edm::Event&, const edm::EventSetup&) 
override; - void endStream() override; edm::InputTag inputTag_; edm::EDGetTokenT genParticleCollection_; edm::Handle gen_handle; - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - // ----------member data --------------------------- }; @@ -42,8 +34,6 @@ DYToMuMuGenFilter::DYToMuMuGenFilter(const edm::ParameterSet& iConfig) { genParticleCollection_ = consumes(inputTag_); } -DYToMuMuGenFilter::~DYToMuMuGenFilter() {} - bool DYToMuMuGenFilter::filter(edm::Event& iEvent, const edm::EventSetup& iSetup) { iEvent.getByToken(genParticleCollection_, gen_handle); @@ -81,11 +71,6 @@ bool DYToMuMuGenFilter::filter(edm::Event& iEvent, const edm::EventSetup& iSetup } return false; } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void DYToMuMuGenFilter::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void DYToMuMuGenFilter::endStream() {} // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void DYToMuMuGenFilter::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { diff --git a/TauAnalysis/MCEmbeddingTools/plugins/MuMuForEmbeddingSelector.cc b/TauAnalysis/MCEmbeddingTools/plugins/MuMuForEmbeddingSelector.cc index 42125b59fae4a..f5c0eefd45e3a 100644 --- a/TauAnalysis/MCEmbeddingTools/plugins/MuMuForEmbeddingSelector.cc +++ b/TauAnalysis/MCEmbeddingTools/plugins/MuMuForEmbeddingSelector.cc @@ -39,19 +39,11 @@ class MuMuForEmbeddingSelector : public edm::stream::EDProducer<> { public: explicit MuMuForEmbeddingSelector(const edm::ParameterSet&); - ~MuMuForEmbeddingSelector() 
override; static void fillDescriptions(edm::ConfigurationDescriptions& descriptions); private: - void beginStream(edm::StreamID) override; void produce(edm::Event&, const edm::EventSetup&) override; - void endStream() override; - - //virtual void beginRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void endRun(edm::Run const&, edm::EventSetup const&) override; - //virtual void beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; - //virtual void endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) override; // ----------member data --------------------------- edm::EDGetTokenT> ZmumuCandidates_; @@ -87,11 +79,6 @@ MuMuForEmbeddingSelector::MuMuForEmbeddingSelector(const edm::ParameterSet& iCon //now do what ever other initialization is needed } -MuMuForEmbeddingSelector::~MuMuForEmbeddingSelector() { - // do anything here that needs to be done at destruction time - // (e.g. close files, deallocate resources etc.) -} - // // member functions // @@ -120,44 +107,6 @@ void MuMuForEmbeddingSelector::produce(edm::Event& iEvent, const edm::EventSetup iEvent.put(std::move(prod)); } -// ------------ method called once each stream before processing any runs, lumis or events ------------ -void MuMuForEmbeddingSelector::beginStream(edm::StreamID) {} - -// ------------ method called once each stream after processing all runs, lumis and events ------------ -void MuMuForEmbeddingSelector::endStream() {} - -// ------------ method called when starting to processes a run ------------ -/* -void -MuMuForEmbeddingSelector::beginRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a run ------------ -/* -void -MuMuForEmbeddingSelector::endRun(edm::Run const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when starting to processes a luminosity block ------------ -/* -void 
-MuMuForEmbeddingSelector::beginLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - -// ------------ method called when ending the processing of a luminosity block ------------ -/* -void -MuMuForEmbeddingSelector::endLuminosityBlock(edm::LuminosityBlock const&, edm::EventSetup const&) -{ -} -*/ - // ------------ method fills 'descriptions' with the allowed parameters for the module ------------ void MuMuForEmbeddingSelector::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { //The following says we do not know what parameters are allowed so do no validation diff --git a/TopQuarkAnalysis/TopEventProducers/src/PseudoTopProducer.cc b/TopQuarkAnalysis/TopEventProducers/src/PseudoTopProducer.cc index c6c0153e6e924..839dfee5a5d35 100644 --- a/TopQuarkAnalysis/TopEventProducers/src/PseudoTopProducer.cc +++ b/TopQuarkAnalysis/TopEventProducers/src/PseudoTopProducer.cc @@ -24,12 +24,7 @@ class PseudoTopProducer : public edm::stream::EDProducer<> { bool isFromHadron(const reco::Candidate* p) const; bool isBHadron(const reco::Candidate* p) const; bool isBHadron(const unsigned int pdgId) const; - void insertAllDaughters(const reco::Candidate* p, std::set& list) const; - const reco::Candidate* getLast(const reco::Candidate* p); - reco::GenParticleRef buildGenParticle(const reco::Candidate* p, - reco::GenParticleRefProd& refHandle, - std::auto_ptr& outColl) const; typedef reco::Particle::LorentzVector LorentzVector; private: @@ -531,15 +526,6 @@ void PseudoTopProducer::produce(edm::Event& event, const edm::EventSetup& eventS event.put(std::move(pseudoTop)); } -const reco::Candidate* PseudoTopProducer::getLast(const reco::Candidate* p) { - for (size_t i = 0, n = p->numberOfDaughters(); i < n; ++i) { - const reco::Candidate* dau = p->daughter(i); - if (p->pdgId() == dau->pdgId()) - return getLast(dau); - } - return p; -} - bool PseudoTopProducer::isFromHadron(const reco::Candidate* p) const { for (size_t i = 0, n = p->numberOfMothers(); i 
< n; ++i) { const reco::Candidate* mother = p->mother(i); @@ -596,19 +582,5 @@ bool PseudoTopProducer::isBHadron(const unsigned int absPdgId) const { return false; } -reco::GenParticleRef PseudoTopProducer::buildGenParticle(const reco::Candidate* p, - reco::GenParticleRefProd& refHandle, - std::auto_ptr& outColl) const { - reco::GenParticle pOut(*dynamic_cast(p)); - pOut.clearMothers(); - pOut.clearDaughters(); - pOut.resetMothers(refHandle.id()); - pOut.resetDaughters(refHandle.id()); - - outColl->push_back(pOut); - - return reco::GenParticleRef(refHandle, outColl->size() - 1); -} - #include "FWCore/Framework/interface/MakerMacros.h" DEFINE_FWK_MODULE(PseudoTopProducer); diff --git a/TopQuarkAnalysis/TopEventSelection/plugins/TtFullHadSignalSelMVATrainer.h b/TopQuarkAnalysis/TopEventSelection/plugins/TtFullHadSignalSelMVATrainer.h deleted file mode 100644 index d0d99a8cab704..0000000000000 --- a/TopQuarkAnalysis/TopEventSelection/plugins/TtFullHadSignalSelMVATrainer.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef TtFullHadSignalSelMVATrainer_h -#define TtFullHadSignalSelMVATrainer_h - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Utilities/interface/InputTag.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" - -#include "PhysicsTools/MVAComputer/interface/HelperMacros.h" -#include "PhysicsTools/MVAComputer/interface/MVAComputerCache.h" - -#include "AnalysisDataFormats/TopObjects/interface/TtEvent.h" -#include "DataFormats/PatCandidates/interface/Jet.h" -#include "DataFormats/RecoCandidate/interface/RecoCandidate.h" - -#ifndef TtFullHadSignalSelMVARcd_defined // to avoid conflicts with the TtFullHadSignalSelMVAComputer -#define TtFullHadSignalSelMVARcd_defined 
-MVA_COMPUTER_CONTAINER_DEFINE(TtFullHadSignalSelMVA); // defines TtFullHadSignalSelMVA -#endif - -class TtFullHadSignalSelMVATrainer : public edm::EDAnalyzer { -public: - explicit TtFullHadSignalSelMVATrainer(const edm::ParameterSet&); - ~TtFullHadSignalSelMVATrainer() override; - -private: - void analyze(const edm::Event& evt, const edm::EventSetup& setup) override; - void beginJob() override; - - edm::EDGetTokenT > jetsToken_; - edm::EDGetTokenT genEvtToken_; - - int whatData_; - int maxEv_; - int selEv; - double weight_; - - PhysicsTools::MVAComputerCache mvaComputer; -}; - -#endif diff --git a/TopQuarkAnalysis/TopEventSelection/plugins/TtSemiLepSignalSelMVATrainer.h b/TopQuarkAnalysis/TopEventSelection/plugins/TtSemiLepSignalSelMVATrainer.h deleted file mode 100644 index c0c84c88888cb..0000000000000 --- a/TopQuarkAnalysis/TopEventSelection/plugins/TtSemiLepSignalSelMVATrainer.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef TtSemiLepSignalSelMVATrainer_h -#define TtSemiLepSignalSelMVATrainer_h - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Utilities/interface/InputTag.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" - -#include "PhysicsTools/MVAComputer/interface/HelperMacros.h" -#include "PhysicsTools/MVAComputer/interface/MVAComputerCache.h" - -#include "AnalysisDataFormats/TopObjects/interface/TtEvent.h" -#include "DataFormats/PatCandidates/interface/MET.h" -#include "DataFormats/PatCandidates/interface/Jet.h" -#include "DataFormats/PatCandidates/interface/Muon.h" -#include "DataFormats/PatCandidates/interface/Electron.h" -#include "DataFormats/RecoCandidate/interface/RecoCandidate.h" - -#ifndef TtSemiLepSignalSelMVARcd_defined // to avoid conflicts with the 
TtSemiSignalSelMVAComputer -#define TtSemiLepSignalSelMVARcd_defined -MVA_COMPUTER_CONTAINER_DEFINE(TtSemiLepSignalSelMVA); // defines TtSemiLepSignalSelMVA -#endif - -class TtSemiLepSignalSelMVATrainer : public edm::EDAnalyzer { -public: - explicit TtSemiLepSignalSelMVATrainer(const edm::ParameterSet&); - ~TtSemiLepSignalSelMVATrainer() override; - -private: - void analyze(const edm::Event& evt, const edm::EventSetup& setup) override; - void beginJob() override; - - double DeltaPhi(const math::XYZTLorentzVector& v1, const math::XYZTLorentzVector& v2); - double DeltaR(const math::XYZTLorentzVector& v1, const math::XYZTLorentzVector& v2); - - // pt sorting stuff - struct JetwithHigherPt { - bool operator()(const pat::Jet& j1, const pat::Jet& j2) const { return j1.pt() > j2.pt(); }; - }; - - edm::EDGetTokenT > muonsToken_; - edm::EDGetTokenT > electronsToken_; - edm::EDGetTokenT > jetsToken_; - edm::EDGetTokenT > METsToken_; - edm::EDGetTokenT genEvtToken_; - - int lepChannel_; - int whatData_; - int maxEv_; - int selEv; - - PhysicsTools::MVAComputerCache mvaComputer; -}; - -#endif diff --git a/TopQuarkAnalysis/TopEventSelection/python/TtFullHadSignalSelMVATrainer_cfi.py b/TopQuarkAnalysis/TopEventSelection/python/TtFullHadSignalSelMVATrainer_cfi.py deleted file mode 100644 index e98a4e6c2a5df..0000000000000 --- a/TopQuarkAnalysis/TopEventSelection/python/TtFullHadSignalSelMVATrainer_cfi.py +++ /dev/null @@ -1,24 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -# -# module to make mvaTraining for jet parton associations -# -buildTraintree = cms.EDAnalyzer("TtFullHadSignalSelMVATrainer", - #input tags used in the example - jets = cms.InputTag("selectedPatJets"), - - #three possibilities: - # whatData=-1: in your training-file both, signal and background events are available - # whatData=1: your training-file contains only signal events - # whatData=0: your training-file contains only background events - whatData = cms.int32(1), - - #maximum number of training 
events to be used - # maxEv = -1: all events are used - # for example maxEv = 5000: writes only the first 5000 events to the training tree - maxEv = cms.int32(-1), - #event weight to be put here: - # standard is set to 1, so no weight is applied - # for example if different samples with different integrated luminosities have to be combined - weight = cms.double(1.0) -) diff --git a/TopQuarkAnalysis/TopEventSelection/python/TtSemiLepSignalSelMVATrainer_cfi.py b/TopQuarkAnalysis/TopEventSelection/python/TtSemiLepSignalSelMVATrainer_cfi.py deleted file mode 100644 index e78e3b890927b..0000000000000 --- a/TopQuarkAnalysis/TopEventSelection/python/TtSemiLepSignalSelMVATrainer_cfi.py +++ /dev/null @@ -1,30 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -# -# module to make mvaTraining for jet parton associations -# -buildTraintree = cms.EDAnalyzer("TtSemiLepSignalSelMVATrainer", - #input tags used in the example - muons = cms.InputTag("selectedPatMuons"), - elecs = cms.InputTag("selectedPatElectrons"), - jets = cms.InputTag("cleanPatJets"), - mets = cms.InputTag("patMETs"), - - # ------------------------------------------------ - # select semileptonic signal channel - # (all others are taken as background for training) - # 1: electron, 2: muon, 3: tau - # ------------------------------------------------ - lepChannel = cms.int32(2), - - #three possibilities: - # whatData=-1: in your training-file both, signal and background events are available - # whatData=1: your training-file contains only signal events - # whatData=0: your training-file contains only background events - whatData = cms.int32(1), - - #maximum number of training events to be used - # maxEv = -1: all events are used - # for example maxEv = 5000: writes only the first 5000 events to the training tree - maxEv = cms.int32(-1), -) diff --git a/TopQuarkAnalysis/TopJetCombination/plugins/TtSemiLepJetCombMVATrainer.h b/TopQuarkAnalysis/TopJetCombination/plugins/TtSemiLepJetCombMVATrainer.h deleted file mode 
100644 index 55905de3673ff..0000000000000 --- a/TopQuarkAnalysis/TopJetCombination/plugins/TtSemiLepJetCombMVATrainer.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef TtSemiLepJetCombMVATrainer_h -#define TtSemiLepJetCombMVATrainer_h - -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Frameworkfwd.h" -#include "FWCore/Framework/interface/MakerMacros.h" -#include "FWCore/Framework/interface/ESHandle.h" -#include "FWCore/Utilities/interface/InputTag.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" - -#include "DataFormats/RecoCandidate/interface/RecoCandidate.h" -#include "DataFormats/PatCandidates/interface/MET.h" -#include "DataFormats/PatCandidates/interface/Jet.h" - -#include "AnalysisDataFormats/TopObjects/interface/TopGenEvent.h" -#include "AnalysisDataFormats/TopObjects/interface/TtGenEvent.h" -#include "AnalysisDataFormats/TopObjects/interface/TtSemiLepEvtPartons.h" - -#include "PhysicsTools/MVAComputer/interface/HelperMacros.h" -#include "PhysicsTools/MVAComputer/interface/MVAComputerCache.h" - -#ifndef TtSemiLepJetCombMVARcd_defined // to avoid conflicts with the TtSemiLepJetCombMVAComputer -#define TtSemiLepJetCombMVARcd_defined -MVA_COMPUTER_CONTAINER_DEFINE(TtSemiLepJetCombMVA); // defines TtSemiLepJetCombMVARcd -#endif - -class TtSemiLepJetCombMVATrainer : public edm::EDAnalyzer { -public: - explicit TtSemiLepJetCombMVATrainer(const edm::ParameterSet&); - ~TtSemiLepJetCombMVATrainer() override; - -private: - void beginJob() override; - void analyze(const edm::Event& evt, const edm::EventSetup& setup) override; - void endJob() override; - - WDecay::LeptonType readLeptonType(const std::string& str); - - edm::EDGetTokenT genEvtToken_; - edm::EDGetTokenT > lepsToken_; - edm::EDGetTokenT > jetsToken_; - edm::EDGetTokenT > metsToken_; - edm::EDGetTokenT > > matchingToken_; - - int maxNJets_; - - WDecay::LeptonType 
leptonType_; - - PhysicsTools::MVAComputerCache mvaComputer; - - unsigned int nEvents[5]; -}; - -#endif diff --git a/TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMVATrainer_cfi.py b/TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMVATrainer_cfi.py deleted file mode 100644 index 7064b3b858c5d..0000000000000 --- a/TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMVATrainer_cfi.py +++ /dev/null @@ -1,30 +0,0 @@ -import FWCore.ParameterSet.Config as cms - -# -# module to perform MVA training for jet-parton association -# -trainTtSemiLepJetCombMVA = cms.EDAnalyzer("TtSemiLepJetCombMVATrainer", - #------------------------------------------------- - # sources (leptons, jets, MET, jet-parton matching) - #------------------------------------------------- - leps = cms.InputTag("selectedPatMuons"), - jets = cms.InputTag("selectedPatJets"), - mets = cms.InputTag("patMETs"), - matching = cms.InputTag("ttSemiLepJetPartonMatch"), - - # ------------------------------------------------ - # select semileptonic signal channel - # (all others are taken as background for training) - # either "kElec", "kMuon" or "kTau" - # ------------------------------------------------ - leptonType = cms.string("kMuon"), - - # ------------------------------------------------ - # maximum number of jets to be considered - # (has to be >= 4, can be set to -1 if you - # want to take all) - # ------------------------------------------------ - maxNJets = cms.int32(4) -) - - diff --git a/TrackPropagation/Geant4e/test/Geant4e_example_cfg.py b/TrackPropagation/Geant4e/test/Geant4e_example_cfg.py index 249f8e17f350e..79abf3148bb64 100644 --- a/TrackPropagation/Geant4e/test/Geant4e_example_cfg.py +++ b/TrackPropagation/Geant4e/test/Geant4e_example_cfg.py @@ -28,7 +28,7 @@ process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring( - 
'/store/relval/CMSSW_12_5_0_pre3/RelValSingleMuPt10/GEN-SIM-RECO/124X_mcRun3_2022_realistic_v8-v2/10000/6a6528c0-9d66-4358-bacc-158c40b439cf.root' + '/store/relval/CMSSW_14_0_0_pre2/RelValSingleMuPt10/GEN-SIM-RECO/133X_mcRun3_2023_realistic_v3_STD-v2/2590000/da5cf255-2d65-41ec-ac89-45daf33c66d3.root', ), ) diff --git a/TrackingTools/PatternTools/interface/bqueue.h b/TrackingTools/PatternTools/interface/bqueue.h index f40852c6c162e..15211d533427f 100644 --- a/TrackingTools/PatternTools/interface/bqueue.h +++ b/TrackingTools/PatternTools/interface/bqueue.h @@ -60,13 +60,13 @@ namespace cmsutils { private: _bqueue_item() : back(0), value(), refCount(0) {} - _bqueue_item(boost::intrusive_ptr<_bqueue_item > tail, const T &val) : back(tail), value(val), refCount(0) {} + _bqueue_item(boost::intrusive_ptr<_bqueue_item > &tail, const T &val) : back(tail), value(val), refCount(0) {} // move - _bqueue_item(boost::intrusive_ptr<_bqueue_item > tail, T &&val) + _bqueue_item(boost::intrusive_ptr<_bqueue_item > &tail, T &&val) : back(tail), value(std::move(val)), refCount(0) {} // emplace template - _bqueue_item(boost::intrusive_ptr<_bqueue_item > tail, Args &&...args) + _bqueue_item(boost::intrusive_ptr<_bqueue_item > &tail, Args &&...args) : back(tail), value(std::forward(args)...), refCount(0) {} boost::intrusive_ptr<_bqueue_item > back; T const value; @@ -110,6 +110,7 @@ namespace cmsutils { it = t2.it; return *this; } + _bqueue_itr(const _bqueue_itr &t2) = default; friend class bqueue; private: diff --git a/TrackingTools/TrackAssociator/test/TrivialExample.cc b/TrackingTools/TrackAssociator/test/TrivialExample.cc deleted file mode 100644 index f003c5684b8d9..0000000000000 --- a/TrackingTools/TrackAssociator/test/TrivialExample.cc +++ /dev/null @@ -1,76 +0,0 @@ -// -*- C++ -*- -// -// Package: TrackAssociator -// Class: TrivialExample -// -/* - - Description: Trivial example to use get energy for a collection of ctfWithMaterialTracks - -*/ -// -// Original Author: Dmytro 
Kovalskyi - -#include "FWCore/Framework/interface/EDAnalyzer.h" -#include "FWCore/Framework/interface/Event.h" -#include "FWCore/Framework/interface/EventSetup.h" -#include "FWCore/ParameterSet/interface/ParameterSet.h" -#include "FWCore/Framework/interface/MakerMacros.h" - -#include "DataFormats/TrackReco/interface/Track.h" -#include "DataFormats/Common/interface/Handle.h" - -#include "TrackingTools/TrackAssociator/interface/TrackDetectorAssociator.h" -#include "TrackingTools/TrackAssociator/interface/TrackAssociatorParameters.h" - -class TrivialExample : public edm::EDAnalyzer { - public: - explicit TrivialExample(const edm::ParameterSet&); - virtual ~TrivialExample(){} - virtual void analyze (const edm::Event&, const edm::EventSetup&); - - private: - TrackDetectorAssociator trackAssociator_; - TrackAssociatorParameters parameters_; -}; - -TrivialExample::TrivialExample(const edm::ParameterSet& iConfig) -{ - // TrackAssociator parameters - edm::ParameterSet parameters = iConfig.getParameter("TrackAssociatorParameters"); - parameters_.loadParameters( parameters ); - trackAssociator_.useDefaultPropagator(); -} - -void TrivialExample::analyze( const edm::Event& iEvent, const edm::EventSetup& iSetup) -{ - // get reco tracks - edm::Handle recoTracks; - iEvent.getByLabel("ctfWithMaterialTracks", recoTracks); - if (! 
recoTracks.isValid() ) throw cms::Exception("FatalError") << "No reco tracks were found\n"; - - for(reco::TrackCollection::const_iterator recoTrack = recoTracks->begin(); - recoTrack != recoTracks->end(); ++recoTrack){ - - if (recoTrack->pt() < 2) continue; // skip low Pt tracks - - - TrackDetMatchInfo info = trackAssociator_.associate(iEvent, iSetup, *recoTrack, parameters_); - - edm::LogVerbatim("TrackAssociator") << "\n-------------------------------------------------------\n Track (pt,eta,phi): " << - recoTrack->pt() << " , " << recoTrack->eta() << " , " << recoTrack->phi() ; - edm::LogVerbatim("TrackAssociator") << "Ecal energy in crossed crystals based on RecHits: " << - info.crossedEnergy(TrackDetMatchInfo::EcalRecHits); - edm::LogVerbatim("TrackAssociator") << "Ecal energy in 3x3 crystals based on RecHits: " << - info.nXnEnergy(TrackDetMatchInfo::EcalRecHits, 1); - edm::LogVerbatim("TrackAssociator") << "Hcal energy in crossed towers based on RecHits: " << - info.crossedEnergy(TrackDetMatchInfo::HcalRecHits); - edm::LogVerbatim("TrackAssociator") << "Hcal energy in 3x3 towers based on RecHits: " << - info.nXnEnergy(TrackDetMatchInfo::HcalRecHits, 1); - edm::LogVerbatim("TrackAssociator") << "Number of muon segment matches: " << info.numberOfSegments(); - - } -} - -//define this as a plug-in -DEFINE_FWK_MODULE(TrivialExample); diff --git a/TrackingTools/TransientTrack/interface/TransientTrackBuilder.h b/TrackingTools/TransientTrack/interface/TransientTrackBuilder.h index 21280d9c2d5e4..8e20fffcc995d 100644 --- a/TrackingTools/TransientTrack/interface/TransientTrackBuilder.h +++ b/TrackingTools/TransientTrack/interface/TransientTrackBuilder.h @@ -69,6 +69,7 @@ class TransientTrackBuilder { const MagneticField* field() const { return theField; } const edm::ESHandle trackingGeometry() const { return theTrackingGeometry; } + static constexpr float defaultInvalidTrackTimeReso = 0.350f; private: const MagneticField* theField; diff --git 
a/TrackingTools/TransientTrack/src/TransientTrackBuilder.cc b/TrackingTools/TransientTrack/src/TransientTrackBuilder.cc index afe55fa17f3dc..748b15814938c 100644 --- a/TrackingTools/TransientTrack/src/TransientTrackBuilder.cc +++ b/TrackingTools/TransientTrack/src/TransientTrackBuilder.cc @@ -13,10 +13,6 @@ using namespace reco; using namespace std; using namespace edm; -namespace { - constexpr float defaultInvalidTrackReso = 0.350f; -} - TransientTrack TransientTrackBuilder::build(const Track* t) const { return TransientTrack(*t, theField, theTrackingGeometry); } @@ -105,11 +101,11 @@ vector TransientTrackBuilder::build(const edm::Handle 1e-6 ? timeReso : defaultInvalidTrackReso); // make the error much larger than the BS time width + timeReso = (timeReso > 1e-6 ? timeReso + : defaultInvalidTrackTimeReso); // make the error much larger than the BS time width if (edm::isNotFinite(time)) { time = 0.0; - timeReso = defaultInvalidTrackReso; + timeReso = defaultInvalidTrackTimeReso; } ttVect.push_back(TransientTrack(ref, time, timeReso, theField, theTrackingGeometry)); } @@ -125,11 +121,11 @@ vector TransientTrackBuilder::build(const edm::Handle 1e-6 ? timeReso : defaultInvalidTrackReso); // make the error much larger than the BS time width + timeReso = (timeReso > 1e-6 ? timeReso + : defaultInvalidTrackTimeReso); // make the error much larger than the BS time width if (edm::isNotFinite(time)) { time = 0.0; - timeReso = defaultInvalidTrackReso; + timeReso = defaultInvalidTrackTimeReso; } ttVect.push_back(TransientTrack(new GsfTransientTrack(ref, time, timeReso, theField, theTrackingGeometry))); } @@ -148,11 +144,11 @@ vector TransientTrackBuilder::build(const edm::Handle(trkColl, i).castTo(); double time = trackTimes[ref]; double timeReso = trackTimeResos[ref]; - timeReso = - (timeReso > 1e-6 ? timeReso : defaultInvalidTrackReso); // make the error much larger than the BS time width + timeReso = (timeReso > 1e-6 ? 
timeReso + : defaultInvalidTrackTimeReso); // make the error much larger than the BS time width if (edm::isNotFinite(time)) { time = 0.0; - timeReso = defaultInvalidTrackReso; + timeReso = defaultInvalidTrackTimeReso; } ttVect.push_back(TransientTrack(new GsfTransientTrack( RefToBase(trkColl, i).castTo(), time, timeReso, theField, theTrackingGeometry))); @@ -160,11 +156,11 @@ vector TransientTrackBuilder::build(const edm::Handle(trkColl, i).castTo(); double time = trackTimes[ref]; double timeReso = trackTimeResos[ref]; - timeReso = - (timeReso > 1e-6 ? timeReso : defaultInvalidTrackReso); // make the error much larger than the BS time width + timeReso = (timeReso > 1e-6 ? timeReso + : defaultInvalidTrackTimeReso); // make the error much larger than the BS time width if (edm::isNotFinite(time)) { time = 0.0; - timeReso = defaultInvalidTrackReso; + timeReso = defaultInvalidTrackTimeReso; } ttVect.push_back(TransientTrack( RefToBase(trkColl, i).castTo(), time, timeReso, theField, theTrackingGeometry)); diff --git a/Utilities/General/test/test_precomputed_value_sort.cpp b/Utilities/General/test/test_precomputed_value_sort.cpp index 8df1654a9ad57..14e6b2a573a42 100644 --- a/Utilities/General/test/test_precomputed_value_sort.cpp +++ b/Utilities/General/test/test_precomputed_value_sort.cpp @@ -4,6 +4,7 @@ #include #include #include +#include using namespace std; @@ -23,7 +24,6 @@ class Phi { class Point { public: Point(float x = 0, float y = 0); - Point(const Point& p); float r() const { return sqrt(X * X + Y * Y); } Phi phi() const { return Phi(atan2(Y, X)); } float X, Y; @@ -37,8 +37,6 @@ ostream& operator<<(ostream& o, const Point* p) { return o << *p; } Point::Point(float x, float y) : X(x), Y(y) { cout << "New Point" << *this << endl; } -Point::Point(const Point& p) : X(p.X), Y(p.Y) { cout << "New Point (copy)" << *this << endl; } - // A trivial operation on Point float extractR1(const Point& p) { return p.r(); } @@ -69,6 +67,30 @@ int main() { 
v1.push_back(Point(-5.12, 0.321)); v1.push_back(Point(-5.12, -0.321)); + vector r; + r.reserve(v1.size()); + vector phi; + phi.reserve(v1.size()); + for (vector::iterator i = v1.begin(); i != v1.end(); ++i) { + r.push_back(i->r()); + phi.push_back(i->phi()); + } + std::sort(r.begin(), r.end()); + std::sort(phi.begin(), phi.end()); + + cout << endl; + cout << "Sorted R's:" << endl; + for (size_t i = 0; i < r.size(); i++) { + cout << r[i] << endl; + } + + cout << endl; + cout << "Sorted Phi's:" << endl; + for (size_t i = 0; i < phi.size(); i++) { + cout << phi[i] << endl; + } + cout << endl; + // A vector of pointer to Points vector v2; for (vector::iterator i = v1.begin(); i != v1.end(); ++i) { @@ -87,30 +109,45 @@ int main() { cout << "Sorted with ExtractR1 : " << endl; copy(v1.begin(), v1.end(), ostream_iterator(cout, "\n")); cout << endl; + for (size_t i = 0; i < r.size(); i++) { + assert(v1[i].r() == r[i]); + } // Sort v2 cout << "Sorted with ExtractR2: " << endl; precomputed_value_sort(v2.begin(), v2.end(), extractR2); copy(v2.begin(), v2.end(), ostream_iterator(cout, "\n")); cout << endl; + for (size_t i = 0; i < r.size(); i++) { + assert(v2[i]->r() == r[i]); + } // Sort v3 using a BinaryPredicate cout << "Sort with LessR: " << endl; precomputed_value_sort(v3.begin(), v3.end(), extractR2, lessR); copy(v3.begin(), v3.end(), ostream_iterator(cout, "\n")); cout << endl; + for (size_t i = 0; i < r.size(); i++) { + assert(v3[i]->r() == r[i]); + } - // Sort v3 using phi + // Sort v2 using phi cout << "Sort with ExtractPhi2: " << endl; - precomputed_value_sort(v3.begin(), v3.end(), extractPhi2); - copy(v3.begin(), v3.end(), ostream_iterator(cout, "\n")); + precomputed_value_sort(v2.begin(), v2.end(), extractPhi2); + copy(v2.begin(), v2.end(), ostream_iterator(cout, "\n")); cout << endl; + for (size_t i = 0; i < phi.size(); i++) { + assert(v2[i]->phi() == phi[i]); + } // Sort v3 using a BinaryPredicate cout << "Sort with LessDPhi: " << endl; 
precomputed_value_sort(v3.begin(), v3.end(), extractPhi2, lessDPhi); copy(v3.begin(), v3.end(), ostream_iterator(cout, "\n")); cout << endl; + for (size_t i = 0; i < phi.size(); i++) { + assert(v3[i]->phi() == phi[i]); + } return 0; } diff --git a/Utilities/RelMon/python/directories2html.py b/Utilities/RelMon/python/directories2html.py index 4da2cc0905f42..73c769bb96041 100755 --- a/Utilities/RelMon/python/directories2html.py +++ b/Utilities/RelMon/python/directories2html.py @@ -23,6 +23,8 @@ sys.argv=theargv import os +import hashlib + if "RELMON_SA" in os.environ: from .dirstructure import Comparison,Directory from .definitions import * @@ -32,7 +34,6 @@ from Utilities.RelMon.definitions import * from Utilities.RelMon.utils import unpickler - import hashlib #------------------------------------------------------------------------------- def encode_obj_url(url): @@ -933,8 +934,9 @@ def make_summary_table(indir,aggregation_rules,aggregation_rules_twiki, hashing_ #-----------UPDATES------ def hash_name(file_name, flag): - #print " HashFILE name: "+file_name if flag: #if hashing flag is ON then return + if (3,0,0) <= sys.version_info: + return hashlib.md5(file_name.encode('utf-8')).hexdigest()[:10] return hashlib.md5(file_name).hexdigest()[:10] #md5 hashed file name with length 10 else: return file_name #return standart name diff --git a/Utilities/ReleaseScripts/test/BuildFile.xml b/Utilities/ReleaseScripts/test/BuildFile.xml index c33943c32914e..9c17b56d1c695 100644 --- a/Utilities/ReleaseScripts/test/BuildFile.xml +++ b/Utilities/ReleaseScripts/test/BuildFile.xml @@ -1,4 +1,6 @@ + + diff --git a/Utilities/StaticAnalyzers/scripts/edm-global-class.py b/Utilities/StaticAnalyzers/scripts/edm-global-class.py index e9937bcc8e5bf..32968f024c713 100755 --- a/Utilities/StaticAnalyzers/scripts/edm-global-class.py +++ b/Utilities/StaticAnalyzers/scripts/edm-global-class.py @@ -135,6 +135,8 @@ visited.add(node) if node in Hdg: stack = [(node, iter(Hdg[node]))] + else: + stack 
= [] if node in Idg: Qdg = nx.dfs_preorder_nodes(Idg, node) for q in Qdg: diff --git a/Utilities/StaticAnalyzers/src/PsetExistsFCallChecker.cpp b/Utilities/StaticAnalyzers/src/PsetExistsFCallChecker.cpp index 9860624d1eafd..b3b0c81edfb85 100644 --- a/Utilities/StaticAnalyzers/src/PsetExistsFCallChecker.cpp +++ b/Utilities/StaticAnalyzers/src/PsetExistsFCallChecker.cpp @@ -46,6 +46,8 @@ namespace clangcms { const NamedDecl *PD = llvm::dyn_cast_or_null(AC->getDecl()); if (!PD) return; + if (!PD->hasAttr()) + return; std::string pname = support::getQualifiedName(*PD); Report(mname, pname, CCE); @@ -59,6 +61,8 @@ namespace clangcms { const NamedDecl *PD = llvm::dyn_cast_or_null(AC->getDecl()); if (!PD) return; + if (!PD->hasAttr()) + return; std::string mname = support::getQualifiedName(*MD); std::string pname = support::getQualifiedName(*PD); Report(mname, pname, CE); @@ -82,6 +86,8 @@ namespace clangcms { const NamedDecl *PD = llvm::dyn_cast_or_null(AC->getDecl()); if (!PD) return; + if (!PD->hasAttr()) + return; std::string pname = support::getQualifiedName(*PD); Report(mname, pname, CE); diff --git a/Validation/CTPPS/plugins/CTPPSHepMCDistributionPlotter.cc b/Validation/CTPPS/plugins/CTPPSHepMCDistributionPlotter.cc index 358c31f8f54e6..b93032e19164d 100644 --- a/Validation/CTPPS/plugins/CTPPSHepMCDistributionPlotter.cc +++ b/Validation/CTPPS/plugins/CTPPSHepMCDistributionPlotter.cc @@ -15,8 +15,7 @@ #include "SimDataFormats/GeneratorProducts/interface/HepMCProduct.h" -#include "CondFormats/RunInfo/interface/LHCInfo.h" -#include "CondFormats/DataRecord/interface/LHCInfoRcd.h" +#include "CondTools/RunInfo/interface/LHCInfoCombined.h" #include "TFile.h" #include "TH1D.h" @@ -29,13 +28,20 @@ class CTPPSHepMCDistributionPlotter : public edm::one::EDAnalyzer<> { public: explicit CTPPSHepMCDistributionPlotter(const edm::ParameterSet &); + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + private: void analyze(const edm::Event &, const 
edm::EventSetup &) override; void endJob() override; - edm::EDGetTokenT tokenHepMC_; - edm::ESGetToken lhcInfoESToken_; - std::string outputFile_; + const edm::EDGetTokenT tokenHepMC_; + + const edm::ESGetToken lhcInfoToken_; + const edm::ESGetToken lhcInfoPerLSToken_; + const edm::ESGetToken lhcInfoPerFillToken_; + const bool useNewLHCInfo_; + + const std::string outputFile_; std::unique_ptr h_vtx_x_, h_vtx_y_, h_vtx_z_, h_vtx_t_; std::unique_ptr h_xi_, h_th_x_, h_th_y_; @@ -51,7 +57,12 @@ using namespace HepMC; CTPPSHepMCDistributionPlotter::CTPPSHepMCDistributionPlotter(const edm::ParameterSet &iConfig) : tokenHepMC_(consumes(iConfig.getParameter("tagHepMC"))), - lhcInfoESToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + + lhcInfoToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + lhcInfoPerLSToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerLSLabel")))), + lhcInfoPerFillToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerFillLabel")))), + useNewLHCInfo_(iConfig.getParameter("useNewLHCInfo")), + outputFile_(iConfig.getParameter("outputFile")), h_vtx_x_(new TH1D("h_vtx_x", ";vtx_x (mm)", 100, 0., 0.)), @@ -65,9 +76,22 @@ CTPPSHepMCDistributionPlotter::CTPPSHepMCDistributionPlotter(const edm::Paramete //---------------------------------------------------------------------------------------------------- +void CTPPSHepMCDistributionPlotter::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + + desc.add("lhcInfoLabel", "")->setComment("label of the LHCInfo record"); + desc.add("lhcInfoPerLSLabel", "")->setComment("label of the LHCInfoPerLS record"); + desc.add("lhcInfoPerFillLabel", "")->setComment("label of the LHCInfoPerFill record"); + desc.add("useNewLHCInfo", false)->setComment("flag whether to use new LHCInfoPer* records or old LHCInfo"); + + desc.add("outputFile", "")->setComment("output file"); + + 
descriptions.add("ctppsHepMCDistributionPlotterDefault", desc); +} + void CTPPSHepMCDistributionPlotter::analyze(const edm::Event &iEvent, const edm::EventSetup &iSetup) { // get conditions - const auto &lhcInfo = iSetup.getData(lhcInfoESToken_); + LHCInfoCombined lhcInfoCombined(iSetup, lhcInfoPerLSToken_, lhcInfoPerFillToken_, lhcInfoToken_, useNewLHCInfo_); // get input edm::Handle hHepMC; @@ -98,7 +122,7 @@ void CTPPSHepMCDistributionPlotter::analyze(const edm::Event &iEvent, const edm: continue; const auto &mom = part->momentum(); - const double p_nom = lhcInfo.energy(); + const double p_nom = lhcInfoCombined.energy; if (mom.rho() / p_nom < 0.7) continue; diff --git a/Validation/CTPPS/plugins/CTPPSLHCInfoPlotter.cc b/Validation/CTPPS/plugins/CTPPSLHCInfoPlotter.cc index 8d80b0246ad0a..bf89ded95ff21 100644 --- a/Validation/CTPPS/plugins/CTPPSLHCInfoPlotter.cc +++ b/Validation/CTPPS/plugins/CTPPSLHCInfoPlotter.cc @@ -71,8 +71,8 @@ CTPPSLHCInfoPlotter::CTPPSLHCInfoPlotter(const edm::ParameterSet &iConfig) -0.005, 1.005)), - h_fill_(new TH1D("h_fill", ";fill", 4001, 3999.5, 8000.5)), - h_run_(new TH1D("h_run", ";run", 6000, 270E3, 330E3)) {} + h_fill_(new TH1D("h_fill", ";fill", 6001, 3999.5, 10000.5)), + h_run_(new TH1D("h_run", ";run", 6000, 270E3, 430E3)) {} //---------------------------------------------------------------------------------------------------- @@ -86,7 +86,7 @@ void CTPPSLHCInfoPlotter::fillDescriptions(edm::ConfigurationDescriptions &descr desc.add("outputFile", "")->setComment("output file"); - descriptions.add("ctppsLHCInfoPlotter", desc); + descriptions.add("ctppsLHCInfoPlotterDefault", desc); } //---------------------------------------------------------------------------------------------------- diff --git a/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorData.cc b/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorData.cc index cdef4aacf5f2a..ee88320d42151 100644 --- 
a/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorData.cc +++ b/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorData.cc @@ -12,8 +12,7 @@ #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/Framework/interface/ESWatcher.h" -#include "CondFormats/RunInfo/interface/LHCInfo.h" -#include "CondFormats/DataRecord/interface/LHCInfoRcd.h" +#include "CondTools/RunInfo/interface/LHCInfoCombined.h" #include "CondFormats/DataRecord/interface/CTPPSInterpolatedOpticsRcd.h" #include "CondFormats/PPSObjects/interface/LHCInterpolatedOpticalFunctionsSetCollection.h" #include "CondFormats/DataRecord/interface/PPSAssociationCutsRcd.h" @@ -53,7 +52,10 @@ class CTPPSProtonReconstructionEfficiencyEstimatorData : public edm::one::EDAnal edm::EDGetTokenT tokenTracks_; edm::EDGetTokenT tokenRecoProtonsMultiRP_; - edm::ESGetToken lhcInfoESToken_; + const edm::ESGetToken lhcInfoToken_; + const edm::ESGetToken lhcInfoPerLSToken_; + const edm::ESGetToken lhcInfoPerFillToken_; + const bool useNewLHCInfo_; edm::ESGetToken opticsESToken_; edm::ESGetToken ppsAssociationCutsToken_; @@ -243,7 +245,11 @@ CTPPSProtonReconstructionEfficiencyEstimatorData::CTPPSProtonReconstructionEffic tokenRecoProtonsMultiRP_( consumes(iConfig.getParameter("tagRecoProtonsMultiRP"))), - lhcInfoESToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + lhcInfoToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + lhcInfoPerLSToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerLSLabel")))), + lhcInfoPerFillToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerFillLabel")))), + useNewLHCInfo_(iConfig.getParameter("useNewLHCInfo")), + opticsESToken_(esConsumes(ESInputTag("", iConfig.getParameter("opticsLabel")))), ppsAssociationCutsToken_( esConsumes(ESInputTag("", iConfig.getParameter("ppsAssociationCutsLabel")))), @@ -283,7 +289,10 @@ void 
CTPPSProtonReconstructionEfficiencyEstimatorData::fillDescriptions(edm::Con desc.add("tagTracks", edm::InputTag())->setComment("input tag for local lite tracks"); desc.add("tagRecoProtonsMultiRP", edm::InputTag())->setComment("input tag for multi-RP reco protons"); - desc.add("lhcInfoLabel", "")->setComment("label of LHCInfo data"); + desc.add("lhcInfoLabel", "")->setComment("label of the LHCInfo record"); + desc.add("lhcInfoPerLSLabel", "")->setComment("label of the LHCInfoPerLS record"); + desc.add("lhcInfoPerFillLabel", "")->setComment("label of the LHCInfoPerFill record"); + desc.add("useNewLHCInfo", false)->setComment("flag whether to use new LHCInfoPer* records or old LHCInfo"); desc.add("opticsLabel", "")->setComment("label of optics data"); desc.add("ppsAssociationCutsLabel", "")->setComment("label of PPSAssociationCuts data"); @@ -311,7 +320,7 @@ void CTPPSProtonReconstructionEfficiencyEstimatorData::fillDescriptions(edm::Con desc.addUntracked("verbosity", 0)->setComment("verbosity level"); - descriptions.add("ctppsProtonReconstructionEfficiencyEstimatorData", desc); + descriptions.add("ctppsProtonReconstructionEfficiencyEstimatorDataDefault", desc); } //---------------------------------------------------------------------------------------------------- @@ -321,7 +330,8 @@ void CTPPSProtonReconstructionEfficiencyEstimatorData::analyze(const edm::Event std::ostringstream os; // get conditions - const auto &lhcInfo = iSetup.getData(lhcInfoESToken_); + const LHCInfoCombined lhcInfoCombined( + iSetup, lhcInfoPerLSToken_, lhcInfoPerFillToken_, lhcInfoToken_, useNewLHCInfo_); const auto &opticalFunctions = iSetup.getData(opticsESToken_); const auto &ppsAssociationCuts = iSetup.getData(ppsAssociationCutsToken_); @@ -660,8 +670,10 @@ void CTPPSProtonReconstructionEfficiencyEstimatorData::analyze(const edm::Event evp.idx_N = i; evp.idx_F = j; - evp.x_cut = ass_cut.isSatisfied(ass_cut.qX, tr_i.x(), tr_i.y(), lhcInfo.crossingAngle(), tr_i.x() - tr_j.x()); - evp.y_cut 
= ass_cut.isSatisfied(ass_cut.qY, tr_i.x(), tr_i.y(), lhcInfo.crossingAngle(), tr_i.y() - tr_j.y()); + evp.x_cut = + ass_cut.isSatisfied(ass_cut.qX, tr_i.x(), tr_i.y(), lhcInfoCombined.crossingAngle(), tr_i.x() - tr_j.x()); + evp.y_cut = + ass_cut.isSatisfied(ass_cut.qY, tr_i.x(), tr_i.y(), lhcInfoCombined.crossingAngle(), tr_i.y() - tr_j.y()); evp.match = evp.x_cut && evp.y_cut; diff --git a/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorMC.cc b/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorMC.cc index e415b13ac01bf..07772a66c42df 100644 --- a/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorMC.cc +++ b/Validation/CTPPS/plugins/CTPPSProtonReconstructionEfficiencyEstimatorMC.cc @@ -12,8 +12,7 @@ #include "DataFormats/CTPPSDetId/interface/CTPPSDetId.h" -#include "CondFormats/RunInfo/interface/LHCInfo.h" -#include "CondFormats/DataRecord/interface/LHCInfoRcd.h" +#include "CondTools/RunInfo/interface/LHCInfoCombined.h" #include "SimDataFormats/GeneratorProducts/interface/HepMCProduct.h" @@ -40,6 +39,8 @@ class CTPPSProtonReconstructionEfficiencyEstimatorMC : public edm::one::EDAnalyz public: explicit CTPPSProtonReconstructionEfficiencyEstimatorMC(const edm::ParameterSet &); + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + private: void analyze(const edm::Event &, const edm::EventSetup &) override; void endJob() override; @@ -53,7 +54,10 @@ class CTPPSProtonReconstructionEfficiencyEstimatorMC : public edm::one::EDAnalyz edm::EDGetTokenT tokenRecoProtonsMultiRP_; - edm::ESGetToken lhcInfoESToken_; + const edm::ESGetToken lhcInfoToken_; + const edm::ESGetToken lhcInfoPerLSToken_; + const edm::ESGetToken lhcInfoPerFillToken_; + const bool useNewLHCInfo_; unsigned int rpId_45_N_, rpId_45_F_; unsigned int rpId_56_N_, rpId_56_F_; @@ -103,7 +107,11 @@ CTPPSProtonReconstructionEfficiencyEstimatorMC::CTPPSProtonReconstructionEfficie 
tracksToken_(consumes(iConfig.getParameter("tagTracks"))), tokenRecoProtonsMultiRP_( consumes(iConfig.getParameter("tagRecoProtonsMultiRP"))), - lhcInfoESToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + + lhcInfoToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + lhcInfoPerLSToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerLSLabel")))), + lhcInfoPerFillToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerFillLabel")))), + useNewLHCInfo_(iConfig.getParameter("useNewLHCInfo")), rpId_45_N_(iConfig.getParameter("rpId_45_N")), rpId_45_F_(iConfig.getParameter("rpId_45_F")), @@ -128,11 +136,37 @@ CTPPSProtonReconstructionEfficiencyEstimatorMC::CTPPSProtonReconstructionEfficie //---------------------------------------------------------------------------------------------------- +void CTPPSProtonReconstructionEfficiencyEstimatorMC::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + + desc.add("tagTracks", edm::InputTag())->setComment("input tag for local lite tracks"); + desc.add("tagRecoProtonsMultiRP", edm::InputTag())->setComment("input tag for multi-RP reco protons"); + + desc.add("lhcInfoLabel", "")->setComment("label of the LHCInfo record"); + desc.add("lhcInfoPerLSLabel", "")->setComment("label of the LHCInfoPerLS record"); + desc.add("lhcInfoPerFillLabel", "")->setComment("label of the LHCInfoPerFill record"); + desc.add("useNewLHCInfo", false)->setComment("flag whether to use new LHCInfoPer* records or old LHCInfo"); + + desc.add("rpId_45_N", 0)->setComment("decimal RP id for 45 near"); + desc.add("rpId_45_F", 0)->setComment("decimal RP id for 45 far"); + desc.add("rpId_56_N", 0)->setComment("decimal RP id for 56 near"); + desc.add("rpId_56_F", 0)->setComment("decimal RP id for 56 far"); + + desc.add("outputFile", "output.root")->setComment("output file name"); + + desc.addUntracked("verbosity", 0)->setComment("verbosity level"); + + 
descriptions.add("ctppsProtonReconstructionEfficiencyEstimatorMCDefault", desc); +} + +//---------------------------------------------------------------------------------------------------- + void CTPPSProtonReconstructionEfficiencyEstimatorMC::analyze(const edm::Event &iEvent, const edm::EventSetup &iSetup) { std::ostringstream os; // get conditions - const auto &lhcInfo = iSetup.getData(lhcInfoESToken_); + const LHCInfoCombined lhcInfoCombined( + iSetup, lhcInfoPerLSToken_, lhcInfoPerFillToken_, lhcInfoToken_, useNewLHCInfo_); // get input edm::Handle hHepMCAfterSmearing; @@ -184,7 +218,7 @@ void CTPPSProtonReconstructionEfficiencyEstimatorMC::analyze(const edm::Event &i info.arm = (mom.z() > 0.) ? 0 : 1; - const double p_nom = lhcInfo.energy(); + const double p_nom = lhcInfoCombined.energy; info.xi = (p_nom - mom.rho()) / p_nom; particleInfo[part->barcode()] = std::move(info); diff --git a/Validation/CTPPS/plugins/CTPPSProtonReconstructionSimulationValidator.cc b/Validation/CTPPS/plugins/CTPPSProtonReconstructionSimulationValidator.cc index 686d0e2ade00c..647cc426083eb 100644 --- a/Validation/CTPPS/plugins/CTPPSProtonReconstructionSimulationValidator.cc +++ b/Validation/CTPPS/plugins/CTPPSProtonReconstructionSimulationValidator.cc @@ -12,8 +12,7 @@ #include "DataFormats/CTPPSDetId/interface/CTPPSDetId.h" -#include "CondFormats/RunInfo/interface/LHCInfo.h" -#include "CondFormats/DataRecord/interface/LHCInfoRcd.h" +#include "CondTools/RunInfo/interface/LHCInfoCombined.h" #include "SimDataFormats/GeneratorProducts/interface/HepMCProduct.h" @@ -38,6 +37,8 @@ class CTPPSProtonReconstructionSimulationValidator : public edm::one::EDAnalyzer public: explicit CTPPSProtonReconstructionSimulationValidator(const edm::ParameterSet &); + static void fillDescriptions(edm::ConfigurationDescriptions &descriptions); + private: void analyze(const edm::Event &, const edm::EventSetup &) override; void endJob() override; @@ -47,7 +48,7 @@ class 
CTPPSProtonReconstructionSimulationValidator : public edm::one::EDAnalyzer const reco::ForwardProton &rec_pr, const HepMC::FourVector &vtx, const HepMC::FourVector &mom, - const LHCInfo &lhcInfo); + const double energy); edm::EDGetTokenT tokenHepMCBeforeSmearing_; edm::EDGetTokenT tokenHepMCAfterSmearing_; @@ -55,7 +56,10 @@ class CTPPSProtonReconstructionSimulationValidator : public edm::one::EDAnalyzer edm::EDGetTokenT tokenRecoProtonsSingleRP_; edm::EDGetTokenT tokenRecoProtonsMultiRP_; - edm::ESGetToken lhcInfoESToken_; + const edm::ESGetToken lhcInfoToken_; + const edm::ESGetToken lhcInfoPerLSToken_; + const edm::ESGetToken lhcInfoPerFillToken_; + const bool useNewLHCInfo_; std::string outputFile_; @@ -201,14 +205,35 @@ CTPPSProtonReconstructionSimulationValidator::CTPPSProtonReconstructionSimulatio consumes(iConfig.getParameter("tagRecoProtonsSingleRP"))), tokenRecoProtonsMultiRP_( consumes(iConfig.getParameter("tagRecoProtonsMultiRP"))), - lhcInfoESToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + lhcInfoToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoLabel")))), + lhcInfoPerLSToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerLSLabel")))), + lhcInfoPerFillToken_(esConsumes(ESInputTag("", iConfig.getParameter("lhcInfoPerFillLabel")))), + useNewLHCInfo_(iConfig.getParameter("useNewLHCInfo")), outputFile_(iConfig.getParameter("outputFile")) {} //---------------------------------------------------------------------------------------------------- +void CTPPSProtonReconstructionSimulationValidator::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { + edm::ParameterSetDescription desc; + + desc.add("lhcInfoLabel", "")->setComment("label of the LHCInfo record"); + desc.add("lhcInfoPerLSLabel", "")->setComment("label of the LHCInfoPerLS record"); + desc.add("lhcInfoPerFillLabel", "")->setComment("label of the LHCInfoPerFill record"); + desc.add("useNewLHCInfo", false)->setComment("flag whether to 
use new LHCInfoPer* records or old LHCInfo"); + + desc.add("outputFile", "output.root")->setComment("output file name"); + + desc.addUntracked("verbosity", 0)->setComment("verbosity level"); + + descriptions.add("ctppsProtonReconstructionSimulationValidatorDefault", desc); +} + +//---------------------------------------------------------------------------------------------------- + void CTPPSProtonReconstructionSimulationValidator::analyze(const edm::Event &iEvent, const edm::EventSetup &iSetup) { // get conditions - const auto &lhcInfo = iSetup.getData(lhcInfoESToken_); + const LHCInfoCombined lhcInfoCombined( + iSetup, lhcInfoPerLSToken_, lhcInfoPerFillToken_, lhcInfoToken_, useNewLHCInfo_); // get input edm::Handle hHepMCBeforeSmearing; @@ -319,7 +344,7 @@ void CTPPSProtonReconstructionSimulationValidator::analyze(const edm::Event &iEv if (rec_pr.method() == reco::ForwardProton::ReconstructionMethod::multiRP) meth_idx = 1; - fillPlots(meth_idx, idx, rec_pr, vtx, mom, lhcInfo); + fillPlots(meth_idx, idx, rec_pr, vtx, mom, lhcInfoCombined.energy); } } @@ -355,8 +380,8 @@ void CTPPSProtonReconstructionSimulationValidator::fillPlots(unsigned int meth_i const reco::ForwardProton &rec_pr, const HepMC::FourVector &vtx, const HepMC::FourVector &mom, - const LHCInfo &lhcInfo) { - const double p_nom = lhcInfo.energy(); + const double energy) { + const double p_nom = energy; const double xi_simu = (p_nom - mom.rho()) / p_nom; const double th_x_simu = mom.x() / mom.rho(); const double th_y_simu = mom.y() / mom.rho(); diff --git a/Validation/CTPPS/python/ctppsHepMCDistributionPlotter_cfi.py b/Validation/CTPPS/python/ctppsHepMCDistributionPlotter_cfi.py new file mode 100644 index 0000000000000..28baf6954a35b --- /dev/null +++ b/Validation/CTPPS/python/ctppsHepMCDistributionPlotter_cfi.py @@ -0,0 +1,8 @@ +from Validation.CTPPS.CTPPSHepMCDistributionPlotterDefault_cfi import CTPPSHepMCDistributionPlotterDefault as _CTPPSHepMCDistributionPlotterDefault 
+CTPPSHepMCDistributionPlotter = _CTPPSHepMCDistributionPlotterDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(CTPPSHepMCDistributionPlotter, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(CTPPSHepMCDistributionPlotter, useNewLHCInfo = False) diff --git a/Validation/CTPPS/python/ctppsLHCInfoPlotter_cff.py b/Validation/CTPPS/python/ctppsLHCInfoPlotter_cff.py deleted file mode 100644 index 66f0c853de77c..0000000000000 --- a/Validation/CTPPS/python/ctppsLHCInfoPlotter_cff.py +++ /dev/null @@ -1,3 +0,0 @@ -from Validation.CTPPS.ctppsLHCInfoPlotter_cfi import * -from Configuration.Eras.Modifier_run3_common_cff import run3_common -run3_common.toModify(ctppsLHCInfoPlotter, useNewLHCInfo = True) \ No newline at end of file diff --git a/Validation/CTPPS/python/ctppsLHCInfoPlotter_cfi.py b/Validation/CTPPS/python/ctppsLHCInfoPlotter_cfi.py new file mode 100644 index 0000000000000..4b8f96416747d --- /dev/null +++ b/Validation/CTPPS/python/ctppsLHCInfoPlotter_cfi.py @@ -0,0 +1,8 @@ +from Validation.CTPPS.ctppsLHCInfoPlotterDefault_cfi import ctppsLHCInfoPlotterDefault as _ctppsLHCInfoPlotterDefault +ctppsLHCInfoPlotter = _ctppsLHCInfoPlotterDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(ctppsLHCInfoPlotter, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsLHCInfoPlotter, useNewLHCInfo = False) diff --git a/Validation/CTPPS/python/ctppsProtonReconstructionEfficiencyEstimatorData_cfi.py b/Validation/CTPPS/python/ctppsProtonReconstructionEfficiencyEstimatorData_cfi.py new file mode 100644 index 0000000000000..47d2360e623c3 --- /dev/null +++ b/Validation/CTPPS/python/ctppsProtonReconstructionEfficiencyEstimatorData_cfi.py @@ -0,0 +1,8 @@ +from 
Validation.CTPPS.ctppsProtonReconstructionEfficiencyEstimatorDataDefault_cfi import ctppsProtonReconstructionEfficiencyEstimatorDataDefault as _ctppsProtonReconstructionEfficiencyEstimatorDataDefault +ctppsProtonReconstructionEfficiencyEstimatorData = _ctppsProtonReconstructionEfficiencyEstimatorDataDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(ctppsProtonReconstructionEfficiencyEstimatorData, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsProtonReconstructionEfficiencyEstimatorData, useNewLHCInfo = False) diff --git a/Validation/CTPPS/python/ctppsProtonReconstructionEfficiencyEstimatorMC_cfi.py b/Validation/CTPPS/python/ctppsProtonReconstructionEfficiencyEstimatorMC_cfi.py new file mode 100644 index 0000000000000..4aa239bd04c70 --- /dev/null +++ b/Validation/CTPPS/python/ctppsProtonReconstructionEfficiencyEstimatorMC_cfi.py @@ -0,0 +1,8 @@ +from Validation.CTPPS.ctppsProtonReconstructionEfficiencyEstimatorMCDefault_cfi import ctppsProtonReconstructionEfficiencyEstimatorMCDefault as _ctppsProtonReconstructionEfficiencyEstimatorMCDefault +ctppsProtonReconstructionEfficiencyEstimatorMC = ctppsProtonReconstructionEfficiencyEstimatorMCDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(ctppsProtonReconstructionEfficiencyEstimatorMC, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsProtonReconstructionEfficiencyEstimatorMC, useNewLHCInfo = False) diff --git a/Validation/CTPPS/python/ctppsProtonReconstructionSimulationValidator_cfi.py b/Validation/CTPPS/python/ctppsProtonReconstructionSimulationValidator_cfi.py new file mode 100644 index 0000000000000..e1c1b4ddee8d9 --- /dev/null +++ b/Validation/CTPPS/python/ctppsProtonReconstructionSimulationValidator_cfi.py @@ -0,0 +1,8 @@ +from 
Validation.CTPPS.ctppsProtonReconstructionSimulationValidatorDefault_cfi import ctppsProtonReconstructionSimulationValidatorDefault as _ctppsProtonReconstructionSimulationValidatorDefault +ctppsProtonReconstructionSimulationValidator = ctppsProtonReconstructionSimulationValidatorDefault.clone() + +from Configuration.Eras.Modifier_run3_common_cff import run3_common +run3_common.toModify(ctppsProtonReconstructionSimulationValidator, useNewLHCInfo = True) + +from Configuration.Eras.Modifier_ctpps_directSim_cff import ctpps_directSim +ctpps_directSim.toModify(ctppsProtonReconstructionSimulationValidator, useNewLHCInfo = False) diff --git a/Validation/CTPPS/test/simu/run_multiple b/Validation/CTPPS/test/simu/run_multiple index fa9e6a2a8fff2..34cc9042cb851 100755 --- a/Validation/CTPPS/test/simu/run_multiple +++ b/Validation/CTPPS/test/simu/run_multiple @@ -20,7 +20,8 @@ pids="" function RunOne() { local config="$1" - local era="$2" + local era_mod_path="$2" + local era="$3" local cfg="simu_${config}_cfg.py" local log="simu_${config}.log" @@ -29,6 +30,7 @@ function RunOne() local out_protons="simu_${config}_protons.root" cat "$inputDir/template_cfg.py" | sed "\ + s|\$ERA_MOD_PATH|$era_mod_path|;\ s|\$ERA|$era|;\ s|\$CONFIG|$config|;\ s|\$N_EVENTS|$n_events|;\ @@ -43,13 +45,13 @@ function RunOne() #---------------------------------------------------------------------------------------------------- -RunOne "2016" "Run2_2016" +RunOne "2016" "Configuration.Eras" "Run2_2016" -RunOne "2017" "Run2_2017" +RunOne "2017" "Configuration.Eras" "Run2_2017" -RunOne "2018" "Run2_2018" +RunOne "2018" "Configuration.Eras" "Run2_2018" -RunOne "2022" "Run3" +RunOne "2022" "Configuration.ProcessModifiers" "Run3_CTPPS_directSim" rc=0 for pid in $pids diff --git a/Validation/CTPPS/test/simu/template_cfg.py b/Validation/CTPPS/test/simu/template_cfg.py index 5587411a833b9..030f071056534 100644 --- a/Validation/CTPPS/test/simu/template_cfg.py +++ b/Validation/CTPPS/test/simu/template_cfg.py @@ 
-1,10 +1,10 @@ import FWCore.ParameterSet.Config as cms -from Configuration.Eras.Era_$ERA_cff import * +from $ERA_MOD_PATH.Era_$ERA_cff import * process = cms.Process('CTPPSTest', $ERA) process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') -process.load('Validation.CTPPS.ctppsLHCInfoPlotter_cff') +process.load('Validation.CTPPS.ctppsLHCInfoPlotter_cfi') process.load('Configuration.Generator.randomXiThetaGunProducer_cfi') process.load("CondCore.CondDB.CondDB_cfi") @@ -32,15 +32,8 @@ process.CondDB, toGet = cms.VPSet(cms.PSet( record = cms.string('CTPPSPixelAnalysisMaskRcd'), - tag = cms.string("CTPPSPixelAnalysisMask_Run3_v1_hlt")), - cms.PSet( - record = cms.string('LHCInfoPerLSRcd'), - tag = cms.string("LHCInfoPerLS_endFill_Run3_mc_v1")), - cms.PSet( - record = cms.string('LHCInfoPerFillRcd'), - tag = cms.string("LHCInfoPerFill_endFill_Run3_mc_v1")), - ) -) + tag = cms.string("CTPPSPixelAnalysisMask_Run3_v1_hlt")) + )) # random seeds process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService", diff --git a/Validation/Configuration/python/ECALHCAL.py b/Validation/Configuration/python/ECALHCAL.py index 4f7d073fb02c0..053787aaf9d6d 100644 --- a/Validation/Configuration/python/ECALHCAL.py +++ b/Validation/Configuration/python/ECALHCAL.py @@ -37,8 +37,8 @@ def customise(process): process.schedule.append(process.generation_step) process.schedule.append(process.simulation_step) - process.ecalMultiFitUncalibRecHit.cpu.EBdigiCollection = cms.InputTag("simEcalDigis","ebDigis") - process.ecalMultiFitUncalibRecHit.cpu.EEdigiCollection = cms.InputTag("simEcalDigis","eeDigis") + process.ecalMultiFitUncalibRecHitCPU.EBdigiCollection = "simEcalDigis:ebDigis" + process.ecalMultiFitUncalibRecHitCPU.EEdigiCollection = "simEcalDigis:eeDigis" process.ecalPreshowerRecHit.ESdigiCollection = cms.InputTag("simEcalPreshowerDigis") delattr(process,"hbhereco") diff --git a/Validation/Configuration/python/valid3Dt.py b/Validation/Configuration/python/valid3Dt.py new 
file mode 100644 index 0000000000000..0d15cb8771d83 --- /dev/null +++ b/Validation/Configuration/python/valid3Dt.py @@ -0,0 +1,17 @@ +import FWCore.ParameterSet.Config as cms + +def customise(process): + process.mtdTracksValid.inputTagV = 'offlinePrimaryVertices' + process.mtdTracksValid.t0SafePID = 'tofPID3D:t0safe' + process.mtdTracksValid.sigmat0SafePID = 'tofPID3D:sigmat0safe' + process.mtdTracksValid.sigmat0PID = 'tofPID3D:sigmat0' + process.mtdTracksValid.t0PID = 'tofPID3D:t0' + + process.vertices4DValid.offline4DPV = 'offlinePrimaryVertices' + process.vertices4DValid.t0PID = 'tofPID3D:t0' + process.vertices4DValid.t0SafePID = 'tofPID3D:t0safe' + process.vertices4DValid.sigmat0SafePID = 'tofPID3D:sigmat0safe' + process.vertices4DValid.probPi = 'tofPID3D:probPi' + process.vertices4DValid.probK = 'tofPID3D:probK' + process.vertices4DValid.probP = 'tofPID3D:probP' + return(process) diff --git a/Validation/HGCalValidation/plugins/HGCalTriggerValidator.cc b/Validation/HGCalValidation/plugins/HGCalTriggerValidator.cc index a48dd778b139f..fd4efabbfc881 100644 --- a/Validation/HGCalValidation/plugins/HGCalTriggerValidator.cc +++ b/Validation/HGCalValidation/plugins/HGCalTriggerValidator.cc @@ -78,6 +78,29 @@ struct Histograms { dqm::reco::MonitorElement *h_cl3d_srrmax_; dqm::reco::MonitorElement *h_cl3d_srrmean_; dqm::reco::MonitorElement *h_cl3d_emaxe_; + dqm::reco::MonitorElement *h_cl3d_hoe_; + dqm::reco::MonitorElement *h_cl3d_meanz_; + dqm::reco::MonitorElement *h_cl3d_layer10_; + dqm::reco::MonitorElement *h_cl3d_layer50_; + dqm::reco::MonitorElement *h_cl3d_layer90_; + dqm::reco::MonitorElement *h_cl3d_first1layers_; + dqm::reco::MonitorElement *h_cl3d_first3layers_; + dqm::reco::MonitorElement *h_cl3d_first5layers_; + dqm::reco::MonitorElement *h_cl3d_firstHcal1layers_; + dqm::reco::MonitorElement *h_cl3d_firstHcal3layers_; + dqm::reco::MonitorElement *h_cl3d_firstHcal5layers_; + dqm::reco::MonitorElement *h_cl3d_last1layers_; + dqm::reco::MonitorElement 
*h_cl3d_last3layers_; + dqm::reco::MonitorElement *h_cl3d_last5layers_; + dqm::reco::MonitorElement *h_cl3d_emax1layers_; + dqm::reco::MonitorElement *h_cl3d_emax3layers_; + dqm::reco::MonitorElement *h_cl3d_emax5layers_; + dqm::reco::MonitorElement *h_cl3d_eot_; + dqm::reco::MonitorElement *h_cl3d_ebm0_; + dqm::reco::MonitorElement *h_cl3d_ebm1_; + dqm::reco::MonitorElement *h_cl3d_hbm_; + dqm::reco::MonitorElement *h_cl3d_ntc67_; + dqm::reco::MonitorElement *h_cl3d_ntc90_; dqm::reco::MonitorElement *h_cl3d_bdteg_; dqm::reco::MonitorElement *h_cl3d_quality_; @@ -146,61 +169,96 @@ void HGCalTriggerValidator::bookHistograms(DQMStore::IBooker &iBooker, //initiating histograms // trigger cells - histograms.h_tc_n_ = iBooker.book1D("tc_n", "trigger cell number; number", 400, 0, 400); - histograms.h_tc_mipPt_ = iBooker.book1D("tc_mipPt", "trigger cell mipPt; mipPt", 400, 0, 400); - histograms.h_tc_pt_ = iBooker.book1D("tc_pt", "trigger cell pt; pt [GeV]", 15, 0, 15); - histograms.h_tc_energy_ = iBooker.book1D("tc_energy", "trigger cell energy; energy [GeV]", 70, 0, 70); - histograms.h_tc_eta_ = iBooker.book1D("tc_eta", "trigger cell eta; eta", 60, -3.14, 3.14); - histograms.h_tc_phi_ = iBooker.book1D("tc_phi", "trigger cell phi; phi", 60, -3.14, 3.14); - histograms.h_tc_x_ = iBooker.book1D("tc_x", "trigger cell x; x [cm]", 500, -250, 250); - histograms.h_tc_y_ = iBooker.book1D("tc_y", "trigger cell y; y [cm]", 500, -250, 250); - histograms.h_tc_z_ = iBooker.book1D("tc_z", "trigger cell z; z [cm]", 1100, -550, 550); + histograms.h_tc_n_ = iBooker.book1D("tc_n", "trigger cell number; number", 100, 10000, 40000); + histograms.h_tc_mipPt_ = iBooker.book1D("tc_mipPt", "trigger cell mipPt; mipPt", 100, 0, 10); + histograms.h_tc_pt_ = iBooker.book1D("tc_pt", "trigger cell pt; pt [GeV]", 100, 0, 2); + histograms.h_tc_energy_ = iBooker.book1D("tc_energy", "trigger cell energy; energy [GeV]", 100, 0, 5); + histograms.h_tc_eta_ = iBooker.book1D("tc_eta", "trigger cell eta; eta", 
320, -3.2, 3.2); + histograms.h_tc_phi_ = iBooker.book1D("tc_phi", "trigger cell phi; phi", 100, -M_PI, M_PI); + histograms.h_tc_x_ = iBooker.book1D("tc_x", "trigger cell x; x [cm]", 100, -250, 250); + histograms.h_tc_y_ = iBooker.book1D("tc_y", "trigger cell y; y [cm]", 100, -250, 250); + histograms.h_tc_z_ = iBooker.book1D("tc_z", "trigger cell z; z [cm]", 100, -550, 550); histograms.h_tc_layer_ = iBooker.book1D("tc_layer", "trigger cell layer; layer", 50, 0, 50); // cluster 2D histograms - histograms.h_cl_n_ = iBooker.book1D("cl_n", "cluster2D number; number", 80, 0, 80); - histograms.h_cl_mipPt_ = iBooker.book1D("cl_mipPt", "cluster2D mipPt; mipPt", 600, 0, 600); - histograms.h_cl_pt_ = iBooker.book1D("cl_pt", "cluster2D pt; pt [GeV]", 20, 0, 20); - histograms.h_cl_energy_ = iBooker.book1D("cl_energy", "cluster2D energy; energy [GeV]", 80, 0, 80); - histograms.h_cl_eta_ = iBooker.book1D("cl_eta", "cluster2D eta; eta", 60, -3.14, 3.14); - histograms.h_cl_phi_ = iBooker.book1D("cl_phi", "cluster2D phi; phi", 60, -3.14, 3.14); - histograms.h_cl_cells_n_ = iBooker.book1D("cl_cells_n", "cluster2D cells_n; cells_n", 16, 0, 16); + histograms.h_cl_n_ = iBooker.book1D("cl_n", "cluster2D number; number", 100, 10000, 40000); + histograms.h_cl_mipPt_ = iBooker.book1D("cl_mipPt", "cluster2D mipPt; mipPt", 100, 0, 10); + histograms.h_cl_pt_ = iBooker.book1D("cl_pt", "cluster2D pt; pt [GeV]", 100, 0, 2); + histograms.h_cl_energy_ = iBooker.book1D("cl_energy", "cluster2D energy; energy [GeV]", 100, 0, 5); + histograms.h_cl_eta_ = iBooker.book1D("cl_eta", "cluster2D eta; eta", 320, -3.2, 3.2); + histograms.h_cl_phi_ = iBooker.book1D("cl_phi", "cluster2D phi; phi", 100, -M_PI, M_PI); + histograms.h_cl_cells_n_ = iBooker.book1D("cl_cells_n", "cluster2D cells_n; cells_n", 20, 0, 20); histograms.h_cl_layer_ = iBooker.book1D("cl_layer", "cluster2D layer; layer", 50, 0, 50); // multiclusters - histograms.h_cl3d_n_ = iBooker.book1D("cl3d_n", "cl3duster3D number; number", 12, 0, 12); + 
histograms.h_cl3d_n_ = iBooker.book1D("cl3d_n", "cl3duster3D number; number", 200, 0, 400); histograms.h_cl3d_pt_ = iBooker.book1D("cl3d_pt", "cl3duster3D pt; pt [GeV]", 50, 0, 50); - histograms.h_cl3d_energy_ = iBooker.book1D("cl3d_energy", "cl3duster3D energy; energy [GeV]", 80, 0, 80); - histograms.h_cl3d_eta_ = iBooker.book1D("cl3d_eta", "cl3duster3D eta; eta", 60, -3.14, 3.14); - histograms.h_cl3d_phi_ = iBooker.book1D("cl3d_phi", "cl3duster3D phi; phi", 60, -3.14, 3.14); - histograms.h_cl3d_clusters_n_ = iBooker.book1D("cl3d_clusters_n", "cl3duster3D clusters_n; clusters_n", 30, 0, 30); + histograms.h_cl3d_energy_ = iBooker.book1D("cl3d_energy", "cl3duster3D energy; energy [GeV]", 50, 0, 200); + histograms.h_cl3d_eta_ = iBooker.book1D("cl3d_eta", "cl3duster3D eta; eta", 320, -3.2, 3.2); + histograms.h_cl3d_phi_ = iBooker.book1D("cl3d_phi", "cl3duster3D phi; phi", 100, -M_PI, M_PI); + histograms.h_cl3d_clusters_n_ = iBooker.book1D("cl3d_clusters_n", "cl3duster3D clusters_n; clusters_n", 50, 0, 200); // cluster shower shapes histograms.h_cl3d_showerlength_ = iBooker.book1D("cl3d_showerlength", "cl3duster3D showerlength; showerlength", 50, 0, 50); histograms.h_cl3d_coreshowerlength_ = - iBooker.book1D("cl3d_coreshowerlength", "cl3duster3D coreshowerlength; coreshowerlength", 16, 0, 16); - histograms.h_cl3d_firstlayer_ = iBooker.book1D("cl3d_firstlayer", "cl3duster3D firstlayer; firstlayer", 50, 0, 50); + iBooker.book1D("cl3d_coreshowerlength", "cl3duster3D coreshowerlength; coreshowerlength", 20, 0, 20); + histograms.h_cl3d_firstlayer_ = iBooker.book1D("cl3d_firstlayer", "cl3duster3D firstlayer; firstlayer", 10, 0, 10); histograms.h_cl3d_maxlayer_ = iBooker.book1D("cl3d_maxlayer", "cl3duster3D maxlayer; maxlayer", 50, 0, 50); - histograms.h_cl3d_seetot_ = iBooker.book1D("cl3d_seetot", "cl3duster3D seetot; seetot", 50, 0, 0.05); - histograms.h_cl3d_seemax_ = iBooker.book1D("cl3d_seemax", "cl3duster3D seemax; seemax", 40, 0, 0.04); - histograms.h_cl3d_spptot_ = 
iBooker.book1D("cl3d_spptot", "cl3duster3D spptot; spptot", 800, 0, 0.08); - histograms.h_cl3d_sppmax_ = iBooker.book1D("cl3d_sppmax", "cl3duster3D sppmax; sppmax", 800, 0, 0.08); - histograms.h_cl3d_szz_ = iBooker.book1D("cl3d_szz", "cl3duster3D szz; szz", 50, 0, 50); - histograms.h_cl3d_srrtot_ = iBooker.book1D("cl3d_srrtot", "cl3duster3D srrtot; srrtot", 800, 0, 0.008); - histograms.h_cl3d_srrmax_ = iBooker.book1D("cl3d_srrmax", "cl3duster3D srrmax; srrmax", 900, 0, 0.009); - histograms.h_cl3d_srrmean_ = iBooker.book1D("cl3d_srrmean", "cl3duster3D srrmean; srrmean", 800, 0, 0.008); - histograms.h_cl3d_emaxe_ = iBooker.book1D("cl3d_emaxe", "cl3duster3D emaxe; emaxe", 15, 0, 1.5); - histograms.h_cl3d_bdteg_ = iBooker.book1D("cl3d_bdteg", "cl3duster3D bdteg; bdteg", 30, -0.7, 0.4); - histograms.h_cl3d_quality_ = iBooker.book1D("cl3d_quality", "cl3duster3D quality; quality", 20, 0, 2); + histograms.h_cl3d_seetot_ = iBooker.book1D("cl3d_seetot", "cl3duster3D seetot; seetot", 50, 0, 0.1); + histograms.h_cl3d_seemax_ = iBooker.book1D("cl3d_seemax", "cl3duster3D seemax; seemax", 50, 0, 0.2); + histograms.h_cl3d_spptot_ = iBooker.book1D("cl3d_spptot", "cl3duster3D spptot; spptot", 50, 0, 0.1); + histograms.h_cl3d_sppmax_ = iBooker.book1D("cl3d_sppmax", "cl3duster3D sppmax; sppmax", 50, 0, 0.15); + histograms.h_cl3d_szz_ = iBooker.book1D("cl3d_szz", "cl3duster3D szz; szz", 50, 0, 80); + histograms.h_cl3d_srrtot_ = iBooker.book1D("cl3d_srrtot", "cl3duster3D srrtot; srrtot", 50, 0, 0.01); + histograms.h_cl3d_srrmax_ = iBooker.book1D("cl3d_srrmax", "cl3duster3D srrmax; srrmax", 50, 0, 0.014); + histograms.h_cl3d_srrmean_ = iBooker.book1D("cl3d_srrmean", "cl3duster3D srrmean; srrmean", 50, 0, 0.006); + histograms.h_cl3d_emaxe_ = iBooker.book1D("cl3d_emaxe", "cl3duster3D emaxe; emaxe", 50, 0, 1.01); + histograms.h_cl3d_hoe_ = iBooker.book1D("cl3d_hoe", "cl3duster3D hoe; hoe", 50, 0, 2.); + histograms.h_cl3d_meanz_ = iBooker.book1D("cl3d_meanz", "cl3duster3D meanz; meanz", 50, 
300, 400); + histograms.h_cl3d_layer10_ = iBooker.book1D("cl3d_layer10", "cl3duster3D layer10; layer10", 10, 0, 10); + histograms.h_cl3d_layer50_ = iBooker.book1D("cl3d_layer50", "cl3duster3D layer50; layer50", 20, 0, 20); + histograms.h_cl3d_layer90_ = iBooker.book1D("cl3d_layer90", "cl3duster3D layer90; layer90", 40, 0, 40); + histograms.h_cl3d_first1layers_ = + iBooker.book1D("cl3d_first1layers", "cl3duster3D first1layers; first1layers", 50, 0, 1.01); + histograms.h_cl3d_first3layers_ = + iBooker.book1D("cl3d_first3layers", "cl3duster3D first3layers; first3layers", 50, 0, 1.01); + histograms.h_cl3d_first5layers_ = + iBooker.book1D("cl3d_first5layers", "cl3duster3D first5layers; first5layers", 50, 0, 1.01); + histograms.h_cl3d_firstHcal1layers_ = + iBooker.book1D("cl3d_firstHcal1layers", "cl3duster3D firstHcal1layers; hcal1layers", 50, 0, 0.5); + histograms.h_cl3d_firstHcal3layers_ = + iBooker.book1D("cl3d_firstHcal3layers", "cl3duster3D firstHcal3layers; hcal1layers", 50, 0, 0.5); + histograms.h_cl3d_firstHcal5layers_ = + iBooker.book1D("cl3d_firstHcal5layers", "cl3duster3D firstHcal5layers; hcal1layers", 50, 0, 0.5); + histograms.h_cl3d_last1layers_ = + iBooker.book1D("cl3d_last1layers", "cl3duster3D last1layers; last1layers", 50, 0, 0.1); + histograms.h_cl3d_last3layers_ = + iBooker.book1D("cl3d_last3layers", "cl3duster3D last3layers; last3layers", 50, 0, 0.1); + histograms.h_cl3d_last5layers_ = + iBooker.book1D("cl3d_last5layers", "cl3duster3D last5layers; last5layers", 50, 0, 0.1); + histograms.h_cl3d_emax1layers_ = + iBooker.book1D("cl3d_emax1layers", "cl3duster3D emax1layers; emax1layers", 50, 0, 1.01); + histograms.h_cl3d_emax3layers_ = + iBooker.book1D("cl3d_emax3layers", "cl3duster3D emax3layers; emax3layers", 50, 0, 1.01); + histograms.h_cl3d_emax5layers_ = + iBooker.book1D("cl3d_emax5layers", "cl3duster3D emax5layers; emax5layers", 50, 0, 1.01); + histograms.h_cl3d_eot_ = iBooker.book1D("cl3d_eot", "cl3duster3D eot; eot", 50, 0, 1.01); + 
histograms.h_cl3d_ebm0_ = iBooker.book1D("cl3d_ebm0", "cl3duster3D ebm0; ebm0", 50, 0, 9000); + histograms.h_cl3d_ebm1_ = iBooker.book1D("cl3d_ebm1", "cl3duster3D ebm1; ebm1", 50, 0, 9000); + histograms.h_cl3d_hbm_ = iBooker.book1D("cl3d_hbm", "cl3duster3D hbm; hbm", 50, 0, 4000); + histograms.h_cl3d_ntc67_ = iBooker.book1D("cl3d_ntc67", "cl3duster3D ntc67; ntc67", 50, 0, 50); + histograms.h_cl3d_ntc90_ = iBooker.book1D("cl3d_ntc90", "cl3duster3D ntc90; ntc90", 50, 0, 100); + histograms.h_cl3d_bdteg_ = iBooker.book1D("cl3d_bdteg", "cl3duster3D bdteg; bdteg", 50, -1., 1.); + histograms.h_cl3d_quality_ = iBooker.book1D("cl3d_quality", "cl3duster3D quality; quality", 5, 0, 5); // towers - histograms.h_tower_n_ = iBooker.book1D("tower_n", "tower n; number", 400, 1200, 1600); - histograms.h_tower_pt_ = iBooker.book1D("tower_pt", "tower pt; pt [GeV]", 50, 0, 50); - histograms.h_tower_energy_ = iBooker.book1D("tower_energy", "tower energy; energy [GeV]", 200, 0, 200); - histograms.h_tower_eta_ = iBooker.book1D("tower_eta", "tower eta; eta", 60, -3.14, 3.14); - histograms.h_tower_phi_ = iBooker.book1D("tower_phi", "tower phi; phi", 60, -3.14, 3.14); - histograms.h_tower_etEm_ = iBooker.book1D("tower_etEm", "tower etEm; etEm", 50, 0, 50); - histograms.h_tower_etHad_ = iBooker.book1D("tower_etHad", "tower etHad; etHad", 30, 0, 0.3); + histograms.h_tower_n_ = iBooker.book1D("tower_n", "tower n; number", 100, 2000, 3000); + histograms.h_tower_pt_ = iBooker.book1D("tower_pt", "tower pt; pt [GeV]", 100, 0, 10); + histograms.h_tower_energy_ = iBooker.book1D("tower_energy", "tower energy; energy [GeV]", 100, 0, 100); + histograms.h_tower_eta_ = iBooker.book1D("tower_eta", "tower eta; eta", 640, -3.2, 3.2); + histograms.h_tower_phi_ = iBooker.book1D("tower_phi", "tower phi; phi", 600, -M_PI, M_PI); + histograms.h_tower_etEm_ = iBooker.book1D("tower_etEm", "tower etEm; etEm", 100, 0, 10); + histograms.h_tower_etHad_ = iBooker.book1D("tower_etHad", "tower etHad; etHad", 100, 0, 5); 
histograms.h_tower_iEta_ = iBooker.book1D("tower_iEta", "tower iEta; iEta", 20, 0, 20); histograms.h_tower_iPhi_ = iBooker.book1D("tower_iPhi", "tower iPhi; iPhi", 80, 0, 80); } @@ -281,6 +339,29 @@ void HGCalTriggerValidator::dqmAnalyze(edm::Event const &iEvent, histograms.h_cl3d_srrmax_->Fill(cl3d_itr->sigmaRRMax()); histograms.h_cl3d_srrmean_->Fill(cl3d_itr->sigmaRRMean()); histograms.h_cl3d_emaxe_->Fill(cl3d_itr->eMax() / cl3d_itr->energy()); + histograms.h_cl3d_hoe_->Fill(cl3d_itr->hOverE()); + histograms.h_cl3d_meanz_->Fill(cl3d_itr->zBarycenter()); + histograms.h_cl3d_layer10_->Fill(cl3d_itr->layer10percent()); + histograms.h_cl3d_layer50_->Fill(cl3d_itr->layer50percent()); + histograms.h_cl3d_layer90_->Fill(cl3d_itr->layer90percent()); + histograms.h_cl3d_first1layers_->Fill(cl3d_itr->first1layers()); + histograms.h_cl3d_first3layers_->Fill(cl3d_itr->first3layers()); + histograms.h_cl3d_first5layers_->Fill(cl3d_itr->first5layers()); + histograms.h_cl3d_firstHcal1layers_->Fill(cl3d_itr->firstHcal1layers()); + histograms.h_cl3d_firstHcal3layers_->Fill(cl3d_itr->firstHcal3layers()); + histograms.h_cl3d_firstHcal5layers_->Fill(cl3d_itr->firstHcal5layers()); + histograms.h_cl3d_last1layers_->Fill(cl3d_itr->last1layers()); + histograms.h_cl3d_last3layers_->Fill(cl3d_itr->last3layers()); + histograms.h_cl3d_last5layers_->Fill(cl3d_itr->last5layers()); + histograms.h_cl3d_emax1layers_->Fill(cl3d_itr->emax1layers()); + histograms.h_cl3d_emax3layers_->Fill(cl3d_itr->emax3layers()); + histograms.h_cl3d_emax5layers_->Fill(cl3d_itr->emax5layers()); + histograms.h_cl3d_eot_->Fill(cl3d_itr->eot()); + histograms.h_cl3d_ebm0_->Fill(cl3d_itr->ebm0()); + histograms.h_cl3d_ebm1_->Fill(cl3d_itr->ebm1()); + histograms.h_cl3d_hbm_->Fill(cl3d_itr->hbm()); + histograms.h_cl3d_ntc67_->Fill(cl3d_itr->triggerCells67percent()); + histograms.h_cl3d_ntc90_->Fill(cl3d_itr->triggerCells90percent()); histograms.h_cl3d_bdteg_->Fill(id_->value(*cl3d_itr)); 
histograms.h_cl3d_quality_->Fill(cl3d_itr->hwQual()); } diff --git a/Validation/HGCalValidation/python/hgcalValidationTPG_cfi.py b/Validation/HGCalValidation/python/hgcalValidationTPG_cfi.py index 379eaa5849e33..330e144688307 100644 --- a/Validation/HGCalValidation/python/hgcalValidationTPG_cfi.py +++ b/Validation/HGCalValidation/python/hgcalValidationTPG_cfi.py @@ -2,7 +2,7 @@ from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer from L1Trigger.L1THGCal.egammaIdentification import egamma_identification_histomax -hgcalTrigPrimValidation = DQMEDAnalyzer( +L1THGCalTrigPrimValidation = DQMEDAnalyzer( "HGCalTriggerValidator", TriggerCells = cms.InputTag('l1tHGCalConcentratorProducer:HGCalConcentratorProcessorSelection'), Clusters = cms.InputTag('l1tHGCalBackEndLayer1Producer:HGCalBackendLayer1Processor2DClustering'), diff --git a/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc b/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc index b47225972070f..ff64fba9959bd 100644 --- a/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc +++ b/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc @@ -2514,7 +2514,7 @@ void HGVHistoProducerAlgo::tracksters_to_SimTracksters( std::unordered_map> detIdSimTSId_Map; std::unordered_map> detIdToTracksterId_Map; std::vector tracksters_FakeMerge(nTracksters, 0); - std::vector tracksters_PurityDuplicate(nTracksters, 0); + std::vector tracksters_PurityDuplicate(nSimTracksters, 0); // This vector contains the ids of the SimTracksters contributing with at least one hit to the Trackster and the reconstruction error //stsInTrackster[trackster][STSids] @@ -3222,10 +3222,10 @@ void HGVHistoProducerAlgo::tracksters_to_SimTracksters( } if (score3d_iSTS[tstId] < ScoreCutSTStoTSPurDup) { - if (tracksters_PurityDuplicate[tstId] < 1) - tracksters_PurityDuplicate[tstId]++; // for Purity + if (tracksters_PurityDuplicate[iSTS] < 1) + tracksters_PurityDuplicate[iSTS]++; // for Purity if (sts_considered_pure) - 
tracksters_PurityDuplicate[tstId]++; // for Duplicate + tracksters_PurityDuplicate[iSTS]++; // for Duplicate sts_considered_pure = true; } } // end of loop through Tracksters related to SimTrackster @@ -3260,6 +3260,32 @@ void HGVHistoProducerAlgo::tracksters_to_SimTracksters( } } // end of loop through SimTracksters + // Fill the plots to compute the different metrics linked to + // sim-level, namely purity and duplicate-rate + for (unsigned int stsId = 0; stsId < nSimTracksters; ++stsId) { + const auto& sts = simTSs[stsId]; + if (sts.vertices().empty()) + continue; + const auto sts_eta = sts.barycenter().eta(); + const auto sts_phi = sts.barycenter().phi(); + const auto sts_en = sts.raw_energy(); + const auto sts_pt = sts.raw_pt(); + + if (tracksters_PurityDuplicate[stsId] > 0) { + histograms.h_num_caloparticle_eta[valType][count]->Fill(sts_eta); + histograms.h_num_caloparticle_phi[valType][count]->Fill(sts_phi); + histograms.h_num_caloparticle_en[valType][count]->Fill(sts_en); + histograms.h_num_caloparticle_pt[valType][count]->Fill(sts_pt); + + if (tracksters_PurityDuplicate[stsId] > 1) { + histograms.h_numDup_trackster_eta[valType][count]->Fill(sts_eta); + histograms.h_numDup_trackster_phi[valType][count]->Fill(sts_phi); + histograms.h_numDup_trackster_en[valType][count]->Fill(sts_en); + histograms.h_numDup_trackster_pt[valType][count]->Fill(sts_pt); + } + } + } + // Fill the plots to compute the different metrics linked to // reco-level, namely fake-rate an merge-rate. Should *not* // restrict only to the selected caloParaticles. 
@@ -3276,20 +3302,6 @@ void HGVHistoProducerAlgo::tracksters_to_SimTracksters( histograms.h_denom_trackster_en[valType][count]->Fill(iTS_en); histograms.h_denom_trackster_pt[valType][count]->Fill(iTS_pt); - if (tracksters_PurityDuplicate[tstId] > 0) { - histograms.h_num_caloparticle_eta[valType][count]->Fill(iTS_eta); - histograms.h_num_caloparticle_phi[valType][count]->Fill(iTS_phi); - histograms.h_num_caloparticle_en[valType][count]->Fill(iTS_en); - histograms.h_num_caloparticle_pt[valType][count]->Fill(iTS_pt); - - if (tracksters_PurityDuplicate[tstId] > 1) { - histograms.h_numDup_trackster_eta[valType][count]->Fill(iTS_eta); - histograms.h_numDup_trackster_phi[valType][count]->Fill(iTS_phi); - histograms.h_numDup_trackster_en[valType][count]->Fill(iTS_en); - histograms.h_numDup_trackster_pt[valType][count]->Fill(iTS_pt); - } - } - if (tracksters_FakeMerge[tstId] > 0) { histograms.h_num_trackster_eta[valType][count]->Fill(iTS_eta); histograms.h_num_trackster_phi[valType][count]->Fill(iTS_phi); diff --git a/Validation/MtdValidation/macros/Pt_residuals_fit.C b/Validation/MtdValidation/macros/Pt_residuals_fit.C new file mode 100644 index 0000000000000..e4963b8e8cdcd --- /dev/null +++ b/Validation/MtdValidation/macros/Pt_residuals_fit.C @@ -0,0 +1,188 @@ +// -*- C -*- +// +// 'Resolution in track Pt' macro +// +// \author 12/2023 - Raffaele Delli Gatti + +#include "Riostream.h" +#include "TFile.h" +#include "TDirectoryFile.h" +#include "TTree.h" +#include "TCanvas.h" +#include "TH1D.h" +#include "TStyle.h" +#include "TLegend.h" +#include "TLatex.h" +#include "TString.h" +#include "RooRealVar.h" +#include "RooCrystalBall.h" +#include "RooAddPdf.h" +#include "RooDataHist.h" +#include "RooPlot.h" +#include "RooFitResult.h" +using namespace std; +using namespace RooFit; + +// Funtion for fitting to data with roofit library (defined below) +// --------------------------------------------------------------- +void fit_to_data(TH1D* histogram, TString name_file); + +// Main 
function +//-------------- +void residuals_fit() { + // Open the root file to read + // -------------------------- + + TFile* file_DQM = TFile::Open("./DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root", "READ"); + // and take its directories + TDirectoryFile* dir_DQMData = (TDirectoryFile*)file_DQM->Get("DQMData"); + if (!dir_DQMData) + cout << "Cannot find dir_DQMData" << endl; + TDirectoryFile* dir_Run1 = (TDirectoryFile*)dir_DQMData->Get("Run 1"); + if (!dir_Run1) + cout << "Cannot find dir_Run1" << endl; + TDirectoryFile* dir_MTD = (TDirectoryFile*)dir_Run1->Get("MTD"); + if (!dir_MTD) + cout << "Cannot find dir_MTD" << endl; + TDirectoryFile* dir_Runsum = (TDirectoryFile*)dir_MTD->Get("Run summary"); + if (!dir_Runsum) + cout << "Cannot find dir_Runsum" << endl; + TDirectoryFile* dir_Tracks = (TDirectoryFile*)dir_Runsum->Get("Tracks"); + if (!dir_Tracks) + cout << "Cannot find dir_Tracks" << endl; + + // Take the trees with the method Get() + TH1D* h_TrackMatchedTPBTLPtRatioGen = (TH1D*)dir_Tracks->Get("TrackMatchedTPBTLPtRatioGen"); + TH1D* h_TrackMatchedTPBTLPtRatioMtd = (TH1D*)dir_Tracks->Get("TrackMatchedTPBTLPtRatioMtd"); + TH1D* h_TrackMatchedTPBTLPtResMtd = (TH1D*)dir_Tracks->Get("TrackMatchedTPBTLPtResMtd"); + + TH1D* h_TrackMatchedTPETLPtRatioGen = (TH1D*)dir_Tracks->Get("TrackMatchedTPETLPtRatioGen"); + TH1D* h_TrackMatchedTPETLPtRatioMtd = (TH1D*)dir_Tracks->Get("TrackMatchedTPETLPtRatioMtd"); + TH1D* h_TrackMatchedTPETLPtResMtd = (TH1D*)dir_Tracks->Get("TrackMatchedTPETLPtResMtd"); + + TH1D* h_TrackMatchedTPETL2PtRatioGen = (TH1D*)dir_Tracks->Get("TrackMatchedTPETL2PtRatioGen"); + TH1D* h_TrackMatchedTPETL2PtRatioMtd = (TH1D*)dir_Tracks->Get("TrackMatchedTPETL2PtRatioMtd"); + TH1D* h_TrackMatchedTPETL2PtResMtd = (TH1D*)dir_Tracks->Get("TrackMatchedTPETL2PtResMtd"); + + // Fit to data with the function fit_to_data + //------------------------------------------ + fit_to_data(h_TrackMatchedTPBTLPtRatioGen, "BTLPtRatioGen_fit.pdf"); + 
fit_to_data(h_TrackMatchedTPETLPtRatioGen, "ETLPtRatioGen_fit.pdf"); + fit_to_data(h_TrackMatchedTPETL2PtRatioGen, "ETL2PtRatioGen_fit.pdf"); + + fit_to_data(h_TrackMatchedTPBTLPtRatioMtd, "BTLPtRatioMtd_fit.pdf"); + fit_to_data(h_TrackMatchedTPETLPtRatioMtd, "ETLPtRatioMtd_fit.pdf"); + fit_to_data(h_TrackMatchedTPETL2PtRatioMtd, "ETL2PtRatioMtd_fit.pdf"); + + fit_to_data(h_TrackMatchedTPBTLPtResMtd, "BTLPtResMtd_fit.pdf"); + fit_to_data(h_TrackMatchedTPETLPtResMtd, "ETLPtResMtd_fit.pdf"); + fit_to_data(h_TrackMatchedTPETL2PtResMtd, "ETL2PtResMtd_fit.pdf"); + + return; +} + +void fit_to_data(TH1D* h_TrackMatchedTP, TString str) { + // Fit to data using roofit library + // -------------------------------- + + Double_t bin_width = h_TrackMatchedTP->GetBinWidth(0); + Double_t range_min = h_TrackMatchedTP->GetXaxis()->GetBinLowEdge(1); + Double_t range_max = + h_TrackMatchedTP->GetXaxis()->GetBinLowEdge(h_TrackMatchedTP->GetNbinsX()) + h_TrackMatchedTP->GetBinWidth(0); + + // Observable + RooRealVar x_res("x res", "", range_min, range_max); + + // Import data from histogram + RooDataHist* h_ = new RooDataHist("h_", "h_", x_res, h_TrackMatchedTP); + + // Parameters + RooRealVar mean("mean", "mean", h_TrackMatchedTP->GetMean(), range_min, range_max); + RooRealVar sigmaL("sigmaL", "sigmaL", 0.05, 0., 5.); + RooRealVar sigmaR("sigmaR", "sigmaR", 0.05, 0., 5.); + RooRealVar alphaL("alphaL", "alphaL", 1., 0., 5.); + RooRealVar alphaR("alphaR", "alphaR", 1., 0., 5.); + RooRealVar nL("NL", "NL", 5., 0., 100.); + RooRealVar nR("NR", "NR", 5., 0., 100.); + + // Build a double sided crystall ball PDF + RooCrystalBall* pdf = new RooCrystalBall("pdf", "pdf", x_res, mean, sigmaL, sigmaR, alphaL, nL, alphaR, nR); + // Construct a signal PDF + RooRealVar nsig("nsig", "#signal events", h_TrackMatchedTP->GetEntries(), 0., h_TrackMatchedTP->GetEntries() * 2); + RooAddPdf* model = new RooAddPdf("model", "model", {*pdf}, {nsig}); + + // The PDF fit to that data set using an un-binned 
maximum likelihood fit + // Then the data are visualized with the PDF overlaid + + // Perform extended ML fit of PDF to data and save results in a pointer + RooFitResult* r1 = model->fitTo(*h_, Save()); + + // Retrieve values from the fit + Double_t mean_fit = mean.getVal(); + Double_t err_mean = mean.getError(); + Double_t sigmaR_fit = sigmaR.getVal(); + Double_t err_sigmaR = sigmaR.getError(); + Double_t sigmaL_fit = sigmaL.getVal(); + Double_t err_sigmaL = sigmaL.getError(); + + // Compute resolution as half-width of the interval containing 68% of all entries (including overflows), centered around the MPV of the residuals + // ---------------------------------------------------------------------------------------------------------------------------------------------- + + Double_t res = 0; + Double_t min = 0; + double_t integral = 0; + for (Int_t i = 1; i < h_TrackMatchedTP->GetNbinsX() / 2; i++) { + Int_t bin_mean = (mean_fit - range_min) / bin_width; + double_t int_norm = h_TrackMatchedTP->Integral(bin_mean - i, bin_mean + i) / h_TrackMatchedTP->Integral(); + if (int_norm - 0.68 < min) + res = i * bin_width; + } + cout << "Resolution = " << res << " +- " << bin_width << endl; + + // Create a RooPlot to draw on + //---------------------------- + // We don't manage the memory of the returned pointer + // Instead we let it leak such that the plot still exists at the end of the macro and we can take a look at it + RooPlot* xresframe = x_res.frame(); + + // Plot data and PDF overlaid + h_->plotOn(xresframe, MarkerSize(0.8), Name("histogram")); + model->plotOn(xresframe, LineColor(kBlue), LineWidth(3), Name("model")); + // In the previous lines, name is needed for the legend + + auto legend_res = new TLegend(0.65, 0.8, 0.8, 0.9); + gStyle->SetLegendTextSize(0.033); + TLatex* header = new TLatex(); + header->SetTextSize(0.035); + + TCanvas* c1 = new TCanvas("c1", "c1", 600, 500); + c1->cd(); + xresframe->GetXaxis()->SetTitle(h_TrackMatchedTP->GetXaxis()->GetTitle()); 
+ xresframe->Draw(); + if (str.Contains("BTL") && str.Contains("Mtd")) + legend_res->AddEntry("histogram", "BTL", "PLerr"); + else if (str.Contains("BTL") && str.Contains("Gen")) + legend_res->AddEntry("histogram", "GenTrack (barrel)", "PLerr"); + else if (str.Contains("ETLPt") && str.Contains("Mtd")) + legend_res->AddEntry("histogram", "ETL (one hit)", "PLerr"); + else if (str.Contains("ETLPt") && str.Contains("Gen")) + legend_res->AddEntry("histogram", "GenTrack (end-caps)", "PLerr"); + else if (str.Contains("ETL2") && str.Contains("Mtd")) + legend_res->AddEntry("histogram", "ETL (2 hits)", "PLerr"); + else if (str.Contains("ETL2") && str.Contains("Gen")) + legend_res->AddEntry("histogram", "GenTrack (end-caps)", "PLerr"); + legend_res->AddEntry("model", "DSCB Fit", "L"); + legend_res->Draw("same"); + header->DrawLatexNDC(0.12, 0.96, "MC Simulation"); + header->DrawLatexNDC(0.81, 0.96, "#sqrt{s} = 14 TeV"); + header->DrawLatexNDC(0.66, 0.75, TString::Format("#mu = %.4f #pm %.4f", mean_fit, err_mean)); + header->DrawLatexNDC(0.66, 0.71, TString::Format("#sigma_{R} = %.4f #pm %.4f", sigmaR_fit, err_sigmaR)); + header->DrawLatexNDC(0.66, 0.67, TString::Format("#sigma_{L} = %.4f #pm %.4f", sigmaL_fit, err_sigmaL)); + if (str.Contains("Ratio")) + header->DrawLatexNDC(0.66, 0.63, TString::Format("#sigma (0.68) = %.3f #pm %.3f", res, bin_width)); + if (str.Contains("Ratio")) + header->DrawLatexNDC(0.66, 0.59, TString::Format("#chi^{2}/ndf = %.2f", xresframe->chiSquare())); + if (str.Contains("Res")) + header->DrawLatexNDC(0.66, 0.63, TString::Format("#chi^{2}/ndf = %.2f", xresframe->chiSquare())); + c1->Print(str); +} diff --git a/Validation/MtdValidation/plugins/MtdTracksValidation.cc b/Validation/MtdValidation/plugins/MtdTracksValidation.cc index 3671e1b5e2a10..2961a8e3f567c 100644 --- a/Validation/MtdValidation/plugins/MtdTracksValidation.cc +++ b/Validation/MtdValidation/plugins/MtdTracksValidation.cc @@ -1,3 +1,4 @@ +#define EDM_ML_DEBUG #include #include 
"FWCore/Framework/interface/Frameworkfwd.h" @@ -153,6 +154,9 @@ class MtdTracksValidation : public DQMEDAnalyzer { edm::EDGetTokenT> Sigmat0PidToken_; edm::EDGetTokenT> t0SafePidToken_; edm::EDGetTokenT> Sigmat0SafePidToken_; + edm::EDGetTokenT> SigmaTofPiToken_; + edm::EDGetTokenT> SigmaTofKToken_; + edm::EDGetTokenT> SigmaTofPToken_; edm::EDGetTokenT> trackMVAQualToken_; edm::ESGetToken mtdgeoToken_; @@ -195,12 +199,35 @@ class MtdTracksValidation : public DQMEDAnalyzer { MonitorElement* meTrackMVAQual_; MonitorElement* meTrackPathLenghtvsEta_; + MonitorElement* meTrackSigmaTof_[3]; + MonitorElement* meTrackSigmaTofvsP_[3]; + MonitorElement* meTrackPtTot_; MonitorElement* meMVATrackEffPtTot_; MonitorElement* meMVATrackMatchedEffPtTot_; MonitorElement* meMVATrackMatchedEffPtMtd_; MonitorElement* meExtraPtMtd_; MonitorElement* meExtraPtEtl2Mtd_; + + MonitorElement* meBTLTrackMatchedTPPtResMtd_; + MonitorElement* meETLTrackMatchedTPPtResMtd_; + MonitorElement* meETLTrackMatchedTP2PtResMtd_; + MonitorElement* meBTLTrackMatchedTPPtRatioGen_; + MonitorElement* meETLTrackMatchedTPPtRatioGen_; + MonitorElement* meETLTrackMatchedTP2PtRatioGen_; + MonitorElement* meBTLTrackMatchedTPPtRatioMtd_; + MonitorElement* meETLTrackMatchedTPPtRatioMtd_; + MonitorElement* meETLTrackMatchedTP2PtRatioMtd_; + MonitorElement* meBTLTrackMatchedTPPtResvsPtMtd_; + MonitorElement* meETLTrackMatchedTPPtResvsPtMtd_; + MonitorElement* meETLTrackMatchedTP2PtResvsPtMtd_; + MonitorElement* meBTLTrackMatchedTPDPtvsPtGen_; + MonitorElement* meETLTrackMatchedTPDPtvsPtGen_; + MonitorElement* meETLTrackMatchedTP2DPtvsPtGen_; + MonitorElement* meBTLTrackMatchedTPDPtvsPtMtd_; + MonitorElement* meETLTrackMatchedTPDPtvsPtMtd_; + MonitorElement* meETLTrackMatchedTP2DPtvsPtMtd_; + MonitorElement* meTrackMatchedTPEffPtTot_; MonitorElement* meTrackMatchedTPEffPtMtd_; MonitorElement* meTrackMatchedTPEffPtEtl2Mtd_; @@ -259,6 +286,9 @@ MtdTracksValidation::MtdTracksValidation(const edm::ParameterSet& iConfig) 
Sigmat0PidToken_ = consumes>(iConfig.getParameter("sigmat0PID")); t0SafePidToken_ = consumes>(iConfig.getParameter("t0SafePID")); Sigmat0SafePidToken_ = consumes>(iConfig.getParameter("sigmat0SafePID")); + SigmaTofPiToken_ = consumes>(iConfig.getParameter("sigmaTofPi")); + SigmaTofKToken_ = consumes>(iConfig.getParameter("sigmaTofK")); + SigmaTofPToken_ = consumes>(iConfig.getParameter("sigmaTofP")); trackMVAQualToken_ = consumes>(iConfig.getParameter("trackMVAQual")); mtdgeoToken_ = esConsumes(); mtdtopoToken_ = esConsumes(); @@ -293,6 +323,9 @@ void MtdTracksValidation::analyze(const edm::Event& iEvent, const edm::EventSetu const auto& Sigmat0Pid = iEvent.get(Sigmat0PidToken_); const auto& t0Safe = iEvent.get(t0SafePidToken_); const auto& Sigmat0Safe = iEvent.get(Sigmat0SafePidToken_); + const auto& SigmaTofPi = iEvent.get(SigmaTofPiToken_); + const auto& SigmaTofK = iEvent.get(SigmaTofKToken_); + const auto& SigmaTofP = iEvent.get(SigmaTofPToken_); const auto& mtdQualMVA = iEvent.get(trackMVAQualToken_); const auto& trackAssoc = iEvent.get(trackAssocToken_); const auto& pathLength = iEvent.get(pathLengthToken_); @@ -421,6 +454,13 @@ void MtdTracksValidation::analyze(const edm::Event& iEvent, const edm::EventSetu meTrackSigmat0SafePid_->Fill(Sigmat0Safe[trackref]); meTrackMVAQual_->Fill(mtdQualMVA[trackref]); + meTrackSigmaTof_[0]->Fill(SigmaTofPi[trackref] * 1e3); //save as ps + meTrackSigmaTof_[1]->Fill(SigmaTofK[trackref] * 1e3); + meTrackSigmaTof_[2]->Fill(SigmaTofP[trackref] * 1e3); + meTrackSigmaTofvsP_[0]->Fill(track.p(), SigmaTofPi[trackref] * 1e3); + meTrackSigmaTofvsP_[1]->Fill(track.p(), SigmaTofK[trackref] * 1e3); + meTrackSigmaTofvsP_[2]->Fill(track.p(), SigmaTofP[trackref] * 1e3); + meTrackPathLenghtvsEta_->Fill(std::abs(track.eta()), pathLength[trackref]); if (std::abs(track.eta()) < trackMaxBtlEta_) { @@ -554,6 +594,43 @@ void MtdTracksValidation::analyze(const edm::Event& iEvent, const edm::EventSetu meTrackPtTot_->Fill(trackGen.pt()); 
meTrackEtaTot_->Fill(std::abs(trackGen.eta())); if (tp_info != nullptr && mvaTPSel(**tp_info)) { + if (track.pt() < 12.) { + if (isBTL) { + meBTLTrackMatchedTPPtResMtd_->Fill(std::abs(track.pt() - (*tp_info)->pt()) / + std::abs(trackGen.pt() - (*tp_info)->pt())); + meBTLTrackMatchedTPPtRatioGen_->Fill(trackGen.pt() / (*tp_info)->pt()); + meBTLTrackMatchedTPPtRatioMtd_->Fill(track.pt() / (*tp_info)->pt()); + meBTLTrackMatchedTPPtResvsPtMtd_->Fill( + (*tp_info)->pt(), std::abs(track.pt() - (*tp_info)->pt()) / std::abs(trackGen.pt() - (*tp_info)->pt())); + meBTLTrackMatchedTPDPtvsPtGen_->Fill((*tp_info)->pt(), + (trackGen.pt() - (*tp_info)->pt()) / (*tp_info)->pt()); + meBTLTrackMatchedTPDPtvsPtMtd_->Fill((*tp_info)->pt(), (track.pt() - (*tp_info)->pt()) / (*tp_info)->pt()); + } + if (isETL && !twoETLdiscs) { + meETLTrackMatchedTPPtResMtd_->Fill(std::abs(track.pt() - (*tp_info)->pt()) / + std::abs(trackGen.pt() - (*tp_info)->pt())); + meETLTrackMatchedTPPtRatioGen_->Fill(trackGen.pt() / (*tp_info)->pt()); + meETLTrackMatchedTPPtRatioMtd_->Fill(track.pt() / (*tp_info)->pt()); + meETLTrackMatchedTPPtResvsPtMtd_->Fill( + (*tp_info)->pt(), std::abs(track.pt() - (*tp_info)->pt()) / std::abs(trackGen.pt() - (*tp_info)->pt())); + meETLTrackMatchedTPDPtvsPtGen_->Fill((*tp_info)->pt(), + (trackGen.pt() - (*tp_info)->pt()) / ((*tp_info)->pt())); + meETLTrackMatchedTPDPtvsPtMtd_->Fill((*tp_info)->pt(), + (track.pt() - (*tp_info)->pt()) / ((*tp_info)->pt())); + } + if (isETL && twoETLdiscs) { + meETLTrackMatchedTP2PtResMtd_->Fill(std::abs(track.pt() - (*tp_info)->pt()) / + std::abs(trackGen.pt() - (*tp_info)->pt())); + meETLTrackMatchedTP2PtRatioGen_->Fill(trackGen.pt() / (*tp_info)->pt()); + meETLTrackMatchedTP2PtRatioMtd_->Fill(track.pt() / (*tp_info)->pt()); + meETLTrackMatchedTP2PtResvsPtMtd_->Fill( + (*tp_info)->pt(), std::abs(track.pt() - (*tp_info)->pt()) / std::abs(trackGen.pt() - (*tp_info)->pt())); + meETLTrackMatchedTP2DPtvsPtGen_->Fill((*tp_info)->pt(), + 
(trackGen.pt() - (*tp_info)->pt()) / ((*tp_info)->pt())); + meETLTrackMatchedTP2DPtvsPtMtd_->Fill((*tp_info)->pt(), + (track.pt() - (*tp_info)->pt()) / ((*tp_info)->pt())); + } + } const bool withMTD = (m_tp2detid.find(*tp_info) != m_tp2detid.end()); LogDebug("MtdTracksValidation") << "Matched with selected TP, MTD sim hits association: " << withMTD; if (noCrack) { @@ -931,6 +1008,38 @@ void MtdTracksValidation::bookHistograms(DQMStore::IBooker& ibook, edm::Run cons meTrackPathLenghtvsEta_ = ibook.bookProfile( "TrackPathLenghtvsEta", "MTD Track pathlength vs MTD track Eta;|#eta|;Pathlength", 100, 0, 3.2, 100.0, 400.0, "S"); + meTrackSigmaTof_[0] = + ibook.book1D("TrackSigmaTof_Pion", "Sigma(TOF) for pion hypothesis; #sigma_{t0} [ps]", 100, 0, 50); + meTrackSigmaTof_[1] = + ibook.book1D("TrackSigmaTof_Kaon", "Sigma(TOF) for kaon hypothesis; #sigma_{t0} [ps]", 100, 0, 50); + meTrackSigmaTof_[2] = + ibook.book1D("TrackSigmaTof_Proton", "Sigma(TOF) for proton hypothesis; #sigma_{t0} [ps]", 100, 0, 50); + + meTrackSigmaTofvsP_[0] = ibook.bookProfile("TrackSigmaTofvsP_Pion", + "Sigma(TOF) for pion hypothesis vs p; p [GeV]; #sigma_{t0} [ps]", + 20, + 0, + 10., + 0, + 50., + "S"); + meTrackSigmaTofvsP_[1] = ibook.bookProfile("TrackSigmaTofvsP_Kaon", + "Sigma(TOF) for kaon hypothesis vs p; p [GeV]; #sigma_{t0} [ps]", + 20, + 0, + 10., + 0, + 50., + "S"); + meTrackSigmaTofvsP_[2] = ibook.bookProfile("TrackSigmaTofvsP_Proton", + "Sigma(TOF) for proton hypothesis vs p; p [GeV]; #sigma_{t0} [ps]", + 20, + 0, + 10., + 0, + 50., + "S"); + meMVATrackEffPtTot_ = ibook.book1D("MVAEffPtTot", "Pt of tracks associated to LV; track pt [GeV] ", 110, 0., 11.); meMVATrackMatchedEffPtTot_ = ibook.book1D("MVAMatchedEffPtTot", "Pt of tracks associated to LV matched to GEN; track pt [GeV] ", 110, 0., 11.); @@ -949,6 +1058,129 @@ void MtdTracksValidation::bookHistograms(DQMStore::IBooker& ibook, edm::Run cons meTrackMatchedTPEffPtEtl2Mtd_ = ibook.book1D( "MatchedTPEffPtEtl2Mtd", "Pt of tracks 
matched to TP with time, 2 ETL hits; track pt [GeV] ", 110, 0., 11.); + meBTLTrackMatchedTPPtResMtd_ = ibook.book1D( + "TrackMatchedTPBTLPtResMtd", + "Pt resolution of tracks matched to TP-BTL hit ;|pT_{MTDtrack}-pT_{truth}|/|pT_{Gentrack}-pT_{truth}| ", + 100, + 0., + 4.); + meETLTrackMatchedTPPtResMtd_ = ibook.book1D( + "TrackMatchedTPETLPtResMtd", + "Pt resolution of tracks matched to TP-ETL hit ;|pT_{MTDtrack}-pT_{truth}|/|pT_{Gentrack}-pT_{truth}| ", + 100, + 0., + 4.); + meETLTrackMatchedTP2PtResMtd_ = ibook.book1D( + "TrackMatchedTPETL2PtResMtd", + "Pt resolution of tracks matched to TP-ETL 2hits ;|pT_{MTDtrack}-pT_{truth}|/|pT_{Gentrack}-pT_{truth}| ", + 100, + 0., + 4.); + meBTLTrackMatchedTPPtRatioGen_ = ibook.book1D( + "TrackMatchedTPBTLPtRatioGen", "Pt ratio of Gentracks (BTL) ;pT_{Gentrack}/pT_{truth} ", 100, 0.9, 1.1); + meETLTrackMatchedTPPtRatioGen_ = ibook.book1D( + "TrackMatchedTPETLPtRatioGen", "Pt ratio of Gentracks (ETL 1hit) ;pT_{Gentrack}/pT_{truth} ", 100, 0.9, 1.1); + meETLTrackMatchedTP2PtRatioGen_ = ibook.book1D( + "TrackMatchedTPETL2PtRatioGen", "Pt ratio of Gentracks (ETL 2hits) ;pT_{Gentrack}/pT_{truth} ", 100, 0.9, 1.1); + meBTLTrackMatchedTPPtRatioMtd_ = ibook.book1D("TrackMatchedTPBTLPtRatioMtd", + "Pt ratio of tracks matched to TP-BTL hits ;pT_{MTDtrack}/pT_{truth} ", + 100, + 0.9, + 1.1); + meETLTrackMatchedTPPtRatioMtd_ = ibook.book1D("TrackMatchedTPETLPtRatioMtd", + "Pt ratio of tracks matched to TP-ETL hits ;pT_{MTDtrack}/pT_{truth} ", + 100, + 0.9, + 1.1); + meETLTrackMatchedTP2PtRatioMtd_ = + ibook.book1D("TrackMatchedTPETL2PtRatioMtd", + "Pt ratio of tracks matched to TP-ETL 2hits ;pT_{MTDtrack}/pT_{truth} ", + 100, + 0.9, + 1.1); + meBTLTrackMatchedTPPtResvsPtMtd_ = ibook.bookProfile("TrackMatchedTPBTLPtResvsPtMtd", + "Pt resolution of tracks matched to TP-BTL hit vs Pt;pT_{truth} " + "[GeV];|pT_{MTDtrack}-pT_{truth}|/|pT_{Gentrack}-pT_{truth}| ", + 20, + 0.7, + 10., + 0., + 4., + "s"); + meETLTrackMatchedTPPtResvsPtMtd_ = 
ibook.bookProfile("TrackMatchedTPETLPtResvsPtMtd", + "Pt resolution of tracks matched to TP-ETL hit vs Pt;pT_{truth} " + "[GeV];|pT_{MTDtrack}-pT_{truth}|/|pT_{Gentrack}-pT_{truth}| ", + 20, + 0.7, + 10., + 0., + 4., + "s"); + meETLTrackMatchedTP2PtResvsPtMtd_ = + ibook.bookProfile("TrackMatchedTPETL2PtResvsPtMtd", + "Pt resolution of tracks matched to TP-ETL 2hits Pt pT;pT_{truth} " + "[GeV];|pT_{MTDtrack}-pT_{truth}|/|pT_{Gentrack}-pT_{truth}| ", + 20, + 0.7, + 10., + 0., + 4., + "s"); + meBTLTrackMatchedTPDPtvsPtGen_ = ibook.bookProfile( + "TrackMatchedTPBTLDPtvsPtGen", + "Pt relative difference of Gentracks (BTL) vs Pt;pT_{truth} [GeV];pT_{Gentrack}-pT_{truth}/pT_{truth} ", + 20, + 0.7, + 10., + -0.1, + 0.1, + "s"); + meETLTrackMatchedTPDPtvsPtGen_ = ibook.bookProfile( + "TrackMatchedTPETLDPtvsPtGen", + "Pt relative difference of Gentracks (ETL 1hit) vs Pt;pT_{truth} [GeV];pT_{Gentrack}-pT_{truth}/pT_{truth} ", + 20, + 0.7, + 10., + -0.1, + 0.1, + "s"); + meETLTrackMatchedTP2DPtvsPtGen_ = ibook.bookProfile( + "TrackMatchedTPETL2DPtvsPtGen", + "Pt relative difference of Gentracks (ETL 2hits) vs Pt;pT_{truth} [GeV];pT_{Gentrack}-pT_{truth}/pT_{truth} ", + 20, + 0.7, + 10., + -0.1, + 0.1, + "s"); + meBTLTrackMatchedTPDPtvsPtMtd_ = ibook.bookProfile("TrackMatchedTPBTLDPtvsPtMtd", + "Pt relative difference of tracks matched to TP-BTL hits vs " + "Pt;pT_{truth} [GeV];pT_{MTDtrack}-pT_{truth}/pT_{truth} ", + 20, + 0.7, + 10., + -0.1, + 0.1, + "s"); + meETLTrackMatchedTPDPtvsPtMtd_ = ibook.bookProfile("TrackMatchedTPETLDPtvsPtMtd", + "Pt relative difference of tracks matched to TP-ETL hits vs " + "Pt;pT_{truth} [GeV];pT_{MTDtrack}-pT_{truth}/pT_{truth} ", + 20, + 0.7, + 10., + -0.1, + 0.1, + "s"); + meETLTrackMatchedTP2DPtvsPtMtd_ = ibook.bookProfile("TrackMatchedTPETL2DPtvsPtMtd", + "Pt relative difference of tracks matched to TP-ETL 2hits vs " + "Pt;pT_{truth} [GeV];pT_{MTDtrack}-pT_{truth}/pT_{truth} ", + 20, + 0.7, + 10., + -0.1, + 0.1, + "s"); + 
meTrackMatchedTPmtdEffPtTot_ = ibook.book1D("MatchedTPmtdEffPtTot", "Pt of tracks matched to TP-mtd hit; track pt [GeV] ", 110, 0., 11.); meTrackMatchedTPmtdEffPtMtd_ = ibook.book1D( @@ -1036,6 +1268,9 @@ void MtdTracksValidation::fillDescriptions(edm::ConfigurationDescriptions& descr desc.add("sigmat0SafePID", edm::InputTag("tofPID:sigmat0safe")); desc.add("sigmat0PID", edm::InputTag("tofPID:sigmat0")); desc.add("t0PID", edm::InputTag("tofPID:t0")); + desc.add("sigmaTofPi", edm::InputTag("trackExtenderWithMTD:generalTrackSigmaTofPi")); + desc.add("sigmaTofK", edm::InputTag("trackExtenderWithMTD:generalTrackSigmaTofK")); + desc.add("sigmaTofP", edm::InputTag("trackExtenderWithMTD:generalTrackSigmaTofP")); desc.add("trackMVAQual", edm::InputTag("mtdTrackQualityMVA:mtdQualMVA")); desc.add("trackMinimumPt", 0.7); // [GeV] desc.add("trackMaximumBtlEta", 1.5); @@ -1112,4 +1347,4 @@ const edm::Ref>* MtdTracksValidation::getMatchedTP return nullptr; } -DEFINE_FWK_MODULE(MtdTracksValidation); +DEFINE_FWK_MODULE(MtdTracksValidation); \ No newline at end of file diff --git a/Validation/RecoParticleFlow/Makefile b/Validation/RecoParticleFlow/Makefile index b05b22b10ce83..22d74979652b3 100644 --- a/Validation/RecoParticleFlow/Makefile +++ b/Validation/RecoParticleFlow/Makefile @@ -75,7 +75,7 @@ plots: QCD_plots: rm -Rf plots python3 test/compare.py \ - --sample FlatQCD_noPU:${TMPDIR}/QCD/${DQM_MC}.root:${TMPDIR}/QCD/${DQM_MC}.root \ + --sample FlatQCD_noPU:${TMPDIR}/QCD/${DQM_MC}.root:${TMPDIR}/QCD/${DQM_MC}.root \ --doResponsePlots --doOffsetPlots --doMETPlots --doPFCandPlots QCDPU_plots: diff --git a/Validation/RecoParticleFlow/README.md b/Validation/RecoParticleFlow/README.md index 372f40b8d42a1..0639b2e6fb82e 100644 --- a/Validation/RecoParticleFlow/README.md +++ b/Validation/RecoParticleFlow/README.md @@ -10,8 +10,8 @@ for lxplus with SLC8 (used for Run3 CMSSW releases) ~~~ ssh -X username@lxplus8.cern.ch export SCRAM_ARCH=el8_amd64_gcc10 -cmsrel CMSSW_12_5_0_pre3 -cd 
CMSSW_12_5_0_pre3 +cmsrel CMSSW_13_3_0_pre3 +cd CMSSW_13_3_0_pre3 cmsenv ~~~ diff --git a/Validation/RecoParticleFlow/plugins/OffsetAnalyzerDQM.cc b/Validation/RecoParticleFlow/plugins/OffsetAnalyzerDQM.cc index 7fd2b7204d6d6..d37d801bcef48 100644 --- a/Validation/RecoParticleFlow/plugins/OffsetAnalyzerDQM.cc +++ b/Validation/RecoParticleFlow/plugins/OffsetAnalyzerDQM.cc @@ -163,6 +163,7 @@ void OffsetAnalyzerDQM::analyze(const edm::Event& iEvent, const edm::EventSetup& int npv = 0; for (unsigned int i = 0; i < nPVall; i++) { const auto& pv = vertexHandle->at(i); + th1dPlots["pv_z"].fill(pv.z()); if (!pv.isFake() && pv.ndof() >= 4 && fabs(pv.z()) <= 24.0 && fabs(pv.position().rho()) <= 2.0) { npv++; diff --git a/Validation/RecoParticleFlow/plugins/PFCaloGPUComparisonTask.cc b/Validation/RecoParticleFlow/plugins/PFCaloGPUComparisonTask.cc new file mode 100644 index 0000000000000..404c27f715773 --- /dev/null +++ b/Validation/RecoParticleFlow/plugins/PFCaloGPUComparisonTask.cc @@ -0,0 +1,168 @@ +#include "DQMServices/Core/interface/DQMEDAnalyzer.h" +#include "DQMServices/Core/interface/DQMStore.h" +#include "DataFormats/CaloRecHit/interface/CaloCluster.h" +#include "DataFormats/CaloRecHit/interface/CaloClusterFwd.h" +#include "DataFormats/CaloTowers/interface/CaloTowerCollection.h" +#include "DataFormats/CaloTowers/interface/CaloTowerDetId.h" +#include "DataFormats/Common/interface/Handle.h" +#include "DataFormats/DetId/interface/DetId.h" +#include "DataFormats/EcalDetId/interface/EcalSubdetector.h" +#include "DataFormats/HcalDetId/interface/HcalDetId.h" +#include "DataFormats/HcalDetId/interface/HcalSubdetector.h" +#include "DataFormats/Math/interface/Vector3D.h" +#include "DataFormats/Math/interface/deltaR.h" +#include "DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" +#include "DataFormats/ParticleFlowReco/interface/PFBlock.h" +#include "DataFormats/ParticleFlowReco/interface/PFBlockElementCluster.h" +#include 
"DataFormats/ParticleFlowReco/interface/PFBlockElementTrack.h" +#include "DataFormats/ParticleFlowReco/interface/PFCluster.h" +#include "DataFormats/ParticleFlowReco/interface/PFClusterFwd.h" +#include "DataFormats/ParticleFlowReco/interface/PFLayer.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHit.h" +#include "DataFormats/ParticleFlowReco/interface/PFRecHitFraction.h" +#include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" +#include "FWCore/Framework/interface/Frameworkfwd.h" +#include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/ParameterSet/interface/ParameterSet.h" +#include "FWCore/PluginManager/interface/ModuleDef.h" +#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" +#include "SimDataFormats/GeneratorProducts/interface/HepMCProduct.h" + +#include +#include +#include +#include +#include +#include + +#ifdef PFLOW_DEBUG +#define LOGVERB(x) edm::LogVerbatim(x) +#else +#define LOGVERB(x) LogTrace(x) +#endif + +class PFCaloGPUComparisonTask : public DQMEDAnalyzer { +public: + PFCaloGPUComparisonTask(edm::ParameterSet const& conf); + ~PFCaloGPUComparisonTask() override = default; + void analyze(edm::Event const& e, edm::EventSetup const& c) override; + void bookHistograms(DQMStore::IBooker&, edm::Run const&, edm::EventSetup const&) override; + +private: + edm::EDGetTokenT pfClusterTok_ref_; + edm::EDGetTokenT pfClusterTok_target_; + + MonitorElement* pfCluster_Multiplicity_GPUvsCPU_; + MonitorElement* pfCluster_Energy_GPUvsCPU_; + MonitorElement* pfCluster_RecHitMultiplicity_GPUvsCPU_; + MonitorElement* pfCluster_Layer_GPUvsCPU_; + MonitorElement* pfCluster_Depth_GPUvsCPU_; + MonitorElement* pfCluster_Eta_GPUvsCPU_; + MonitorElement* pfCluster_Phi_GPUvsCPU_; + MonitorElement* pfCluster_DuplicateMatches_GPUvsCPU_; + + std::string pfCaloGPUCompDir_; +}; + +PFCaloGPUComparisonTask::PFCaloGPUComparisonTask(const edm::ParameterSet& conf) + : pfClusterTok_ref_{consumes( + 
conf.getUntrackedParameter("pfClusterToken_ref"))}, + pfClusterTok_target_{ + consumes(conf.getUntrackedParameter("pfClusterToken_target"))}, + pfCaloGPUCompDir_{conf.getUntrackedParameter("pfCaloGPUCompDir")} {} + +void PFCaloGPUComparisonTask::bookHistograms(DQMStore::IBooker& ibooker, + edm::Run const& irun, + edm::EventSetup const& isetup) { + const char* histo; + + ibooker.setCurrentFolder("ParticleFlow/" + pfCaloGPUCompDir_); + + histo = "pfCluster_Multiplicity_GPUvsCPU"; + pfCluster_Multiplicity_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 2000, 100, 0, 2000); + + histo = "pfCluster_Energy_GPUvsCPU"; + pfCluster_Energy_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 500, 100, 0, 500); + + histo = "pfCluster_RecHitMultiplicity_GPUvsCPU"; + pfCluster_RecHitMultiplicity_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 100, 100, 0, 100); + + histo = "pfCluster_Layer_GPUvsCPU"; + pfCluster_Layer_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 100, 100, 0, 100); + + histo = "pfCluster_Depth_GPUvsCPU"; + pfCluster_Depth_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 100, 100, 0, 100); + + histo = "pfCluster_Eta_GPUvsCPU"; + pfCluster_Eta_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 100, 100, 0, 100); + + histo = "pfCluster_Phi_GPUvsCPU"; + pfCluster_Phi_GPUvsCPU_ = ibooker.book2D(histo, histo, 100, 0, 100, 100, 0, 100); + + histo = "pfCluster_DuplicateMatches_GPUvsCPU"; + pfCluster_DuplicateMatches_GPUvsCPU_ = ibooker.book1D(histo, histo, 100, 0., 1000); +} +void PFCaloGPUComparisonTask::analyze(edm::Event const& event, edm::EventSetup const& c) { + edm::Handle pfClusters_ref; + event.getByToken(pfClusterTok_ref_, pfClusters_ref); + + edm::Handle pfClusters_target; + event.getByToken(pfClusterTok_target_, pfClusters_target); + + // + // Compare per-event PF cluster multiplicity + + if (pfClusters_ref->size() != pfClusters_target->size()) + LOGVERB("PFCaloGPUComparisonTask") << " PFCluster multiplicity " << pfClusters_ref->size() << " " + << 
pfClusters_target->size(); + pfCluster_Multiplicity_GPUvsCPU_->Fill((float)pfClusters_ref->size(), (float)pfClusters_target->size()); + + // + // Find matching PF cluster pairs + std::vector matched_idx; + matched_idx.reserve(pfClusters_ref->size()); + for (unsigned i = 0; i < pfClusters_ref->size(); ++i) { + bool matched = false; + for (unsigned j = 0; j < pfClusters_target->size(); ++j) { + if (pfClusters_ref->at(i).seed() == pfClusters_target->at(j).seed()) { + if (!matched) { + matched = true; + matched_idx.push_back((int)j); + } else { + edm::LogWarning("PFCaloGPUComparisonTask") << "Found duplicate match"; + pfCluster_DuplicateMatches_GPUvsCPU_->Fill((int)j); + } + } + } + if (!matched) + matched_idx.push_back(-1); // if you don't find a match, put a dummy number + } + + // + // Plot matching PF cluster variables + for (unsigned i = 0; i < pfClusters_ref->size(); ++i) { + if (matched_idx[i] >= 0) { + unsigned int j = matched_idx[i]; + int ref_energy_bin = pfCluster_Energy_GPUvsCPU_->getTH2F()->GetXaxis()->FindBin(pfClusters_ref->at(i).energy()); + int target_energy_bin = + pfCluster_Energy_GPUvsCPU_->getTH2F()->GetXaxis()->FindBin(pfClusters_target->at(j).energy()); + if (ref_energy_bin != target_energy_bin) + edm::LogPrint("PFCaloGPUComparisonTask") + << "Off-diagonal energy bin entries: " << pfClusters_ref->at(i).energy() << " " + << pfClusters_ref->at(i).eta() << " " << pfClusters_ref->at(i).phi() << " " + << pfClusters_target->at(j).energy() << " " << pfClusters_target->at(j).eta() << " " + << pfClusters_target->at(j).phi() << std::endl; + pfCluster_Energy_GPUvsCPU_->Fill(pfClusters_ref->at(i).energy(), pfClusters_target->at(j).energy()); + pfCluster_Layer_GPUvsCPU_->Fill(pfClusters_ref->at(i).layer(), pfClusters_target->at(j).layer()); + pfCluster_Eta_GPUvsCPU_->Fill(pfClusters_ref->at(i).eta(), pfClusters_target->at(j).eta()); + pfCluster_Phi_GPUvsCPU_->Fill(pfClusters_ref->at(i).phi(), pfClusters_target->at(j).phi()); + 
pfCluster_Depth_GPUvsCPU_->Fill(pfClusters_ref->at(i).depth(), pfClusters_target->at(j).depth()); + pfCluster_RecHitMultiplicity_GPUvsCPU_->Fill((float)pfClusters_ref->at(i).recHitFractions().size(), + (float)pfClusters_target->at(j).recHitFractions().size()); + } + } +} + +#include "FWCore/Framework/interface/MakerMacros.h" +DEFINE_FWK_MODULE(PFCaloGPUComparisonTask); diff --git a/Validation/RecoParticleFlow/plugins/PFJetAnalyzerDQM.cc b/Validation/RecoParticleFlow/plugins/PFJetAnalyzerDQM.cc index 1a92fa45956fd..f40e7a0e9bfaf 100644 --- a/Validation/RecoParticleFlow/plugins/PFJetAnalyzerDQM.cc +++ b/Validation/RecoParticleFlow/plugins/PFJetAnalyzerDQM.cc @@ -114,13 +114,17 @@ class PFJetAnalyzerDQM : public DQMEDAnalyzer { std::vector jetResponsePlots; std::vector jetResponsePlots_noJEC; std::vector genJetPlots; - + std::vector genJetPlots_matched; + std::vector genJetPlots_unmatched; + std::vector recoJetPlots; + std::vector recoJetPlots_matched; + std::vector recoJetPlots_unmatched; // Is this data or MC? 
bool isMC; float jetDeltaR; - bool genJetsOn; + bool genJetsOn, recoJetsOn; std::string jetCollectionName; @@ -133,6 +137,11 @@ class PFJetAnalyzerDQM : public DQMEDAnalyzer { void fillJetResponse(edm::View& recoJetCollection, edm::View& genJetCollection); void prepareJetResponsePlots(const std::vector& genjet_plots_pset); void prepareGenJetPlots(const std::vector& genjet_plots_pset); + void prepareGenJetMatchedPlots(const std::vector& genjet_plots_pset); + void prepareGenJetUnmatchedPlots(const std::vector& genjet_plots_pset); + void prepareRecoJetPlots(const std::vector& recojet_plots_pset); + void prepareRecoJetMatchedPlots(const std::vector& recojet_plots_pset); + void prepareRecoJetUnmatchedPlots(const std::vector& recojet_plots_pset); }; void PFJetAnalyzerDQM::prepareJetResponsePlots(const std::vector& response_plots) { @@ -188,6 +197,121 @@ void PFJetAnalyzerDQM::prepareGenJetPlots(const std::vector& } } +void PFJetAnalyzerDQM::prepareGenJetMatchedPlots(const std::vector& genjet_plots_pset) { + for (auto& pset : genjet_plots_pset) { + const auto name = pset.getParameter("name") + "_matched"; + const auto title = "Matched " + pset.getParameter("title"); + + //Low and high edges of the eta bins for jets to pass to be filled into this histogram + const auto ptbins_d = pset.getParameter>("ptBins"); + std::vector ptbins(ptbins_d.begin(), ptbins_d.end()); + + const auto etabin_low = pset.getParameter("etaBinLow"); + const auto etabin_high = pset.getParameter("etaBinHigh"); + + genJetPlots_matched.push_back(Plot1DInBinVariable( + name, + title, + std::make_unique(name.c_str(), title.c_str(), static_cast(ptbins.size()) - 1, ptbins.data()), + 0.0, + 0.0, + etabin_low, + etabin_high)); + } +} + +void PFJetAnalyzerDQM::prepareGenJetUnmatchedPlots(const std::vector& genjet_plots_pset) { + for (auto& pset : genjet_plots_pset) { + const auto name = pset.getParameter("name") + "_unmatched"; + const auto title = "Unmatched " + pset.getParameter("title"); + + //Low and high 
edges of the eta bins for jets to pass to be filled into this histogram + const auto ptbins_d = pset.getParameter>("ptBins"); + std::vector ptbins(ptbins_d.begin(), ptbins_d.end()); + + const auto etabin_low = pset.getParameter("etaBinLow"); + const auto etabin_high = pset.getParameter("etaBinHigh"); + + genJetPlots_unmatched.push_back(Plot1DInBinVariable( + name, + title, + std::make_unique(name.c_str(), title.c_str(), static_cast(ptbins.size()) - 1, ptbins.data()), + 0.0, + 0.0, + etabin_low, + etabin_high)); + } +} + +void PFJetAnalyzerDQM::prepareRecoJetPlots(const std::vector& recojet_plots_pset) { + for (auto& pset : recojet_plots_pset) { + const auto name = pset.getParameter("name"); + const auto title = pset.getParameter("title"); + + //Low and high edges of the eta bins for jets to pass to be filled into this histogram + const auto ptbins_d = pset.getParameter>("ptBins"); + std::vector ptbins(ptbins_d.begin(), ptbins_d.end()); + + const auto etabin_low = pset.getParameter("etaBinLow"); + const auto etabin_high = pset.getParameter("etaBinHigh"); + + recoJetPlots.push_back(Plot1DInBinVariable( + name, + title, + std::make_unique(name.c_str(), title.c_str(), static_cast(ptbins.size()) - 1, ptbins.data()), + 0.0, + 0.0, + etabin_low, + etabin_high)); + } +} + +void PFJetAnalyzerDQM::prepareRecoJetMatchedPlots(const std::vector& recojet_plots_pset) { + for (auto& pset : recojet_plots_pset) { + const auto name = pset.getParameter("name") + "_matched"; + const auto title = "Matched " + pset.getParameter("title"); + + //Low and high edges of the eta bins for jets to pass to be filled into this histogram + const auto ptbins_d = pset.getParameter>("ptBins"); + std::vector ptbins(ptbins_d.begin(), ptbins_d.end()); + + const auto etabin_low = pset.getParameter("etaBinLow"); + const auto etabin_high = pset.getParameter("etaBinHigh"); + + recoJetPlots_matched.push_back(Plot1DInBinVariable( + name, + title, + std::make_unique(name.c_str(), title.c_str(), 
static_cast(ptbins.size()) - 1, ptbins.data()), + 0.0, + 0.0, + etabin_low, + etabin_high)); + } +} + +void PFJetAnalyzerDQM::prepareRecoJetUnmatchedPlots(const std::vector& recojet_plots_pset) { + for (auto& pset : recojet_plots_pset) { + const auto name = pset.getParameter("name") + "_unmatched"; + const auto title = "Unmatched " + pset.getParameter("title"); + + //Low and high edges of the eta bins for jets to pass to be filled into this histogram + const auto ptbins_d = pset.getParameter>("ptBins"); + std::vector ptbins(ptbins_d.begin(), ptbins_d.end()); + + const auto etabin_low = pset.getParameter("etaBinLow"); + const auto etabin_high = pset.getParameter("etaBinHigh"); + + recoJetPlots_unmatched.push_back(Plot1DInBinVariable( + name, + title, + std::make_unique(name.c_str(), title.c_str(), static_cast(ptbins.size()) - 1, ptbins.data()), + 0.0, + 0.0, + etabin_low, + etabin_high)); + } +} + PFJetAnalyzerDQM::PFJetAnalyzerDQM(const edm::ParameterSet& iConfig) { recoJetsLabel = iConfig.getParameter("recoJetCollection"); genJetsLabel = iConfig.getParameter("genJetCollection"); @@ -200,6 +324,7 @@ PFJetAnalyzerDQM::PFJetAnalyzerDQM(const edm::ParameterSet& iConfig) { //for turn genJet on/off genJetsOn = iConfig.getParameter("genJetsOn"); + recoJetsOn = iConfig.getParameter("recoJetsOn"); //Create all jet response plots in bins of genjet pt and eta const auto& response_plots = iConfig.getParameter>("responsePlots"); @@ -207,6 +332,13 @@ PFJetAnalyzerDQM::PFJetAnalyzerDQM(const edm::ParameterSet& iConfig) { const auto& genjet_plots = iConfig.getParameter>("genJetPlots"); prepareGenJetPlots(genjet_plots); + prepareGenJetMatchedPlots(genjet_plots); + prepareGenJetUnmatchedPlots(genjet_plots); + + const auto& recojet_plots = iConfig.getParameter>("recoJetPlots"); + prepareRecoJetPlots(recojet_plots); + prepareRecoJetMatchedPlots(recojet_plots); + prepareRecoJetUnmatchedPlots(recojet_plots); recoJetsToken = consumes>(recoJetsLabel); genJetsToken = 
consumes>(genJetsLabel); @@ -215,7 +347,37 @@ PFJetAnalyzerDQM::PFJetAnalyzerDQM(const edm::ParameterSet& iConfig) { void PFJetAnalyzerDQM::fillJetResponse(edm::View& recoJetCollection, edm::View& genJetCollection) { //match gen jets to reco jets, require minimum jetDeltaR, choose closest, do not try to match charge std::vector matchIndices; + std::vector matchIndicesReco; PFB::match(genJetCollection, recoJetCollection, matchIndices, false, jetDeltaR); + PFB::match(recoJetCollection, genJetCollection, matchIndicesReco, false, jetDeltaR); + + //Fill recojet pt if recoJetOn + for (unsigned int i = 0; i < recoJetCollection.size(); i++) { + const auto& recoJet = recoJetCollection.at(i); + const auto pt_reco = recoJet.pt(); + const auto eta_reco = abs(recoJet.eta()); + const int iMatch_reco = matchIndicesReco[i]; + if (recoJetsOn) { + for (auto& plot : recoJetPlots) { + if (plot.isInEtaBin(eta_reco)) { + plot.fill(pt_reco); + } + } + if (iMatch_reco != -1) { + for (auto& plot : recoJetPlots_matched) { + if (plot.isInEtaBin(eta_reco)) { + plot.fill(pt_reco); + } + } + } else { + for (auto& plot : recoJetPlots_unmatched) { + if (plot.isInEtaBin(eta_reco)) { + plot.fill(pt_reco); + } + } + } + } + } for (unsigned int i = 0; i < genJetCollection.size(); i++) { const auto& genJet = genJetCollection.at(i); @@ -231,6 +393,21 @@ void PFJetAnalyzerDQM::fillJetResponse(edm::View& recoJetCollection, e } } } + if (recoJetsOn) { + if (iMatch != -1) { + for (auto& plot : genJetPlots_matched) { + if (plot.isInEtaBin(eta_gen)) { + plot.fill(pt_gen); + } + } + } else { + for (auto& plot : genJetPlots_unmatched) { + if (plot.isInEtaBin(eta_gen)) { + plot.fill(pt_gen); + } + } + } + } //If gen jet had a matched reco jet if (iMatch != -1) { @@ -269,6 +446,26 @@ void PFJetAnalyzerDQM::bookHistograms(DQMStore::IBooker& booker, edm::Run const& plot.book(booker); } + if (recoJetsOn) { + booker.setCurrentFolder("ParticleFlow/JetResponse/" + jetCollectionName + "/noJEC/"); + for (auto& plot : 
genJetPlots_matched) { + plot.book(booker); + } + for (auto& plot : genJetPlots_unmatched) { + plot.book(booker); + } + booker.setCurrentFolder("ParticleFlow/JetResponse/" + jetCollectionName + "/JEC/"); + for (auto& plot : recoJetPlots) { + plot.book(booker); + } + for (auto& plot : recoJetPlots_matched) { + plot.book(booker); + } + for (auto& plot : recoJetPlots_unmatched) { + plot.book(booker); + } + } + //Book plots for gen-jet pt spectra if (genJetsOn) { booker.setCurrentFolder("ParticleFlow/GenJets/"); diff --git a/Validation/RecoParticleFlow/plugins/PFJetDQMPostProcessor.cc b/Validation/RecoParticleFlow/plugins/PFJetDQMPostProcessor.cc index 86d0119066aa4..ddcfd810e9026 100644 --- a/Validation/RecoParticleFlow/plugins/PFJetDQMPostProcessor.cc +++ b/Validation/RecoParticleFlow/plugins/PFJetDQMPostProcessor.cc @@ -18,6 +18,7 @@ #include "DQMServices/Core/interface/DQMStore.h" #include "TCanvas.h" +#include "TGraphAsymmErrors.h" // // class declaration @@ -43,6 +44,7 @@ class PFJetDQMPostProcessor : public DQMEDHarvester { std::vector jetResponseDir; std::string genjetDir; + std::string offsetDir; std::vector ptBins; std::vector etaBins; @@ -70,6 +72,7 @@ class PFJetDQMPostProcessor : public DQMEDHarvester { PFJetDQMPostProcessor::PFJetDQMPostProcessor(const edm::ParameterSet& iConfig) { jetResponseDir = iConfig.getParameter>("jetResponseDir"); genjetDir = iConfig.getParameter("genjetDir"); + offsetDir = iConfig.getParameter("offsetDir"); ptBins = iConfig.getParameter>("ptBins"); etaBins = iConfig.getParameter>("etaBins"); recoptcut = iConfig.getParameter("recoPtCut"); @@ -83,12 +86,14 @@ void PFJetDQMPostProcessor::dqmEndJob(DQMStore::IBooker& ibook_, DQMStore::IGett iget_.setCurrentFolder(genjetDir); std::vector sME_genjets = iget_.getMEs(); std::for_each(sME_genjets.begin(), sME_genjets.end(), [&](auto& s) { s.insert(0, genjetDir); }); - //for (unsigned int i=0; i sME_offset = iget_.getMEs(); + std::for_each(sME_offset.begin(), sME_offset.end(), [&](auto& s) 
{ s.insert(0, offsetDir); }); iget_.setCurrentFolder(jetResponseDir[idir]); std::vector sME_response = iget_.getMEs(); std::for_each(sME_response.begin(), sME_response.end(), [&](auto& s) { s.insert(0, jetResponseDir[idir]); }); - //for (unsigned int i=0; i vME_presponse; + std::vector vME_presponse_mean; + std::vector vME_presponse_median; std::vector vME_preso; std::vector vME_preso_rms; + std::vector vME_efficiency; + std::vector vME_purity; + std::vector vME_ratePUJet; MonitorElement* me; TH1F* h_resp; - TH1F* h_genjet_pt; + TH1F *h_genjet_pt, *h_genjet_matched_pt = nullptr; + TH1F *h_recojet_pt = nullptr, *h_recojet_matched_pt = nullptr, *h_recojet_unmatched_pt = nullptr; + + stitle = offsetDir + "mu"; + std::vector::const_iterator it = std::find(sME_offset.begin(), sME_offset.end(), stitle); + if (it == sME_offset.end()) + continue; + me = iget_.get(stitle); + int nEvents = ((TH1F*)me->getTH1F())->GetEntries(); + if (nEvents == 0) + continue; + iget_.setCurrentFolder(jetResponseDir[idir]); + + bool isNoJEC = (jetResponseDir[idir].find("noJEC") != std::string::npos); + bool isJEC = false; + if (!isNoJEC) + isJEC = (jetResponseDir[idir].find("JEC") != std::string::npos); // // Response distributions // for (unsigned int ieta = 1; ieta < etaBins.size(); ++ieta) { stitle = genjetDir + "genjet_pt" + "_eta" + seta(etaBins[ieta]); - //std::cout << ieta << " " << stitle << std::endl; std::vector::const_iterator it = std::find(sME_genjets.begin(), sME_genjets.end(), stitle); if (it == sME_genjets.end()) @@ -120,17 +145,63 @@ void PFJetDQMPostProcessor::dqmEndJob(DQMStore::IBooker& ibook_, DQMStore::IGett me = iget_.get(stitle); h_genjet_pt = (TH1F*)me->getTH1F(); + if (isNoJEC) { + // getting the histogram for matched gen jets + stitle = jetResponseDir[idir] + "genjet_pt" + "_eta" + seta(etaBins[ieta]) + "_matched"; + me = iget_.get(stitle); + h_genjet_matched_pt = (TH1F*)me->getTH1F(); + + /*// getting the histogram for unmatched gen jets + stitle = 
jetResponseDir[idir] + "genjet_pt" + "_eta" + seta(etaBins[ieta]) + "_unmatched"; + me = iget_.get(stitle); + h_genjet_unmatched_pt = (TH1F*)me->getTH1F();*/ + } + if (isJEC) { + // getting the histogram for reco jets + stitle = jetResponseDir[idir] + "recojet_pt" + "_eta" + seta(etaBins[ieta]); + me = iget_.get(stitle); + h_recojet_pt = (TH1F*)me->getTH1F(); + + // getting the histogram for matched reco jets + stitle = jetResponseDir[idir] + "recojet_pt" + "_eta" + seta(etaBins[ieta]) + "_matched"; + me = iget_.get(stitle); + h_recojet_matched_pt = (TH1F*)me->getTH1F(); + + // getting the histogram for unmatched reco jets + stitle = jetResponseDir[idir] + "recojet_pt" + "_eta" + seta(etaBins[ieta]) + "_unmatched"; + me = iget_.get(stitle); + h_recojet_unmatched_pt = (TH1F*)me->getTH1F(); + } + stitle = "presponse_eta" + seta(etaBins[ieta]); // adding "Raw" to the title of raw jet response histograms - if (jetResponseDir[idir].find("noJEC") != std::string::npos) { + if (isNoJEC) { sprintf(ctitle, "Raw Jet pT response, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); } else { sprintf(ctitle, "Jet pT response, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); } TH1F* h_presponse = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); - stitle = "preso_eta" + seta(etaBins[ieta]); + stitle = "presponse_eta" + seta(etaBins[ieta]) + "_mean"; + // adding "Raw" to the title of raw jet response histograms if (jetResponseDir[idir].find("noJEC") != std::string::npos) { + sprintf(ctitle, "Raw Jet pT response using Mean, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + } else { + sprintf(ctitle, "Jet pT response using Mean, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + } + TH1F* h_presponse_mean = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); + + stitle = "presponse_eta" + seta(etaBins[ieta]) + "_median"; + // adding "Raw" to the title of raw jet response histograms + if (isNoJEC) { + sprintf(ctitle, "Raw Jet pT response using Med., 
%4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + } else { + sprintf(ctitle, "Jet pT response using Med., %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + } + TH1F* h_presponse_median = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); + + stitle = "preso_eta" + seta(etaBins[ieta]); + if (isNoJEC) { sprintf(ctitle, "Raw Jet pT resolution, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); } else { sprintf(ctitle, "Jet pT resolution, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); @@ -145,6 +216,31 @@ void PFJetDQMPostProcessor::dqmEndJob(DQMStore::IBooker& ibook_, DQMStore::IGett } TH1F* h_preso_rms = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); + //Booking histogram for Jet Efficiency vs pT + stitle = "efficiency_eta" + seta(etaBins[ieta]); + sprintf(ctitle, "Efficiency, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + TH1F* h_efficiency = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); + + //Booking histogram for Jet Purity vs pT + stitle = "purity_eta" + seta(etaBins[ieta]); + sprintf(ctitle, "Purity, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + TH1F* h_purity = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); + + //Booking histogram for #PU jets vs pT + stitle = "ratePUJet_eta" + seta(etaBins[ieta]); + sprintf(ctitle, "PU Jet Rate, %4.1f<|#eta|<%4.1f", etaBins[ieta - 1], etaBins[ieta]); + TH1F* h_ratePUJet = new TH1F(stitle.c_str(), ctitle, nPtBins, ptBinsArray); + + if (isNoJEC) { + h_efficiency->Divide(h_genjet_matched_pt, h_genjet_pt, 1, 1, "B"); + } + if (isJEC) { + h_purity->Divide(h_recojet_matched_pt, h_recojet_pt, 1, 1, "B"); + h_ratePUJet = (TH1F*)h_recojet_unmatched_pt->Clone(); + h_ratePUJet->SetName("h_ratePUJet"); + h_ratePUJet->Scale(1. 
/ double(nEvents)); + } + for (unsigned int ipt = 0; ipt < ptBins.size() - 1; ++ipt) { stitle = jetResponseDir[idir] + "reso_dist_" + spt(ptBins[ipt], ptBins[ipt + 1]) + "_eta" + seta(etaBins[ieta]); std::vector::const_iterator it = std::find(sME_response.begin(), sME_response.end(), stitle); @@ -184,12 +280,55 @@ void PFJetDQMPostProcessor::dqmEndJob(DQMStore::IBooker& ibook_, DQMStore::IGett h_preso_rms->SetBinContent(ipt + 1, std); h_preso_rms->SetBinError(ipt + 1, err); + // Mean-based + h_presponse_mean->SetBinContent(ipt + 1, h_resp->GetMean()); + h_presponse_mean->SetBinError(ipt + 1, h_resp->GetMeanError()); + + // Median-based + if (h_resp->GetEntries() > 0) { + int numBins = h_resp->GetXaxis()->GetNbins(); + Double_t x1[numBins]; + Double_t y1[numBins]; + for (int i = 0; i < numBins; i++) { + x1[i] = h_resp->GetBinCenter(i + 1); + y1[i] = h_resp->GetBinContent(i + 1) > 0 ? h_resp->GetBinContent(i + 1) : 0.0; + } + const auto x = x1, y = y1; + double median = TMath::Median(numBins, x, y); + h_presponse_median->SetBinContent(ipt + 1, median); + h_presponse_median->SetBinError(ipt + 1, 1.2533 * (h_resp->GetMeanError())); + } + } // ipt + if (isNoJEC) { + stitle = "efficiency_eta" + seta(etaBins[ieta]); + me = ibook_.book1D(stitle.c_str(), h_efficiency); + vME_efficiency.push_back(me); + } + + if (isJEC) { + stitle = "purity_eta" + seta(etaBins[ieta]); + me = ibook_.book1D(stitle.c_str(), h_purity); + vME_purity.push_back(me); + + stitle = "ratePUJet_eta" + seta(etaBins[ieta]); + me = ibook_.book1D(stitle.c_str(), h_ratePUJet); + vME_ratePUJet.push_back(me); + } + stitle = "presponse_eta" + seta(etaBins[ieta]); me = ibook_.book1D(stitle.c_str(), h_presponse); vME_presponse.push_back(me); + stitle = "presponse_eta" + seta(etaBins[ieta]) + "_mean"; + me = ibook_.book1D(stitle.c_str(), h_presponse_mean); + vME_presponse_mean.push_back(me); + + stitle = "presponse_eta" + seta(etaBins[ieta]) + "_median"; + me = ibook_.book1D(stitle.c_str(), h_presponse_median); + 
vME_presponse_median.push_back(me); + stitle = "preso_eta" + seta(etaBins[ieta]); me = ibook_.book1D(stitle.c_str(), h_preso); vME_preso.push_back(me); @@ -204,8 +343,26 @@ void PFJetDQMPostProcessor::dqmEndJob(DQMStore::IBooker& ibook_, DQMStore::IGett // Checks // if (debug) { + if (isNoJEC) { + for (std::vector::const_iterator i = vME_efficiency.begin(); i != vME_efficiency.end(); ++i) + (*i)->getTH1F()->Print(); + } + if (isJEC) { + for (std::vector::const_iterator i = vME_purity.begin(); i != vME_purity.end(); ++i) + (*i)->getTH1F()->Print(); + for (std::vector::const_iterator i = vME_ratePUJet.begin(); i != vME_ratePUJet.end(); ++i) + (*i)->getTH1F()->Print(); + } + for (std::vector::const_iterator i = vME_presponse.begin(); i != vME_presponse.end(); ++i) (*i)->getTH1F()->Print(); + for (std::vector::const_iterator i = vME_presponse_mean.begin(); i != vME_presponse_mean.end(); + ++i) + (*i)->getTH1F()->Print(); + for (std::vector::const_iterator i = vME_presponse_median.begin(); + i != vME_presponse_median.end(); + ++i) + (*i)->getTH1F()->Print(); for (std::vector::const_iterator i = vME_preso.begin(); i != vME_preso.end(); ++i) (*i)->getTH1F()->Print(); for (std::vector::const_iterator i = vME_preso_rms.begin(); i != vME_preso_rms.end(); ++i) @@ -246,7 +403,7 @@ void PFJetDQMPostProcessor::fitResponse(TH1F* hreso, TF1* fg = new TF1("mygaus", "gaus", fitlow, fithigh); TF1* fg2 = new TF1("fg2", "TMath::Gaus(x,[0],[1],true)*[2]", fitlow, fithigh); - hreso->Fit("mygaus", "RQNL"); + hreso->Fit("mygaus", "RQN"); fg2->SetParameter(0, fg->GetParameter(1)); fg2->SetParameter(1, fg->GetParameter(2)); @@ -259,10 +416,10 @@ void PFJetDQMPostProcessor::fitResponse(TH1F* hreso, // (3) and number of bins (100) fg2->FixParameter(2, ngenjet * 3. / 100.); - hreso->Fit("fg2", "RQNL"); + hreso->Fit("fg2", "RQN"); fitlow = fg2->GetParameter(0) - 1.5 * fg2->GetParameter(1); - fitlow = TMath::Max(15. 
/ ptlow, fitlow); + fitlow = TMath::Max(recoptcut / ptlow, fitlow); fithigh = fg2->GetParameter(0) + 1.5 * fg2->GetParameter(1); fg2->SetRange(fitlow, fithigh); diff --git a/Validation/RecoParticleFlow/python/defaults_cfi.py b/Validation/RecoParticleFlow/python/defaults_cfi.py index 80fdf342f9f26..e7b3c17931997 100644 --- a/Validation/RecoParticleFlow/python/defaults_cfi.py +++ b/Validation/RecoParticleFlow/python/defaults_cfi.py @@ -15,6 +15,11 @@ def genjet_distribution_name(ietabin): eta_string = "{0:.1f}".format(etabins[ietabin+1]).replace(".", "") return "genjet_pt_eta{0}".format(eta_string) +def recojet_distribution_name(ietabin): + eta_string = "{0:.1f}".format(etabins[ietabin+1]).replace(".", "") + return "recojet_pt_eta{0}".format(eta_string) + + jetResponseDir = 'ParticleFlow/JetResponse/' genjetDir = 'ParticleFlow/GenJets/' @@ -32,6 +37,7 @@ def genjet_distribution_name(ietabin): npvLowOffset = 0 npvHighOffset = 100 +pvzHighOffset = 100 eBinsOffset = 1000 eLowOffset = 0 diff --git a/Validation/RecoParticleFlow/python/offsetAnalyzerDQM_cff.py b/Validation/RecoParticleFlow/python/offsetAnalyzerDQM_cff.py index b7a0ca8669281..d3cb95be5bea5 100644 --- a/Validation/RecoParticleFlow/python/offsetAnalyzerDQM_cff.py +++ b/Validation/RecoParticleFlow/python/offsetAnalyzerDQM_cff.py @@ -50,14 +50,15 @@ def createOffsetVPSet(): def createTH1DVPSet(): plots = [] #hname, title, xmax - toplot = ( ("mu", "#mu", default.muHighOffset), ("npv", "N_{PV}", default.npvHighOffset) ) + toplot = ( ("mu", "#mu", 0, default.muHighOffset), ("npv", "N_{PV}", 0, default.npvHighOffset), + ("pv_z", "z_{PV}", -default.pvzHighOffset, default.pvzHighOffset)) - for hname, title, xmax in toplot : + for hname, title, xmin, xmax in toplot : plots += [ plotPSet( hname, hname + ";" + title, default.offsetDir, - xmax, 0, xmax + xmax-xmin, xmin, xmax )] return plots diff --git a/Validation/RecoParticleFlow/python/particleFlowDQM_cff.py b/Validation/RecoParticleFlow/python/particleFlowDQM_cff.py 
index 1c1eaa86129a4..c6d846a15e335 100644 --- a/Validation/RecoParticleFlow/python/particleFlowDQM_cff.py +++ b/Validation/RecoParticleFlow/python/particleFlowDQM_cff.py @@ -1,6 +1,6 @@ import FWCore.ParameterSet.Config as cms import Validation.RecoParticleFlow.defaults_cfi as default -from Validation.RecoParticleFlow.defaults_cfi import ptbins, etabins, response_distribution_name, genjet_distribution_name,jetResponseDir,genjetDir +from Validation.RecoParticleFlow.defaults_cfi import ptbins, etabins, response_distribution_name, genjet_distribution_name, recojet_distribution_name, jetResponseDir, genjetDir, offsetDir #----- ----- ----- ----- ----- ----- ----- ----- # @@ -34,7 +34,7 @@ def createResponsePlots(ptbins, etabins): response_plots += [make_response_plot_pset( response_distribution_name(iptbin, ietabin), - "Jet response (pT/pTgen) in {0} <= pt < {1}, {2} <= |eta| < {3}".format(ptbins[iptbin], ptbins[iptbin+1], etabins[ietabin], etabins[ietabin+1]), + "Jet response (pT/pTgen) in {0} <= pt < {1}, {2} <= |eta| < {3})".format(ptbins[iptbin], ptbins[iptbin+1], etabins[ietabin], etabins[ietabin+1]), 100, 0.0, 3.0, ptbins[iptbin], ptbins[iptbin+1], etabins[ietabin], etabins[ietabin+1] )] return response_plots @@ -47,7 +47,22 @@ def createGenJetPlots(ptbins, etabins): plots += [ cms.PSet( name = cms.string(genjet_distribution_name(ietabin)), - title = cms.string("GenJet pT ({0} <= |eta| <= {1}".format(eta_low, eta_high)), + title = cms.string("GenJet pT ({0} <= |eta| <= {1})".format(eta_low, eta_high)), + ptBins = cms.vdouble(ptbins), + etaBinLow = cms.double(eta_low), + etaBinHigh = cms.double(eta_high), + )] + return plots + +def createRecoJetPlots(ptbins, etabins): + plots = [] + for ietabin in range(len(etabins)-1): + eta_low = etabins[ietabin] + eta_high = etabins[ietabin + 1] + plots += [ + cms.PSet( + name = cms.string(recojet_distribution_name(ietabin)), + title = cms.string("RecoJet ({0} <= |eta| <= {1})".format(eta_low, eta_high)), ptBins = 
cms.vdouble(ptbins), etaBinLow = cms.double(eta_low), etaBinHigh = cms.double(eta_high), @@ -70,10 +85,10 @@ def createGenJetPlots(ptbins, etabins): # turn gen jets on or off genJetsOn = cms.bool(True), - + recoJetsOn = cms.bool(True), responsePlots = cms.VPSet(createResponsePlots(ptbins, etabins)), - genJetPlots = cms.VPSet(createGenJetPlots(ptbins, etabins)) - + genJetPlots = cms.VPSet(createGenJetPlots(ptbins, etabins)), + recoJetPlots = cms.VPSet(createRecoJetPlots(ptbins, etabins)), ) pfPuppiJetAnalyzerDQM = pfJetAnalyzerDQM.clone( @@ -90,10 +105,10 @@ def createGenJetPlots(ptbins, etabins): jetResponseDir = cms.vstring( vjetResponseDir ), genjetDir = cms.string( genjetDir ), + offsetDir = cms.string( offsetDir ), ptBins = cms.vdouble( ptbins ), etaBins = cms.vdouble( etabins ), - recoPtCut = cms.double( 15. ) - + recoPtCut = cms.double(10. ) ) diff --git a/Validation/RecoParticleFlow/test/compare.py b/Validation/RecoParticleFlow/test/compare.py index 2f21e4baef7bd..be14663394f85 100644 --- a/Validation/RecoParticleFlow/test/compare.py +++ b/Validation/RecoParticleFlow/test/compare.py @@ -112,6 +112,12 @@ def parse_args(): JetFolderDirs = ["JetResponse/slimmedJets/JEC", "JetResponse/slimmedJets/noJEC", "JetResponse/slimmedJetsPuppi/JEC", "JetResponse/slimmedJetsPuppi/noJEC"] for JetFolderDir in JetFolderDirs: + plots += [(JetFolderDir, "efficiency_pt", ["efficiency_eta05", "efficiency_eta13", + "efficiency_eta21","efficiency_eta25","efficiency_eta30","efficiency_eta50"])] + plots += [(JetFolderDir, "purity_pt", ["purity_eta05", "purity_eta13", + "purity_eta21","purity_eta25","purity_eta30","purity_eta50"])] + plots += [(JetFolderDir, "ratePUJet_pt", ["ratePUJet_eta05", "ratePUJet_eta13", + "ratePUJet_eta21","ratePUJet_eta25","ratePUJet_eta30","ratePUJet_eta50"])] plots += [(JetFolderDir, "reso_pt", ["preso_eta05", "preso_eta13", "preso_eta21","preso_eta25","preso_eta30","preso_eta50"])] plots += [(JetFolderDir, "reso_pt_rms", ["preso_eta05_rms", @@ -120,6 
+126,13 @@ def parse_args(): plots += [(JetFolderDir, "response_pt", ["presponse_eta05", "presponse_eta13", "presponse_eta21", "presponse_eta25", "presponse_eta30", "presponse_eta50"])] + plots += [(JetFolderDir, "response_pt_mean", ["presponse_eta05_mean", + "presponse_eta13_mean", "presponse_eta21_mean", + "presponse_eta25_mean", "presponse_eta30_mean", + "presponse_eta50_mean"])] + plots += [(JetFolderDir, "response_pt_median", ["presponse_eta05_median", + "presponse_eta13_median", "presponse_eta21_median", "presponse_eta25_median", + "presponse_eta30_median", "presponse_eta50_median"])] for iptbin in range(len(ptbins)-1): pthistograms = [] for ietabin in range(len(etabins)-1): @@ -189,42 +202,43 @@ def doPFCandPlots(files, plots): def addPlots(plotter, folder, name, section, histograms, opts, Offset=False): - folders = [folder] + folders = [folder] #plots = [PlotGroup(name, [Plot(h, **opts) for h in histograms])] #KH print plots - if Offset : - plots = [PlotGroup(name, [Plot(h, **opts) for h in histograms])] - plotter.append("Offset", folders, PlotFolder(*plots, loopSubFolders=False, page="offset", section=section)) - elif "JetResponse" in folder : - plots = [PlotGroup(name, [Plot(h, **opts) for h in histograms])] - plotter.append("ParticleFlow/" + section, folders, PlotFolder(*plots, loopSubFolders=False, page="pf", section=section)) - for plot in plots: - plot.setProperties(ncols=3) - plot.setProperties(legendDw=-0.68) - plot.setProperties(legendDh=0.005) - plot.setProperties(legendDy=0.24) - plot.setProperties(legendDx=0.05) - elif "JetMET" in folder: - for h in histograms: - plots = [PlotGroup(h, [Plot(h, **opts)])] - for plot in plots: - plot.setProperties(legendDw=-0.5) - plot.setProperties(legendDh=0.01) - plot.setProperties(legendDy=0.24) - plot.setProperties(legendDx=0.05) - plotter.append("JetMET" + section, folders, PlotFolder(*plots, loopSubFolders=False, page="JetMET", section=section)) - if "PackedCandidates" in folder: - for h in histograms: - if 
("PtMid" in h or "PtHigh" in h): - plots = [PlotGroup(h, [Plot(h, ymin = pow(10,-1), ylog = True)])] - else: - plots = [PlotGroup(h, [Plot(h, **opts)])] - for plot in plots: - plot.setProperties(legendDw=-0.5) - plot.setProperties(legendDh=0.01) - plot.setProperties(legendDy=0.24) - plot.setProperties(legendDx=0.05) - plotter.append("ParticleFlow/PackedCandidates/" + section, folders, PlotFolder(*plots, loopSubFolders=False, page="PackedCandidates", section= section)) + if Offset : + plots = [PlotGroup(name, [Plot(h, **opts) for h in histograms])] + plotter.append("Offset", folders, PlotFolder(*plots, loopSubFolders=False, page="offset", section=section)) + elif "JetResponse" in folder : + plots = [PlotGroup(name, [Plot(h, **opts) for h in histograms])] + plotter.append("ParticleFlow/" + section, folders, PlotFolder(*plots, loopSubFolders=False, page="pf", section=section)) + for plot in plots: + plot.setProperties(ncols=3) + plot.setProperties(legendDw=-0.68) + plot.setProperties(legendDh=0.005) + plot.setProperties(legendDy=0.24) + plot.setProperties(legendDx=0.05) + elif "JetMET" in folder: + for h in histograms: + plots = [PlotGroup(h, [Plot(h, **opts)])] + for plot in plots: + plot.setProperties(ncols=1) + plot.setProperties(legendDw=-0.5) + plot.setProperties(legendDh=0.01) + plot.setProperties(legendDy=0.24) + plot.setProperties(legendDx=0.05) + plotter.append("JetMET" + section, folders, PlotFolder(*plots, loopSubFolders=False, page="JetMET", section=section)) + if "PackedCandidates" in folder: + for h in histograms: + if ("PtMid" in h or "PtHigh" in h): + plots = [PlotGroup(h, [Plot(h, ymin = pow(10,-1), ylog = True)])] + else: + plots = [PlotGroup(h, [Plot(h, **opts)])] + for plot in plots: + plot.setProperties(legendDw=-0.5) + plot.setProperties(legendDh=0.01) + plot.setProperties(legendDy=0.24) + plot.setProperties(legendDx=0.05) + plotter.append("ParticleFlow/PackedCandidates/" + section, folders, PlotFolder(*plots, loopSubFolders=False, 
page="PackedCandidates", section= section)) def main(): @@ -238,10 +252,28 @@ def main(): styledict_response = {"xlog": True, "xgrid":False, "ygrid":False, "xtitle":"GenJet pT (GeV)", "ytitle":"Jet response", "xtitleoffset":7.7,"ytitleoffset":3.8,"adjustMarginLeft":0.00} + + styledict_rate = {"xlog": True, "xgrid":False, "ygrid":False, + "xtitle":"RecoJet pT (GeV)", "ytitle":"PU Jet rate (#PUJets/event)", + "xtitleoffset":7.7,"ytitleoffset":3.8,"adjustMarginLeft":0.00} + + styledict_efficiency = {"xlog": True, "xgrid":False, "ygrid":False, + "xtitle":"GenJet pT (GeV)", "ytitle":"Efficiency", + "xtitleoffset":7.7,"ytitleoffset":3.8,"adjustMarginLeft":0.00} + + styledict_purity = {"xlog": True, "xgrid":False, "ygrid":False, + "xtitle":"RecoJet pT (GeV)", "ytitle":"Purity", + "xtitleoffset":7.7,"ytitleoffset":3.8,"adjustMarginLeft":0.00} + plot_opts = { + "efficiency_pt": styledict_efficiency, + "purity_pt": styledict_purity, + "ratePUJet_pt": styledict_rate, "reso_pt": styledict_resolution, "reso_pt_rms": styledict_resolution, - "response_pt": styledict_response + "response_pt": styledict_response, + "response_pt_mean": styledict_response, + "response_pt_median": styledict_response, } for iptbin in range(len(ptbins)-1): plot_opts["response_{0:.0f}_{1:.0f}".format(ptbins[iptbin], ptbins[iptbin+1])] = {"stat": True} @@ -276,6 +308,7 @@ def main(): plotterDrawArgs = dict( separate=False, # Set to true if you want each plot in it's own canvas # ratio=False, # Uncomment to disable ratio pad + saveFormat=".png", ) diff --git a/Validation/RecoParticleFlow/test/datasets.py b/Validation/RecoParticleFlow/test/datasets.py index db2f01c311a34..31d56b9e8b804 100644 --- a/Validation/RecoParticleFlow/test/datasets.py +++ b/Validation/RecoParticleFlow/test/datasets.py @@ -129,7 +129,7 @@ def cache_das_filenames(self): #prefix = "root://xrootd-cms.infn.it//" tmpdir = "tmp" datasets = [ - 
Dataset("/RelValQCD_FlatPt_15_3000HS_14/CMSSW_12_1_0_pre2-121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "QCD_noPU", prefix, None, False, tmpdir), + Dataset("/RelValQCD_FlatPt_15_3000HS_14/CMSSW_13_3_0_pre3-132X_mcRun3_2023_realistic_v4-v1/GEN-SIM-DIGI-RAW", "QCD_noPU", prefix, None, False, tmpdir), Dataset("/RelValQCD_FlatPt_15_3000HS_14/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "QCD_PU", prefix, None, False, tmpdir), Dataset("/RelValZEE_14/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "ZEE_PU", prefix, None, False, tmpdir), Dataset("/RelValZMM_14/CMSSW_12_1_0_pre2-PU_121X_mcRun3_2021_realistic_v1-v1/GEN-SIM-DIGI-RAW", "ZMM_PU", prefix, None, False, tmpdir), diff --git a/Validation/RecoParticleFlow/test/run_relval.sh b/Validation/RecoParticleFlow/test/run_relval.sh index c613ba73d071b..2ebf3535e063f 100755 --- a/Validation/RecoParticleFlow/test/run_relval.sh +++ b/Validation/RecoParticleFlow/test/run_relval.sh @@ -113,7 +113,7 @@ if [ $STEP == "RECO" ]; then FILENAME=`sed -n "${NJOB}p" $INPUT_FILELIST` echo "FILENAME="$FILENAME - cmsDriver.py step3 --conditions $CONDITIONS -s RAW2DIGI,L1Reco,RECO,RECOSIM,PAT --datatier MINIAODSIM --nThreads $NTHREADS -n -1 --era $ERA --eventcontent MINIAODSIM --geometry=$GEOM --filein step2.root --fileout file:step3_inMINIAODSIM.root --no_exec --python_filename=step3.py $CUSTOM + cmsDriver.py step3 --conditions $CONDITIONS -s RAW2DIGI,L1Reco,RECO,RECOSIM,PAT --datatier MINIAODSIM --nThreads $NTHREADS -n 100 --era $ERA --eventcontent MINIAODSIM --geometry=$GEOM --filein step2.root --fileout file:step3_inMINIAODSIM.root --no_exec --python_filename=step3.py $CUSTOM else @@ -132,7 +132,7 @@ if [ $STEP == "RECO" ]; then echo "FILENAME="$FILENAME #Run the actual CMS reco with particle flow. 
echo "Running step RECO" - cmsDriver.py step3 --conditions $CONDITIONS -s RAW2DIGI,L1Reco,RECO,RECOSIM,PAT --datatier MINIAODSIM --nThreads $NTHREADS -n -1 --era $ERA --eventcontent MINIAODSIM --geometry=$GEOM --filein $FILENAME --fileout file:step3_inMINIAODSIM.root $CUSTOM | tee step3.log 2>&1 + cmsDriver.py step3 --conditions $CONDITIONS -s RAW2DIGI,L1Reco,RECO,RECOSIM,PAT --datatier MINIAODSIM --nThreads $NTHREADS -n 100 --era $ERA --eventcontent MINIAODSIM --geometry=$GEOM --filein $FILENAME --fileout file:step3_inMINIAODSIM.root $CUSTOM | tee step3.log 2>&1 #NanoAOD #On lxplus, this step takes about 1 minute / 1000 events @@ -153,7 +153,7 @@ elif [ $STEP == "DQM" ]; then #Run the DQM sequences (PF DQM only) #override the filenames here as cmsDriver does not allow multiple input files and there is no easy way to merge EDM files - cmsDriver.py step5 --conditions $CONDITIONS -s DQM:@pfDQM --datatier DQMIO --nThreads $NTHREADS --era $ERA --eventcontent DQM --filein filelist:step3_filelist.txt --fileout file:step5.root -n -1 2>&1 | tee step5.log + cmsDriver.py step5 --conditions $CONDITIONS -s DQM:@pfDQM --datatier DQMIO --nThreads $NTHREADS --era $ERA --eventcontent DQM --filein filelist:step3_filelist.txt --fileout file:step5.root -n 100 2>&1 | tee step5.log #Harvesting converts the histograms stored in TTrees to be stored in folders by run etc cmsDriver.py step6 --conditions $CONDITIONS -s HARVESTING:@pfDQM --era $ERA --filetype DQM --filein file:step5.root --fileout file:step6.root 2>&1 | tee step6.log diff --git a/Validation/RecoTrack/plugins/BuildFile.xml b/Validation/RecoTrack/plugins/BuildFile.xml index 089a3c17ed76b..e16d946a28f6b 100644 --- a/Validation/RecoTrack/plugins/BuildFile.xml +++ b/Validation/RecoTrack/plugins/BuildFile.xml @@ -22,6 +22,7 @@ + diff --git a/Validation/RecoTrack/plugins/TrackingNtuple.cc b/Validation/RecoTrack/plugins/TrackingNtuple.cc index 647ad1dcc3f18..e5f5dbe7b4d96 100644 --- a/Validation/RecoTrack/plugins/TrackingNtuple.cc 
+++ b/Validation/RecoTrack/plugins/TrackingNtuple.cc @@ -55,6 +55,8 @@ #include "MagneticField/Engine/interface/MagneticField.h" #include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "DataFormats/Phase2TrackerCluster/interface/Phase2TrackerCluster1D.h" + #include "DataFormats/SiPixelDetId/interface/PixelChannelIdentifier.h" #include "DataFormats/SiStripCluster/interface/SiStripClusterTools.h" #include "DataFormats/TrackerRecHit2D/interface/SiPixelRecHitCollection.h" @@ -492,6 +494,7 @@ class TrackingNtuple : public edm::one::EDAnalyzer { using PixelMaskContainer = edm::ContainerMask>; using StripMaskContainer = edm::ContainerMask>; + using Phase2OTMaskContainer = edm::ContainerMask>; struct TPHitIndex { TPHitIndex(unsigned int tp = 0, unsigned int simHit = 0, float to = 0, unsigned int id = 0) @@ -681,6 +684,7 @@ class TrackingNtuple : public edm::one::EDAnalyzer { std::vector>> pixelUseMaskTokens_; std::vector>> stripUseMaskTokens_; + std::vector>> ph2OTUseMaskTokens_; std::string builderName_; const bool includeSeeds_; @@ -1287,6 +1291,7 @@ class TrackingNtuple : public edm::one::EDAnalyzer { std::vector ph2_radL; //http://cmslxr.fnal.gov/lxr/source/DataFormats/GeometrySurface/interface/MediumProperties.h std::vector ph2_bbxi; + std::vector ph2_usedMask; //////////////////// // invalid (missing/inactive/etc) hits @@ -1490,6 +1495,7 @@ TrackingNtuple::TrackingNtuple(const edm::ParameterSet& iConfig) auto const& maskVPset = iConfig.getUntrackedParameterSetVector("clusterMasks"); pixelUseMaskTokens_.reserve(maskVPset.size()); stripUseMaskTokens_.reserve(maskVPset.size()); + ph2OTUseMaskTokens_.reserve(maskVPset.size()); for (auto const& mask : maskVPset) { auto index = mask.getUntrackedParameter("index"); assert(index < 64); @@ -1498,6 +1504,9 @@ TrackingNtuple::TrackingNtuple(const edm::ParameterSet& iConfig) if (includeStripHits_) stripUseMaskTokens_.emplace_back( index, consumes(mask.getUntrackedParameter("src"))); + if 
(includePhase2OTHits_) + ph2OTUseMaskTokens_.emplace_back( + index, consumes(mask.getUntrackedParameter("src"))); } } @@ -1872,7 +1881,7 @@ TrackingNtuple::TrackingNtuple(const edm::ParameterSet& iConfig) t->Branch("ph2_zx", &ph2_zx); t->Branch("ph2_radL", &ph2_radL); t->Branch("ph2_bbxi", &ph2_bbxi); - t->Branch("ph2_bbxi", &ph2_bbxi); + t->Branch("ph2_usedMask", &ph2_usedMask); } //invalid hits t->Branch("inv_isBarrel", &inv_isBarrel); @@ -2299,6 +2308,7 @@ void TrackingNtuple::clearVariables() { ph2_zx.clear(); ph2_radL.clear(); ph2_bbxi.clear(); + ph2_usedMask.clear(); //invalid hits inv_isBarrel.clear(); inv_detId.clear(); @@ -3314,6 +3324,22 @@ void TrackingNtuple::fillPhase2OTHits(const edm::Event& iEvent, const TrackerTopology& tTopo, const SimHitRefKeyToIndex& simHitRefKeyToIndex, std::set& hitProductIds) { + std::vector> phase2OTMasks; + phase2OTMasks.reserve(ph2OTUseMaskTokens_.size()); + for (const auto& itoken : ph2OTUseMaskTokens_) { + edm::Handle aH; + iEvent.getByToken(itoken.second, aH); + phase2OTMasks.emplace_back(1 << itoken.first, aH.product()); + } + auto ph2OTUsedMask = [&phase2OTMasks](size_t key) { + uint64_t mask = 0; + for (auto const& m : phase2OTMasks) { + if (m.second->mask(key)) + mask |= m.first; + } + return mask; + }; + edm::Handle phase2OTHits; iEvent.getByToken(phase2OTRecHitToken_, phase2OTHits); for (auto it = phase2OTHits->begin(); it != phase2OTHits->end(); it++) { @@ -3349,6 +3375,7 @@ void TrackingNtuple::fillPhase2OTHits(const edm::Event& iEvent, ph2_zx.push_back(hit->globalPositionError().czx()); ph2_radL.push_back(hit->surface()->mediumProperties().radLen()); ph2_bbxi.push_back(hit->surface()->mediumProperties().xi()); + ph2_usedMask.push_back(ph2OTUsedMask(hit->firstClusterRef().key())); LogTrace("TrackingNtuple") << "phase2 OT cluster=" << key << " subdId=" << hitId.subdetId() << " lay=" << lay << " rawId=" << hitId.rawId() << " pos =" << hit->globalPosition(); diff --git 
a/Validation/RecoTrack/python/TrackValidation_cff.py b/Validation/RecoTrack/python/TrackValidation_cff.py index 7bc788fcb592c..5e0026ada5175 100644 --- a/Validation/RecoTrack/python/TrackValidation_cff.py +++ b/Validation/RecoTrack/python/TrackValidation_cff.py @@ -997,7 +997,7 @@ def _uniqueFirstLayers(layerList): trackValidatorsTrackingOnly.remove(trackValidatorGsfTracksStandalone) trackValidatorsTrackingOnly.replace(trackValidatorBHadronStandalone, trackValidatorBHadronTrackingOnly) -seedingDeepCore.toReplaceWith(trackValidatorsTrackingOnly, cms.Sequence( +(seedingDeepCore & ~fastSim).toReplaceWith(trackValidatorsTrackingOnly, cms.Sequence( trackValidatorsTrackingOnly.copy()+ trackValidatorJetCore+ trackValidatorJetCoreSeedingTrackingOnly